+/**
+ * @file impatomic.h
+ * @brief Common header for C11/C++11 atomics
+ *
+ * Note that some features are unavailable, as they require support from a true
+ * C11/C++11 compiler.
+ */
+
+#ifndef __IMPATOMIC_H__
+#define __IMPATOMIC_H__
+
+#include "memoryorder.h"
+#include "cmodelint.h"
#ifdef __cplusplus
-#include <cstddef>
namespace std {
-#else
-#include <stddef.h>
-#include <stdbool.h>
#endif
-
#define CPP0X( feature )
-
-typedef enum memory_order {
- memory_order_relaxed, memory_order_acquire, memory_order_release,
- memory_order_acq_rel, memory_order_seq_cst
-} memory_order;
-
-
typedef struct atomic_flag
{
#ifdef __cplusplus
( volatile atomic_flag* );
extern void __atomic_flag_wait_explicit__
( volatile atomic_flag*, memory_order );
-extern volatile atomic_flag* __atomic_flag_for_address__
-( const volatile void* __z__ )
-__attribute__((const));
#ifdef __cplusplus
}
#endif
-#define _ATOMIC_LOAD_( __a__, __x__ ) \
-({ volatile __typeof__((__a__)->__f__)* __p__ = &((__a__)->__f__); \
- volatile atomic_flag* __g__ = __atomic_flag_for_address__( __p__ ); \
- __atomic_flag_wait_explicit__( __g__, __x__ ); \
- __typeof__((__a__)->__f__) __r__ = *__p__; \
- atomic_flag_clear_explicit( __g__, __x__ ); \
- __r__; })
-
-#define _ATOMIC_STORE_( __a__, __m__, __x__ ) \
-({ volatile __typeof__((__a__)->__f__)* __p__ = &((__a__)->__f__); \
- __typeof__(__m__) __v__ = (__m__); \
- volatile atomic_flag* __g__ = __atomic_flag_for_address__( __p__ ); \
- __atomic_flag_wait_explicit__( __g__, __x__ ); \
- *__p__ = __v__; \
- atomic_flag_clear_explicit( __g__, __x__ ); \
- __v__; })
-
-#define _ATOMIC_MODIFY_( __a__, __o__, __m__, __x__ ) \
-({ volatile __typeof__((__a__)->__f__)* __p__ = &((__a__)->__f__); \
- __typeof__(__m__) __v__ = (__m__); \
- volatile atomic_flag* __g__ = __atomic_flag_for_address__( __p__ ); \
- __atomic_flag_wait_explicit__( __g__, __x__ ); \
- __typeof__((__a__)->__f__) __r__ = *__p__; \
- *__p__ __o__ __v__; \
- atomic_flag_clear_explicit( __g__, __x__ ); \
- __r__; })
-
-#define _ATOMIC_CMPSWP_( __a__, __e__, __m__, __x__ ) \
-({ volatile __typeof__((__a__)->__f__)* __p__ = &((__a__)->__f__); \
- __typeof__(__e__) __q__ = (__e__); \
- __typeof__(__m__) __v__ = (__m__); \
- bool __r__; \
- volatile atomic_flag* __g__ = __atomic_flag_for_address__( __p__ ); \
- __atomic_flag_wait_explicit__( __g__, __x__ ); \
- __typeof__((__a__)->__f__) __t__ = *__p__; \
- if ( __t__ == *__q__ ) { *__p__ = __v__; __r__ = true; } \
- else { *__q__ = __t__; __r__ = false; } \
- atomic_flag_clear_explicit( __g__, __x__ ); \
- __r__; })
+/*
+ The remainder of the example implementation uses the following
+ macros. These macros exploit GNU extensions for value-returning
+ blocks (AKA statement expressions) and __typeof__.
+
+ The macros rely on data fields of atomic structs being named __f__.
+ Other symbols used are __a__=atomic, __e__=expected, __f__=field,
+ __g__=flag, __m__=modified, __o__=operation, __r__=result,
+ __p__=pointer to field, __v__=value (for single evaluation),
+ __x__=memory-ordering (the success ordering), and __y__=memory-ordering
+ (the failure ordering, used only by the compare-exchange operations).
+*/
+
+#define _ATOMIC_LOAD_( __a__, __x__ ) \
+ ({ volatile __typeof__((__a__)->__f__)* __p__ = & ((__a__)->__f__); \
+ __typeof__((__a__)->__f__) __r__ = (__typeof__((__a__)->__f__))model_read_action((void *)__p__, __x__); \
+ __r__; })
+
+#define _ATOMIC_STORE_( __a__, __m__, __x__ ) \
+ ({ volatile __typeof__((__a__)->__f__)* __p__ = & ((__a__)->__f__); \
+ __typeof__(__m__) __v__ = (__m__); \
+ model_write_action((void *) __p__, __x__, (uint64_t) __v__); \
+ __v__; })
+
+
+#define _ATOMIC_INIT_( __a__, __m__ ) \
+ ({ volatile __typeof__((__a__)->__f__)* __p__ = & ((__a__)->__f__); \
+ __typeof__(__m__) __v__ = (__m__); \
+ model_init_action((void *) __p__, (uint64_t) __v__); \
+ __v__; })
+
+#define _ATOMIC_MODIFY_( __a__, __o__, __m__, __x__ ) \
+ ({ volatile __typeof__((__a__)->__f__)* __p__ = & ((__a__)->__f__); \
+ __typeof__((__a__)->__f__) __old__=(__typeof__((__a__)->__f__)) model_rmwr_action((void *)__p__, __x__); \
+ __typeof__(__m__) __v__ = (__m__); \
+ __typeof__((__a__)->__f__) __copy__= __old__; \
+ __copy__ __o__ __v__; \
+ model_rmw_action((void *)__p__, __x__, (uint64_t) __copy__); \
+ __old__; })
+
+/* No spurious failure for now: the weak compare-exchange is simply an
+   alias for the strong one, so it never fails spuriously. */
+#define _ATOMIC_CMPSWP_WEAK_ _ATOMIC_CMPSWP_
+
+#define _ATOMIC_CMPSWP_( __a__, __e__, __m__, __x__ ) \
+ ({ volatile __typeof__((__a__)->__f__)* __p__ = & ((__a__)->__f__); \
+ __typeof__(__e__) __q__ = (__e__); \
+ __typeof__(__m__) __v__ = (__m__); \
+ bool __r__; \
+ __typeof__((__a__)->__f__) __t__=(__typeof__((__a__)->__f__)) model_rmwr_action((void *)__p__, __x__); \
+ if (__t__ == * __q__ ) { \
+ model_rmw_action((void *)__p__, __x__, (uint64_t) __v__); __r__ = true; } \
+ else { model_rmwc_action((void *)__p__, __x__); *__q__ = __t__; __r__ = false;} \
+ __r__; })
#define _ATOMIC_FENCE_( __a__, __x__ ) \
-({ volatile __typeof__((__a__)->__f__)* __p__ = &((__a__)->__f__); \
- volatile atomic_flag* __g__ = __atomic_flag_for_address__( __p__ ); \
- atomic_flag_fence( __g__, __x__ ); \
- })
-
-
-#define ATOMIC_INTEGRAL_LOCK_FREE 0
-#define ATOMIC_ADDRESS_LOCK_FREE 0
-
+ ({ model_fence_action(__x__);})
+
+
+#define ATOMIC_CHAR_LOCK_FREE 1
+#define ATOMIC_CHAR16_T_LOCK_FREE 1
+#define ATOMIC_CHAR32_T_LOCK_FREE 1
+#define ATOMIC_WCHAR_T_LOCK_FREE 1
+#define ATOMIC_SHORT_LOCK_FREE 1
+#define ATOMIC_INT_LOCK_FREE 1
+#define ATOMIC_LONG_LOCK_FREE 1
+#define ATOMIC_LLONG_LOCK_FREE 1
+#define ATOMIC_ADDRESS_LOCK_FREE 1
typedef struct atomic_bool
{
bool is_lock_free() const volatile;
void store( bool, memory_order = memory_order_seq_cst ) volatile;
bool load( memory_order = memory_order_seq_cst ) volatile;
- bool swap( bool, memory_order = memory_order_seq_cst ) volatile;
- bool compare_swap ( bool&, bool, memory_order, memory_order ) volatile;
- bool compare_swap ( bool&, bool,
+ bool exchange( bool, memory_order = memory_order_seq_cst ) volatile;
+ bool compare_exchange_weak ( bool&, bool, memory_order, memory_order ) volatile;
+ bool compare_exchange_strong ( bool&, bool, memory_order, memory_order ) volatile;
+ bool compare_exchange_weak ( bool&, bool,
+ memory_order = memory_order_seq_cst) volatile;
+ bool compare_exchange_strong ( bool&, bool,
memory_order = memory_order_seq_cst) volatile;
void fence( memory_order ) const volatile;
friend void atomic_store_explicit( volatile atomic_bool*, bool,
memory_order );
friend bool atomic_load_explicit( volatile atomic_bool*, memory_order );
- friend bool atomic_swap_explicit( volatile atomic_bool*, bool,
+ friend bool atomic_exchange_explicit( volatile atomic_bool*, bool,
memory_order );
- friend bool atomic_compare_swap_explicit( volatile atomic_bool*, bool*, bool,
+ friend bool atomic_compare_exchange_weak_explicit( volatile atomic_bool*, bool*, bool,
+ memory_order, memory_order );
+ friend bool atomic_compare_exchange_strong_explicit( volatile atomic_bool*, bool*, bool,
memory_order, memory_order );
friend void atomic_fence( const volatile atomic_bool*, memory_order );
bool is_lock_free() const volatile;
void store( void*, memory_order = memory_order_seq_cst ) volatile;
void* load( memory_order = memory_order_seq_cst ) volatile;
- void* swap( void*, memory_order = memory_order_seq_cst ) volatile;
- bool compare_swap( void*&, void*, memory_order, memory_order ) volatile;
- bool compare_swap( void*&, void*,
+ void* exchange( void*, memory_order = memory_order_seq_cst ) volatile;
+ bool compare_exchange_weak( void*&, void*, memory_order, memory_order ) volatile;
+ bool compare_exchange_strong( void*&, void*, memory_order, memory_order ) volatile;
+ bool compare_exchange_weak( void*&, void*,
+ memory_order = memory_order_seq_cst ) volatile;
+ bool compare_exchange_strong( void*&, void*,
memory_order = memory_order_seq_cst ) volatile;
void fence( memory_order ) const volatile;
void* fetch_add( ptrdiff_t, memory_order = memory_order_seq_cst ) volatile;
friend void atomic_store_explicit( volatile atomic_address*, void*,
memory_order );
friend void* atomic_load_explicit( volatile atomic_address*, memory_order );
- friend void* atomic_swap_explicit( volatile atomic_address*, void*,
+ friend void* atomic_exchange_explicit( volatile atomic_address*, void*,
memory_order );
- friend bool atomic_compare_swap_explicit( volatile atomic_address*,
+ friend bool atomic_compare_exchange_weak_explicit( volatile atomic_address*,
+ void**, void*, memory_order, memory_order );
+ friend bool atomic_compare_exchange_strong_explicit( volatile atomic_address*,
void**, void*, memory_order, memory_order );
friend void atomic_fence( const volatile atomic_address*, memory_order );
friend void* atomic_fetch_add_explicit( volatile atomic_address*, ptrdiff_t,
void store( char,
memory_order = memory_order_seq_cst ) volatile;
char load( memory_order = memory_order_seq_cst ) volatile;
- char swap( char,
+ char exchange( char,
memory_order = memory_order_seq_cst ) volatile;
- bool compare_swap( char&, char,
+ bool compare_exchange_weak( char&, char,
memory_order, memory_order ) volatile;
- bool compare_swap( char&, char,
+ bool compare_exchange_strong( char&, char,
+ memory_order, memory_order ) volatile;
+ bool compare_exchange_weak( char&, char,
+ memory_order = memory_order_seq_cst ) volatile;
+ bool compare_exchange_strong( char&, char,
memory_order = memory_order_seq_cst ) volatile;
void fence( memory_order ) const volatile;
char fetch_add( char,
memory_order );
friend char atomic_load_explicit( volatile atomic_char*,
memory_order );
- friend char atomic_swap_explicit( volatile atomic_char*,
+ friend char atomic_exchange_explicit( volatile atomic_char*,
char, memory_order );
- friend bool atomic_compare_swap_explicit( volatile atomic_char*,
+ friend bool atomic_compare_exchange_weak_explicit( volatile atomic_char*,
+ char*, char, memory_order, memory_order );
+ friend bool atomic_compare_exchange_strong_explicit( volatile atomic_char*,
char*, char, memory_order, memory_order );
friend void atomic_fence( const volatile atomic_char*, memory_order );
friend char atomic_fetch_add_explicit( volatile atomic_char*,
void store( signed char,
memory_order = memory_order_seq_cst ) volatile;
signed char load( memory_order = memory_order_seq_cst ) volatile;
- signed char swap( signed char,
+ signed char exchange( signed char,
memory_order = memory_order_seq_cst ) volatile;
- bool compare_swap( signed char&, signed char,
+ bool compare_exchange_weak( signed char&, signed char,
+ memory_order, memory_order ) volatile;
+ bool compare_exchange_strong( signed char&, signed char,
memory_order, memory_order ) volatile;
- bool compare_swap( signed char&, signed char,
+ bool compare_exchange_weak( signed char&, signed char,
+ memory_order = memory_order_seq_cst ) volatile;
+ bool compare_exchange_strong( signed char&, signed char,
memory_order = memory_order_seq_cst ) volatile;
void fence( memory_order ) const volatile;
signed char fetch_add( signed char,
memory_order );
friend signed char atomic_load_explicit( volatile atomic_schar*,
memory_order );
- friend signed char atomic_swap_explicit( volatile atomic_schar*,
+ friend signed char atomic_exchange_explicit( volatile atomic_schar*,
signed char, memory_order );
- friend bool atomic_compare_swap_explicit( volatile atomic_schar*,
+ friend bool atomic_compare_exchange_weak_explicit( volatile atomic_schar*,
+ signed char*, signed char, memory_order, memory_order );
+ friend bool atomic_compare_exchange_strong_explicit( volatile atomic_schar*,
signed char*, signed char, memory_order, memory_order );
friend void atomic_fence( const volatile atomic_schar*, memory_order );
friend signed char atomic_fetch_add_explicit( volatile atomic_schar*,
void store( unsigned char,
memory_order = memory_order_seq_cst ) volatile;
unsigned char load( memory_order = memory_order_seq_cst ) volatile;
- unsigned char swap( unsigned char,
+ unsigned char exchange( unsigned char,
memory_order = memory_order_seq_cst ) volatile;
- bool compare_swap( unsigned char&, unsigned char,
+ bool compare_exchange_weak( unsigned char&, unsigned char,
+ memory_order, memory_order ) volatile;
+ bool compare_exchange_strong( unsigned char&, unsigned char,
memory_order, memory_order ) volatile;
- bool compare_swap( unsigned char&, unsigned char,
+ bool compare_exchange_weak( unsigned char&, unsigned char,
+ memory_order = memory_order_seq_cst ) volatile;
+ bool compare_exchange_strong( unsigned char&, unsigned char,
memory_order = memory_order_seq_cst ) volatile;
void fence( memory_order ) const volatile;
unsigned char fetch_add( unsigned char,
memory_order );
friend unsigned char atomic_load_explicit( volatile atomic_uchar*,
memory_order );
- friend unsigned char atomic_swap_explicit( volatile atomic_uchar*,
+ friend unsigned char atomic_exchange_explicit( volatile atomic_uchar*,
unsigned char, memory_order );
- friend bool atomic_compare_swap_explicit( volatile atomic_uchar*,
+ friend bool atomic_compare_exchange_weak_explicit( volatile atomic_uchar*,
+ unsigned char*, unsigned char, memory_order, memory_order );
+ friend bool atomic_compare_exchange_strong_explicit( volatile atomic_uchar*,
unsigned char*, unsigned char, memory_order, memory_order );
friend void atomic_fence( const volatile atomic_uchar*, memory_order );
friend unsigned char atomic_fetch_add_explicit( volatile atomic_uchar*,
void store( short,
memory_order = memory_order_seq_cst ) volatile;
short load( memory_order = memory_order_seq_cst ) volatile;
- short swap( short,
+ short exchange( short,
memory_order = memory_order_seq_cst ) volatile;
- bool compare_swap( short&, short,
+ bool compare_exchange_weak( short&, short,
memory_order, memory_order ) volatile;
- bool compare_swap( short&, short,
+ bool compare_exchange_strong( short&, short,
+ memory_order, memory_order ) volatile;
+ bool compare_exchange_weak( short&, short,
+ memory_order = memory_order_seq_cst ) volatile;
+ bool compare_exchange_strong( short&, short,
memory_order = memory_order_seq_cst ) volatile;
void fence( memory_order ) const volatile;
short fetch_add( short,
memory_order );
friend short atomic_load_explicit( volatile atomic_short*,
memory_order );
- friend short atomic_swap_explicit( volatile atomic_short*,
+ friend short atomic_exchange_explicit( volatile atomic_short*,
short, memory_order );
- friend bool atomic_compare_swap_explicit( volatile atomic_short*,
+ friend bool atomic_compare_exchange_weak_explicit( volatile atomic_short*,
+ short*, short, memory_order, memory_order );
+ friend bool atomic_compare_exchange_strong_explicit( volatile atomic_short*,
short*, short, memory_order, memory_order );
friend void atomic_fence( const volatile atomic_short*, memory_order );
friend short atomic_fetch_add_explicit( volatile atomic_short*,
void store( unsigned short,
memory_order = memory_order_seq_cst ) volatile;
unsigned short load( memory_order = memory_order_seq_cst ) volatile;
- unsigned short swap( unsigned short,
+ unsigned short exchange( unsigned short,
memory_order = memory_order_seq_cst ) volatile;
- bool compare_swap( unsigned short&, unsigned short,
+ bool compare_exchange_weak( unsigned short&, unsigned short,
+ memory_order, memory_order ) volatile;
+ bool compare_exchange_strong( unsigned short&, unsigned short,
memory_order, memory_order ) volatile;
- bool compare_swap( unsigned short&, unsigned short,
+ bool compare_exchange_weak( unsigned short&, unsigned short,
+ memory_order = memory_order_seq_cst ) volatile;
+ bool compare_exchange_strong( unsigned short&, unsigned short,
memory_order = memory_order_seq_cst ) volatile;
void fence( memory_order ) const volatile;
unsigned short fetch_add( unsigned short,
memory_order );
friend unsigned short atomic_load_explicit( volatile atomic_ushort*,
memory_order );
- friend unsigned short atomic_swap_explicit( volatile atomic_ushort*,
+ friend unsigned short atomic_exchange_explicit( volatile atomic_ushort*,
unsigned short, memory_order );
- friend bool atomic_compare_swap_explicit( volatile atomic_ushort*,
+ friend bool atomic_compare_exchange_weak_explicit( volatile atomic_ushort*,
+ unsigned short*, unsigned short, memory_order, memory_order );
+ friend bool atomic_compare_exchange_strong_explicit( volatile atomic_ushort*,
unsigned short*, unsigned short, memory_order, memory_order );
friend void atomic_fence( const volatile atomic_ushort*, memory_order );
friend unsigned short atomic_fetch_add_explicit( volatile atomic_ushort*,
void store( int,
memory_order = memory_order_seq_cst ) volatile;
int load( memory_order = memory_order_seq_cst ) volatile;
- int swap( int,
+ int exchange( int,
memory_order = memory_order_seq_cst ) volatile;
- bool compare_swap( int&, int,
+ bool compare_exchange_weak( int&, int,
+ memory_order, memory_order ) volatile;
+ bool compare_exchange_strong( int&, int,
memory_order, memory_order ) volatile;
- bool compare_swap( int&, int,
+ bool compare_exchange_weak( int&, int,
+ memory_order = memory_order_seq_cst ) volatile;
+ bool compare_exchange_strong( int&, int,
memory_order = memory_order_seq_cst ) volatile;
void fence( memory_order ) const volatile;
int fetch_add( int,
memory_order );
friend int atomic_load_explicit( volatile atomic_int*,
memory_order );
- friend int atomic_swap_explicit( volatile atomic_int*,
+ friend int atomic_exchange_explicit( volatile atomic_int*,
int, memory_order );
- friend bool atomic_compare_swap_explicit( volatile atomic_int*,
+ friend bool atomic_compare_exchange_weak_explicit( volatile atomic_int*,
+ int*, int, memory_order, memory_order );
+ friend bool atomic_compare_exchange_strong_explicit( volatile atomic_int*,
int*, int, memory_order, memory_order );
friend void atomic_fence( const volatile atomic_int*, memory_order );
friend int atomic_fetch_add_explicit( volatile atomic_int*,
void store( unsigned int,
memory_order = memory_order_seq_cst ) volatile;
unsigned int load( memory_order = memory_order_seq_cst ) volatile;
- unsigned int swap( unsigned int,
+ unsigned int exchange( unsigned int,
memory_order = memory_order_seq_cst ) volatile;
- bool compare_swap( unsigned int&, unsigned int,
+ bool compare_exchange_weak( unsigned int&, unsigned int,
memory_order, memory_order ) volatile;
- bool compare_swap( unsigned int&, unsigned int,
+ bool compare_exchange_strong( unsigned int&, unsigned int,
+ memory_order, memory_order ) volatile;
+ bool compare_exchange_weak( unsigned int&, unsigned int,
+ memory_order = memory_order_seq_cst ) volatile;
+ bool compare_exchange_strong( unsigned int&, unsigned int,
memory_order = memory_order_seq_cst ) volatile;
void fence( memory_order ) const volatile;
unsigned int fetch_add( unsigned int,
memory_order );
friend unsigned int atomic_load_explicit( volatile atomic_uint*,
memory_order );
- friend unsigned int atomic_swap_explicit( volatile atomic_uint*,
+ friend unsigned int atomic_exchange_explicit( volatile atomic_uint*,
unsigned int, memory_order );
- friend bool atomic_compare_swap_explicit( volatile atomic_uint*,
+ friend bool atomic_compare_exchange_weak_explicit( volatile atomic_uint*,
+ unsigned int*, unsigned int, memory_order, memory_order );
+ friend bool atomic_compare_exchange_strong_explicit( volatile atomic_uint*,
unsigned int*, unsigned int, memory_order, memory_order );
friend void atomic_fence( const volatile atomic_uint*, memory_order );
friend unsigned int atomic_fetch_add_explicit( volatile atomic_uint*,
void store( long,
memory_order = memory_order_seq_cst ) volatile;
long load( memory_order = memory_order_seq_cst ) volatile;
- long swap( long,
+ long exchange( long,
memory_order = memory_order_seq_cst ) volatile;
- bool compare_swap( long&, long,
+ bool compare_exchange_weak( long&, long,
+ memory_order, memory_order ) volatile;
+ bool compare_exchange_strong( long&, long,
memory_order, memory_order ) volatile;
- bool compare_swap( long&, long,
+ bool compare_exchange_weak( long&, long,
+ memory_order = memory_order_seq_cst ) volatile;
+ bool compare_exchange_strong( long&, long,
memory_order = memory_order_seq_cst ) volatile;
void fence( memory_order ) const volatile;
long fetch_add( long,
memory_order );
friend long atomic_load_explicit( volatile atomic_long*,
memory_order );
- friend long atomic_swap_explicit( volatile atomic_long*,
+ friend long atomic_exchange_explicit( volatile atomic_long*,
long, memory_order );
- friend bool atomic_compare_swap_explicit( volatile atomic_long*,
+ friend bool atomic_compare_exchange_weak_explicit( volatile atomic_long*,
+ long*, long, memory_order, memory_order );
+ friend bool atomic_compare_exchange_strong_explicit( volatile atomic_long*,
long*, long, memory_order, memory_order );
friend void atomic_fence( const volatile atomic_long*, memory_order );
friend long atomic_fetch_add_explicit( volatile atomic_long*,
void store( unsigned long,
memory_order = memory_order_seq_cst ) volatile;
unsigned long load( memory_order = memory_order_seq_cst ) volatile;
- unsigned long swap( unsigned long,
+ unsigned long exchange( unsigned long,
memory_order = memory_order_seq_cst ) volatile;
- bool compare_swap( unsigned long&, unsigned long,
+ bool compare_exchange_weak( unsigned long&, unsigned long,
+ memory_order, memory_order ) volatile;
+ bool compare_exchange_strong( unsigned long&, unsigned long,
memory_order, memory_order ) volatile;
- bool compare_swap( unsigned long&, unsigned long,
+ bool compare_exchange_weak( unsigned long&, unsigned long,
+ memory_order = memory_order_seq_cst ) volatile;
+ bool compare_exchange_strong( unsigned long&, unsigned long,
memory_order = memory_order_seq_cst ) volatile;
void fence( memory_order ) const volatile;
unsigned long fetch_add( unsigned long,
memory_order );
friend unsigned long atomic_load_explicit( volatile atomic_ulong*,
memory_order );
- friend unsigned long atomic_swap_explicit( volatile atomic_ulong*,
+ friend unsigned long atomic_exchange_explicit( volatile atomic_ulong*,
unsigned long, memory_order );
- friend bool atomic_compare_swap_explicit( volatile atomic_ulong*,
+ friend bool atomic_compare_exchange_weak_explicit( volatile atomic_ulong*,
+ unsigned long*, unsigned long, memory_order, memory_order );
+ friend bool atomic_compare_exchange_strong_explicit( volatile atomic_ulong*,
unsigned long*, unsigned long, memory_order, memory_order );
friend void atomic_fence( const volatile atomic_ulong*, memory_order );
friend unsigned long atomic_fetch_add_explicit( volatile atomic_ulong*,
void store( long long,
memory_order = memory_order_seq_cst ) volatile;
long long load( memory_order = memory_order_seq_cst ) volatile;
- long long swap( long long,
+ long long exchange( long long,
memory_order = memory_order_seq_cst ) volatile;
- bool compare_swap( long long&, long long,
+ bool compare_exchange_weak( long long&, long long,
memory_order, memory_order ) volatile;
- bool compare_swap( long long&, long long,
+ bool compare_exchange_strong( long long&, long long,
+ memory_order, memory_order ) volatile;
+ bool compare_exchange_weak( long long&, long long,
+ memory_order = memory_order_seq_cst ) volatile;
+ bool compare_exchange_strong( long long&, long long,
memory_order = memory_order_seq_cst ) volatile;
void fence( memory_order ) const volatile;
long long fetch_add( long long,
memory_order );
friend long long atomic_load_explicit( volatile atomic_llong*,
memory_order );
- friend long long atomic_swap_explicit( volatile atomic_llong*,
+ friend long long atomic_exchange_explicit( volatile atomic_llong*,
long long, memory_order );
- friend bool atomic_compare_swap_explicit( volatile atomic_llong*,
+ friend bool atomic_compare_exchange_weak_explicit( volatile atomic_llong*,
+ long long*, long long, memory_order, memory_order );
+ friend bool atomic_compare_exchange_strong_explicit( volatile atomic_llong*,
long long*, long long, memory_order, memory_order );
friend void atomic_fence( const volatile atomic_llong*, memory_order );
friend long long atomic_fetch_add_explicit( volatile atomic_llong*,
void store( unsigned long long,
memory_order = memory_order_seq_cst ) volatile;
unsigned long long load( memory_order = memory_order_seq_cst ) volatile;
- unsigned long long swap( unsigned long long,
+ unsigned long long exchange( unsigned long long,
memory_order = memory_order_seq_cst ) volatile;
- bool compare_swap( unsigned long long&, unsigned long long,
+ bool compare_exchange_weak( unsigned long long&, unsigned long long,
+ memory_order, memory_order ) volatile;
+ bool compare_exchange_strong( unsigned long long&, unsigned long long,
memory_order, memory_order ) volatile;
- bool compare_swap( unsigned long long&, unsigned long long,
+ bool compare_exchange_weak( unsigned long long&, unsigned long long,
+ memory_order = memory_order_seq_cst ) volatile;
+ bool compare_exchange_strong( unsigned long long&, unsigned long long,
memory_order = memory_order_seq_cst ) volatile;
void fence( memory_order ) const volatile;
unsigned long long fetch_add( unsigned long long,
memory_order );
friend unsigned long long atomic_load_explicit( volatile atomic_ullong*,
memory_order );
- friend unsigned long long atomic_swap_explicit( volatile atomic_ullong*,
+ friend unsigned long long atomic_exchange_explicit( volatile atomic_ullong*,
unsigned long long, memory_order );
- friend bool atomic_compare_swap_explicit( volatile atomic_ullong*,
+ friend bool atomic_compare_exchange_weak_explicit( volatile atomic_ullong*,
+ unsigned long long*, unsigned long long, memory_order, memory_order );
+ friend bool atomic_compare_exchange_strong_explicit( volatile atomic_ullong*,
unsigned long long*, unsigned long long, memory_order, memory_order );
friend void atomic_fence( const volatile atomic_ullong*, memory_order );
friend unsigned long long atomic_fetch_add_explicit( volatile atomic_ullong*,
bool is_lock_free() const volatile;
void store( wchar_t, memory_order = memory_order_seq_cst ) volatile;
wchar_t load( memory_order = memory_order_seq_cst ) volatile;
- wchar_t swap( wchar_t,
+ wchar_t exchange( wchar_t,
memory_order = memory_order_seq_cst ) volatile;
- bool compare_swap( wchar_t&, wchar_t,
+ bool compare_exchange_weak( wchar_t&, wchar_t,
+ memory_order, memory_order ) volatile;
+ bool compare_exchange_strong( wchar_t&, wchar_t,
memory_order, memory_order ) volatile;
- bool compare_swap( wchar_t&, wchar_t,
+ bool compare_exchange_weak( wchar_t&, wchar_t,
+ memory_order = memory_order_seq_cst ) volatile;
+ bool compare_exchange_strong( wchar_t&, wchar_t,
memory_order = memory_order_seq_cst ) volatile;
void fence( memory_order ) const volatile;
wchar_t fetch_add( wchar_t,
memory_order );
friend wchar_t atomic_load_explicit( volatile atomic_wchar_t*,
memory_order );
- friend wchar_t atomic_swap_explicit( volatile atomic_wchar_t*,
+ friend wchar_t atomic_exchange_explicit( volatile atomic_wchar_t*,
wchar_t, memory_order );
- friend bool atomic_compare_swap_explicit( volatile atomic_wchar_t*,
+ friend bool atomic_compare_exchange_weak_explicit( volatile atomic_wchar_t*,
+ wchar_t*, wchar_t, memory_order, memory_order );
+ friend bool atomic_compare_exchange_strong_explicit( volatile atomic_wchar_t*,
wchar_t*, wchar_t, memory_order, memory_order );
friend void atomic_fence( const volatile atomic_wchar_t*, memory_order );
friend wchar_t atomic_fetch_add_explicit( volatile atomic_wchar_t*,
bool is_lock_free() const volatile;
void store( T, memory_order = memory_order_seq_cst ) volatile;
T load( memory_order = memory_order_seq_cst ) volatile;
- T swap( T __v__, memory_order = memory_order_seq_cst ) volatile;
- bool compare_swap( T&, T, memory_order, memory_order ) volatile;
- bool compare_swap( T&, T, memory_order = memory_order_seq_cst ) volatile;
+ T exchange( T __v__, memory_order = memory_order_seq_cst ) volatile;
+ bool compare_exchange_weak( T&, T, memory_order, memory_order ) volatile;
+ bool compare_exchange_strong( T&, T, memory_order, memory_order ) volatile;
+ bool compare_exchange_weak( T&, T, memory_order = memory_order_seq_cst ) volatile;
+ bool compare_exchange_strong( T&, T, memory_order = memory_order_seq_cst ) volatile;
void fence( memory_order ) const volatile;
CPP0X( atomic() = default; )
template<typename T> struct atomic< T* > : atomic_address
{
T* load( memory_order = memory_order_seq_cst ) volatile;
- T* swap( T*, memory_order = memory_order_seq_cst ) volatile;
- bool compare_swap( T*&, T*, memory_order, memory_order ) volatile;
- bool compare_swap( T*&, T*,
+ T* exchange( T*, memory_order = memory_order_seq_cst ) volatile;
+ bool compare_exchange_weak( T*&, T*, memory_order, memory_order ) volatile;
+ bool compare_exchange_strong( T*&, T*, memory_order, memory_order ) volatile;
+ bool compare_exchange_weak( T*&, T*,
+ memory_order = memory_order_seq_cst ) volatile;
+ bool compare_exchange_strong( T*&, T*,
memory_order = memory_order_seq_cst ) volatile;
T* fetch_add( ptrdiff_t, memory_order = memory_order_seq_cst ) volatile;
T* fetch_sub( ptrdiff_t, memory_order = memory_order_seq_cst ) volatile;
#ifdef __cplusplus
-inline bool atomic_is_lock_free( const volatile atomic_bool* __a__ )
+inline bool atomic_is_lock_free
+( const volatile atomic_bool* __a__ )
{ return false; }
inline bool atomic_load_explicit
( volatile atomic_bool* __a__, memory_order __x__ )
{ return _ATOMIC_LOAD_( __a__, __x__ ); }
-inline bool atomic_load( volatile atomic_bool* __a__ )
-{ return atomic_load_explicit( __a__, memory_order_seq_cst ); }
+inline bool atomic_load
+( volatile atomic_bool* __a__ ) { return atomic_load_explicit( __a__, memory_order_seq_cst ); }
inline void atomic_store_explicit
( volatile atomic_bool* __a__, bool __m__, memory_order __x__ )
( volatile atomic_bool* __a__, bool __m__ )
{ atomic_store_explicit( __a__, __m__, memory_order_seq_cst ); }
-inline bool atomic_swap_explicit
+inline bool atomic_exchange_explicit
( volatile atomic_bool* __a__, bool __m__, memory_order __x__ )
{ return _ATOMIC_MODIFY_( __a__, =, __m__, __x__ ); }
-inline bool atomic_swap
+inline bool atomic_exchange
( volatile atomic_bool* __a__, bool __m__ )
-{ return atomic_swap_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_exchange_explicit( __a__, __m__, memory_order_seq_cst ); }
-inline bool atomic_compare_swap_explicit
+inline bool atomic_compare_exchange_weak_explicit
+( volatile atomic_bool* __a__, bool* __e__, bool __m__,
+ memory_order __x__, memory_order __y__ )
+{ return _ATOMIC_CMPSWP_WEAK_( __a__, __e__, __m__, __x__ ); }
+
+inline bool atomic_compare_exchange_strong_explicit
( volatile atomic_bool* __a__, bool* __e__, bool __m__,
memory_order __x__, memory_order __y__ )
{ return _ATOMIC_CMPSWP_( __a__, __e__, __m__, __x__ ); }
-inline bool atomic_compare_swap
+inline bool atomic_compare_exchange_weak
( volatile atomic_bool* __a__, bool* __e__, bool __m__ )
-{ return atomic_compare_swap_explicit( __a__, __e__, __m__,
+{ return atomic_compare_exchange_weak_explicit( __a__, __e__, __m__,
+ memory_order_seq_cst, memory_order_seq_cst ); }
+
+inline bool atomic_compare_exchange_strong
+( volatile atomic_bool* __a__, bool* __e__, bool __m__ )
+{ return atomic_compare_exchange_strong_explicit( __a__, __e__, __m__,
memory_order_seq_cst, memory_order_seq_cst ); }
inline void atomic_fence
( volatile atomic_address* __a__, void* __m__ )
{ atomic_store_explicit( __a__, __m__, memory_order_seq_cst ); }
-inline void* atomic_swap_explicit
+inline void* atomic_exchange_explicit
( volatile atomic_address* __a__, void* __m__, memory_order __x__ )
-{ return _ATOMIC_MODIFY_( __a__, =, __m__, __x__ ); }
+{ return _ATOMIC_MODIFY_( __a__, =, __m__, __x__ ); }
-inline void* atomic_swap
+inline void* atomic_exchange
( volatile atomic_address* __a__, void* __m__ )
-{ return atomic_swap_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_exchange_explicit( __a__, __m__, memory_order_seq_cst ); }
-inline bool atomic_compare_swap_explicit
+inline bool atomic_compare_exchange_weak_explicit
+( volatile atomic_address* __a__, void** __e__, void* __m__,
+ memory_order __x__, memory_order __y__ )
+{ return _ATOMIC_CMPSWP_WEAK_( __a__, __e__, __m__, __x__ ); }
+
+inline bool atomic_compare_exchange_strong_explicit
( volatile atomic_address* __a__, void** __e__, void* __m__,
memory_order __x__, memory_order __y__ )
{ return _ATOMIC_CMPSWP_( __a__, __e__, __m__, __x__ ); }
-inline bool atomic_compare_swap
+inline bool atomic_compare_exchange_weak
( volatile atomic_address* __a__, void** __e__, void* __m__ )
-{ return atomic_compare_swap_explicit( __a__, __e__, __m__,
+{ return atomic_compare_exchange_weak_explicit( __a__, __e__, __m__,
+ memory_order_seq_cst, memory_order_seq_cst ); }
+
+inline bool atomic_compare_exchange_strong
+( volatile atomic_address* __a__, void** __e__, void* __m__ )
+{ return atomic_compare_exchange_strong_explicit( __a__, __e__, __m__,
memory_order_seq_cst, memory_order_seq_cst ); }
inline void atomic_fence
( volatile atomic_char* __a__, char __m__ )
{ atomic_store_explicit( __a__, __m__, memory_order_seq_cst ); }
-inline char atomic_swap_explicit
+inline char atomic_exchange_explicit
( volatile atomic_char* __a__, char __m__, memory_order __x__ )
{ return _ATOMIC_MODIFY_( __a__, =, __m__, __x__ ); }
-inline char atomic_swap
+inline char atomic_exchange
( volatile atomic_char* __a__, char __m__ )
-{ return atomic_swap_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_exchange_explicit( __a__, __m__, memory_order_seq_cst ); }
+
+inline bool atomic_compare_exchange_weak_explicit
+( volatile atomic_char* __a__, char* __e__, char __m__,
+ memory_order __x__, memory_order __y__ )
+{ return _ATOMIC_CMPSWP_WEAK_( __a__, __e__, __m__, __x__ ); }
-inline bool atomic_compare_swap_explicit
+inline bool atomic_compare_exchange_strong_explicit
( volatile atomic_char* __a__, char* __e__, char __m__,
memory_order __x__, memory_order __y__ )
{ return _ATOMIC_CMPSWP_( __a__, __e__, __m__, __x__ ); }
-inline bool atomic_compare_swap
+inline bool atomic_compare_exchange_weak
+( volatile atomic_char* __a__, char* __e__, char __m__ )
+{ return atomic_compare_exchange_weak_explicit( __a__, __e__, __m__,
+ memory_order_seq_cst, memory_order_seq_cst ); }
+
+inline bool atomic_compare_exchange_strong
( volatile atomic_char* __a__, char* __e__, char __m__ )
-{ return atomic_compare_swap_explicit( __a__, __e__, __m__,
+{ return atomic_compare_exchange_strong_explicit( __a__, __e__, __m__,
memory_order_seq_cst, memory_order_seq_cst ); }
inline void atomic_fence
( volatile atomic_schar* __a__, signed char __m__ )
{ atomic_store_explicit( __a__, __m__, memory_order_seq_cst ); }
-inline signed char atomic_swap_explicit
+inline signed char atomic_exchange_explicit
( volatile atomic_schar* __a__, signed char __m__, memory_order __x__ )
{ return _ATOMIC_MODIFY_( __a__, =, __m__, __x__ ); }
-inline signed char atomic_swap
+inline signed char atomic_exchange
( volatile atomic_schar* __a__, signed char __m__ )
-{ return atomic_swap_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_exchange_explicit( __a__, __m__, memory_order_seq_cst ); }
-inline bool atomic_compare_swap_explicit
+inline bool atomic_compare_exchange_weak_explicit
+( volatile atomic_schar* __a__, signed char* __e__, signed char __m__,
+ memory_order __x__, memory_order __y__ )
+{ return _ATOMIC_CMPSWP_WEAK_( __a__, __e__, __m__, __x__ ); }
+
+inline bool atomic_compare_exchange_strong_explicit
( volatile atomic_schar* __a__, signed char* __e__, signed char __m__,
memory_order __x__, memory_order __y__ )
{ return _ATOMIC_CMPSWP_( __a__, __e__, __m__, __x__ ); }
-inline bool atomic_compare_swap
+inline bool atomic_compare_exchange_weak
( volatile atomic_schar* __a__, signed char* __e__, signed char __m__ )
-{ return atomic_compare_swap_explicit( __a__, __e__, __m__,
+{ return atomic_compare_exchange_weak_explicit( __a__, __e__, __m__,
+ memory_order_seq_cst, memory_order_seq_cst ); }
+
+inline bool atomic_compare_exchange_strong
+( volatile atomic_schar* __a__, signed char* __e__, signed char __m__ )
+{ return atomic_compare_exchange_strong_explicit( __a__, __e__, __m__,
memory_order_seq_cst, memory_order_seq_cst ); }
inline void atomic_fence
( volatile atomic_uchar* __a__, unsigned char __m__ )
{ atomic_store_explicit( __a__, __m__, memory_order_seq_cst ); }
-inline unsigned char atomic_swap_explicit
+inline unsigned char atomic_exchange_explicit
( volatile atomic_uchar* __a__, unsigned char __m__, memory_order __x__ )
{ return _ATOMIC_MODIFY_( __a__, =, __m__, __x__ ); }
-inline unsigned char atomic_swap
+inline unsigned char atomic_exchange
( volatile atomic_uchar* __a__, unsigned char __m__ )
-{ return atomic_swap_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_exchange_explicit( __a__, __m__, memory_order_seq_cst ); }
+
+inline bool atomic_compare_exchange_weak_explicit
+( volatile atomic_uchar* __a__, unsigned char* __e__, unsigned char __m__,
+ memory_order __x__, memory_order __y__ )
+{ return _ATOMIC_CMPSWP_WEAK_( __a__, __e__, __m__, __x__ ); }
-inline bool atomic_compare_swap_explicit
+inline bool atomic_compare_exchange_strong_explicit
( volatile atomic_uchar* __a__, unsigned char* __e__, unsigned char __m__,
memory_order __x__, memory_order __y__ )
{ return _ATOMIC_CMPSWP_( __a__, __e__, __m__, __x__ ); }
-inline bool atomic_compare_swap
+inline bool atomic_compare_exchange_weak
( volatile atomic_uchar* __a__, unsigned char* __e__, unsigned char __m__ )
-{ return atomic_compare_swap_explicit( __a__, __e__, __m__,
+{ return atomic_compare_exchange_weak_explicit( __a__, __e__, __m__,
+ memory_order_seq_cst, memory_order_seq_cst ); }
+
+inline bool atomic_compare_exchange_strong
+( volatile atomic_uchar* __a__, unsigned char* __e__, unsigned char __m__ )
+{ return atomic_compare_exchange_strong_explicit( __a__, __e__, __m__,
memory_order_seq_cst, memory_order_seq_cst ); }
inline void atomic_fence
( volatile atomic_short* __a__, short __m__ )
{ atomic_store_explicit( __a__, __m__, memory_order_seq_cst ); }
-inline short atomic_swap_explicit
+inline short atomic_exchange_explicit
( volatile atomic_short* __a__, short __m__, memory_order __x__ )
{ return _ATOMIC_MODIFY_( __a__, =, __m__, __x__ ); }
-inline short atomic_swap
+inline short atomic_exchange
( volatile atomic_short* __a__, short __m__ )
-{ return atomic_swap_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_exchange_explicit( __a__, __m__, memory_order_seq_cst ); }
-inline bool atomic_compare_swap_explicit
+inline bool atomic_compare_exchange_weak_explicit
+( volatile atomic_short* __a__, short* __e__, short __m__,
+ memory_order __x__, memory_order __y__ )
+{ return _ATOMIC_CMPSWP_WEAK_( __a__, __e__, __m__, __x__ ); }
+
+inline bool atomic_compare_exchange_strong_explicit
( volatile atomic_short* __a__, short* __e__, short __m__,
memory_order __x__, memory_order __y__ )
{ return _ATOMIC_CMPSWP_( __a__, __e__, __m__, __x__ ); }
-inline bool atomic_compare_swap
+inline bool atomic_compare_exchange_weak
+( volatile atomic_short* __a__, short* __e__, short __m__ )
+{ return atomic_compare_exchange_weak_explicit( __a__, __e__, __m__,
+ memory_order_seq_cst, memory_order_seq_cst ); }
+
+inline bool atomic_compare_exchange_strong
( volatile atomic_short* __a__, short* __e__, short __m__ )
-{ return atomic_compare_swap_explicit( __a__, __e__, __m__,
+{ return atomic_compare_exchange_strong_explicit( __a__, __e__, __m__,
memory_order_seq_cst, memory_order_seq_cst ); }
inline void atomic_fence
( volatile atomic_ushort* __a__, unsigned short __m__ )
{ atomic_store_explicit( __a__, __m__, memory_order_seq_cst ); }
-inline unsigned short atomic_swap_explicit
+inline unsigned short atomic_exchange_explicit
( volatile atomic_ushort* __a__, unsigned short __m__, memory_order __x__ )
{ return _ATOMIC_MODIFY_( __a__, =, __m__, __x__ ); }
-inline unsigned short atomic_swap
+inline unsigned short atomic_exchange
( volatile atomic_ushort* __a__, unsigned short __m__ )
-{ return atomic_swap_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_exchange_explicit( __a__, __m__, memory_order_seq_cst ); }
-inline bool atomic_compare_swap_explicit
+inline bool atomic_compare_exchange_weak_explicit
+( volatile atomic_ushort* __a__, unsigned short* __e__, unsigned short __m__,
+ memory_order __x__, memory_order __y__ )
+{ return _ATOMIC_CMPSWP_WEAK_( __a__, __e__, __m__, __x__ ); }
+
+inline bool atomic_compare_exchange_strong_explicit
( volatile atomic_ushort* __a__, unsigned short* __e__, unsigned short __m__,
memory_order __x__, memory_order __y__ )
{ return _ATOMIC_CMPSWP_( __a__, __e__, __m__, __x__ ); }
-inline bool atomic_compare_swap
+inline bool atomic_compare_exchange_weak
( volatile atomic_ushort* __a__, unsigned short* __e__, unsigned short __m__ )
-{ return atomic_compare_swap_explicit( __a__, __e__, __m__,
+{ return atomic_compare_exchange_weak_explicit( __a__, __e__, __m__,
+ memory_order_seq_cst, memory_order_seq_cst ); }
+
+inline bool atomic_compare_exchange_strong
+( volatile atomic_ushort* __a__, unsigned short* __e__, unsigned short __m__ )
+{ return atomic_compare_exchange_strong_explicit( __a__, __e__, __m__,
memory_order_seq_cst, memory_order_seq_cst ); }
inline void atomic_fence
( volatile atomic_int* __a__, int __m__ )
{ atomic_store_explicit( __a__, __m__, memory_order_seq_cst ); }
-inline int atomic_swap_explicit
+inline int atomic_exchange_explicit
( volatile atomic_int* __a__, int __m__, memory_order __x__ )
{ return _ATOMIC_MODIFY_( __a__, =, __m__, __x__ ); }
-inline int atomic_swap
+inline int atomic_exchange
( volatile atomic_int* __a__, int __m__ )
-{ return atomic_swap_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_exchange_explicit( __a__, __m__, memory_order_seq_cst ); }
+
+inline bool atomic_compare_exchange_weak_explicit
+( volatile atomic_int* __a__, int* __e__, int __m__,
+ memory_order __x__, memory_order __y__ )
+{ return _ATOMIC_CMPSWP_WEAK_( __a__, __e__, __m__, __x__ ); }
-inline bool atomic_compare_swap_explicit
+inline bool atomic_compare_exchange_strong_explicit
( volatile atomic_int* __a__, int* __e__, int __m__,
memory_order __x__, memory_order __y__ )
{ return _ATOMIC_CMPSWP_( __a__, __e__, __m__, __x__ ); }
-inline bool atomic_compare_swap
+inline bool atomic_compare_exchange_weak
( volatile atomic_int* __a__, int* __e__, int __m__ )
-{ return atomic_compare_swap_explicit( __a__, __e__, __m__,
+{ return atomic_compare_exchange_weak_explicit( __a__, __e__, __m__,
+ memory_order_seq_cst, memory_order_seq_cst ); }
+
+inline bool atomic_compare_exchange_strong
+( volatile atomic_int* __a__, int* __e__, int __m__ )
+{ return atomic_compare_exchange_strong_explicit( __a__, __e__, __m__,
memory_order_seq_cst, memory_order_seq_cst ); }
inline void atomic_fence
( volatile atomic_uint* __a__, unsigned int __m__ )
{ atomic_store_explicit( __a__, __m__, memory_order_seq_cst ); }
-inline unsigned int atomic_swap_explicit
+inline unsigned int atomic_exchange_explicit
( volatile atomic_uint* __a__, unsigned int __m__, memory_order __x__ )
{ return _ATOMIC_MODIFY_( __a__, =, __m__, __x__ ); }
-inline unsigned int atomic_swap
+inline unsigned int atomic_exchange
( volatile atomic_uint* __a__, unsigned int __m__ )
-{ return atomic_swap_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_exchange_explicit( __a__, __m__, memory_order_seq_cst ); }
+
+inline bool atomic_compare_exchange_weak_explicit
+( volatile atomic_uint* __a__, unsigned int* __e__, unsigned int __m__,
+ memory_order __x__, memory_order __y__ )
+{ return _ATOMIC_CMPSWP_WEAK_( __a__, __e__, __m__, __x__ ); }
-inline bool atomic_compare_swap_explicit
+inline bool atomic_compare_exchange_strong_explicit
( volatile atomic_uint* __a__, unsigned int* __e__, unsigned int __m__,
memory_order __x__, memory_order __y__ )
{ return _ATOMIC_CMPSWP_( __a__, __e__, __m__, __x__ ); }
-inline bool atomic_compare_swap
+inline bool atomic_compare_exchange_weak
+( volatile atomic_uint* __a__, unsigned int* __e__, unsigned int __m__ )
+{ return atomic_compare_exchange_weak_explicit( __a__, __e__, __m__,
+ memory_order_seq_cst, memory_order_seq_cst ); }
+
+inline bool atomic_compare_exchange_strong
( volatile atomic_uint* __a__, unsigned int* __e__, unsigned int __m__ )
-{ return atomic_compare_swap_explicit( __a__, __e__, __m__,
+{ return atomic_compare_exchange_strong_explicit( __a__, __e__, __m__,
memory_order_seq_cst, memory_order_seq_cst ); }
inline void atomic_fence
( volatile atomic_long* __a__, long __m__ )
{ atomic_store_explicit( __a__, __m__, memory_order_seq_cst ); }
-inline long atomic_swap_explicit
+inline long atomic_exchange_explicit
( volatile atomic_long* __a__, long __m__, memory_order __x__ )
{ return _ATOMIC_MODIFY_( __a__, =, __m__, __x__ ); }
-inline long atomic_swap
+inline long atomic_exchange
( volatile atomic_long* __a__, long __m__ )
-{ return atomic_swap_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_exchange_explicit( __a__, __m__, memory_order_seq_cst ); }
-inline bool atomic_compare_swap_explicit
+inline bool atomic_compare_exchange_weak_explicit
+( volatile atomic_long* __a__, long* __e__, long __m__,
+ memory_order __x__, memory_order __y__ )
+{ return _ATOMIC_CMPSWP_WEAK_( __a__, __e__, __m__, __x__ ); }
+
+inline bool atomic_compare_exchange_strong_explicit
( volatile atomic_long* __a__, long* __e__, long __m__,
memory_order __x__, memory_order __y__ )
{ return _ATOMIC_CMPSWP_( __a__, __e__, __m__, __x__ ); }
-inline bool atomic_compare_swap
+inline bool atomic_compare_exchange_weak
( volatile atomic_long* __a__, long* __e__, long __m__ )
-{ return atomic_compare_swap_explicit( __a__, __e__, __m__,
+{ return atomic_compare_exchange_weak_explicit( __a__, __e__, __m__,
+ memory_order_seq_cst, memory_order_seq_cst ); }
+
+inline bool atomic_compare_exchange_strong
+( volatile atomic_long* __a__, long* __e__, long __m__ )
+{ return atomic_compare_exchange_strong_explicit( __a__, __e__, __m__,
memory_order_seq_cst, memory_order_seq_cst ); }
inline void atomic_fence
( volatile atomic_ulong* __a__, unsigned long __m__ )
{ atomic_store_explicit( __a__, __m__, memory_order_seq_cst ); }
-inline unsigned long atomic_swap_explicit
+inline unsigned long atomic_exchange_explicit
( volatile atomic_ulong* __a__, unsigned long __m__, memory_order __x__ )
{ return _ATOMIC_MODIFY_( __a__, =, __m__, __x__ ); }
-inline unsigned long atomic_swap
+inline unsigned long atomic_exchange
( volatile atomic_ulong* __a__, unsigned long __m__ )
-{ return atomic_swap_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_exchange_explicit( __a__, __m__, memory_order_seq_cst ); }
+
+inline bool atomic_compare_exchange_weak_explicit
+( volatile atomic_ulong* __a__, unsigned long* __e__, unsigned long __m__,
+ memory_order __x__, memory_order __y__ )
+{ return _ATOMIC_CMPSWP_WEAK_( __a__, __e__, __m__, __x__ ); }
-inline bool atomic_compare_swap_explicit
+inline bool atomic_compare_exchange_strong_explicit
( volatile atomic_ulong* __a__, unsigned long* __e__, unsigned long __m__,
memory_order __x__, memory_order __y__ )
{ return _ATOMIC_CMPSWP_( __a__, __e__, __m__, __x__ ); }
-inline bool atomic_compare_swap
+inline bool atomic_compare_exchange_weak
( volatile atomic_ulong* __a__, unsigned long* __e__, unsigned long __m__ )
-{ return atomic_compare_swap_explicit( __a__, __e__, __m__,
+{ return atomic_compare_exchange_weak_explicit( __a__, __e__, __m__,
+ memory_order_seq_cst, memory_order_seq_cst ); }
+
+inline bool atomic_compare_exchange_strong
+( volatile atomic_ulong* __a__, unsigned long* __e__, unsigned long __m__ )
+{ return atomic_compare_exchange_strong_explicit( __a__, __e__, __m__,
memory_order_seq_cst, memory_order_seq_cst ); }
inline void atomic_fence
( volatile atomic_llong* __a__, long long __m__ )
{ atomic_store_explicit( __a__, __m__, memory_order_seq_cst ); }
-inline long long atomic_swap_explicit
+inline long long atomic_exchange_explicit
( volatile atomic_llong* __a__, long long __m__, memory_order __x__ )
{ return _ATOMIC_MODIFY_( __a__, =, __m__, __x__ ); }
-inline long long atomic_swap
+inline long long atomic_exchange
( volatile atomic_llong* __a__, long long __m__ )
-{ return atomic_swap_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_exchange_explicit( __a__, __m__, memory_order_seq_cst ); }
-inline bool atomic_compare_swap_explicit
+inline bool atomic_compare_exchange_weak_explicit
+( volatile atomic_llong* __a__, long long* __e__, long long __m__,
+ memory_order __x__, memory_order __y__ )
+{ return _ATOMIC_CMPSWP_WEAK_( __a__, __e__, __m__, __x__ ); }
+
+inline bool atomic_compare_exchange_strong_explicit
( volatile atomic_llong* __a__, long long* __e__, long long __m__,
memory_order __x__, memory_order __y__ )
{ return _ATOMIC_CMPSWP_( __a__, __e__, __m__, __x__ ); }
-inline bool atomic_compare_swap
+inline bool atomic_compare_exchange_weak
( volatile atomic_llong* __a__, long long* __e__, long long __m__ )
-{ return atomic_compare_swap_explicit( __a__, __e__, __m__,
+{ return atomic_compare_exchange_weak_explicit( __a__, __e__, __m__,
+ memory_order_seq_cst, memory_order_seq_cst ); }
+
+inline bool atomic_compare_exchange_strong
+( volatile atomic_llong* __a__, long long* __e__, long long __m__ )
+{ return atomic_compare_exchange_strong_explicit( __a__, __e__, __m__,
memory_order_seq_cst, memory_order_seq_cst ); }
inline void atomic_fence
( volatile atomic_ullong* __a__, unsigned long long __m__ )
{ atomic_store_explicit( __a__, __m__, memory_order_seq_cst ); }
-inline unsigned long long atomic_swap_explicit
+inline unsigned long long atomic_exchange_explicit
( volatile atomic_ullong* __a__, unsigned long long __m__, memory_order __x__ )
{ return _ATOMIC_MODIFY_( __a__, =, __m__, __x__ ); }
-inline unsigned long long atomic_swap
+inline unsigned long long atomic_exchange
( volatile atomic_ullong* __a__, unsigned long long __m__ )
-{ return atomic_swap_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_exchange_explicit( __a__, __m__, memory_order_seq_cst ); }
-inline bool atomic_compare_swap_explicit
+inline bool atomic_compare_exchange_weak_explicit
+( volatile atomic_ullong* __a__, unsigned long long* __e__, unsigned long long __m__,
+ memory_order __x__, memory_order __y__ )
+{ return _ATOMIC_CMPSWP_WEAK_( __a__, __e__, __m__, __x__ ); }
+
+inline bool atomic_compare_exchange_strong_explicit
( volatile atomic_ullong* __a__, unsigned long long* __e__, unsigned long long __m__,
memory_order __x__, memory_order __y__ )
{ return _ATOMIC_CMPSWP_( __a__, __e__, __m__, __x__ ); }
-inline bool atomic_compare_swap
+inline bool atomic_compare_exchange_weak
( volatile atomic_ullong* __a__, unsigned long long* __e__, unsigned long long __m__ )
-{ return atomic_compare_swap_explicit( __a__, __e__, __m__,
+{ return atomic_compare_exchange_weak_explicit( __a__, __e__, __m__,
+ memory_order_seq_cst, memory_order_seq_cst ); }
+
+inline bool atomic_compare_exchange_strong
+( volatile atomic_ullong* __a__, unsigned long long* __e__, unsigned long long __m__ )
+{ return atomic_compare_exchange_strong_explicit( __a__, __e__, __m__,
memory_order_seq_cst, memory_order_seq_cst ); }
inline void atomic_fence
( volatile atomic_wchar_t* __a__, wchar_t __m__ )
{ atomic_store_explicit( __a__, __m__, memory_order_seq_cst ); }
-inline wchar_t atomic_swap_explicit
+inline wchar_t atomic_exchange_explicit
( volatile atomic_wchar_t* __a__, wchar_t __m__, memory_order __x__ )
{ return _ATOMIC_MODIFY_( __a__, =, __m__, __x__ ); }
-inline wchar_t atomic_swap
+inline wchar_t atomic_exchange
( volatile atomic_wchar_t* __a__, wchar_t __m__ )
-{ return atomic_swap_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_exchange_explicit( __a__, __m__, memory_order_seq_cst ); }
-inline bool atomic_compare_swap_explicit
+inline bool atomic_compare_exchange_weak_explicit
+( volatile atomic_wchar_t* __a__, wchar_t* __e__, wchar_t __m__,
+ memory_order __x__, memory_order __y__ )
+{ return _ATOMIC_CMPSWP_WEAK_( __a__, __e__, __m__, __x__ ); }
+
+inline bool atomic_compare_exchange_strong_explicit
( volatile atomic_wchar_t* __a__, wchar_t* __e__, wchar_t __m__,
memory_order __x__, memory_order __y__ )
{ return _ATOMIC_CMPSWP_( __a__, __e__, __m__, __x__ ); }
-inline bool atomic_compare_swap
+inline bool atomic_compare_exchange_weak
+( volatile atomic_wchar_t* __a__, wchar_t* __e__, wchar_t __m__ )
+{ return atomic_compare_exchange_weak_explicit( __a__, __e__, __m__,
+ memory_order_seq_cst, memory_order_seq_cst ); }
+
+inline bool atomic_compare_exchange_strong
( volatile atomic_wchar_t* __a__, wchar_t* __e__, wchar_t __m__ )
-{ return atomic_compare_swap_explicit( __a__, __e__, __m__,
+{ return atomic_compare_exchange_strong_explicit( __a__, __e__, __m__,
memory_order_seq_cst, memory_order_seq_cst ); }
inline void atomic_fence
inline void* atomic_fetch_add_explicit
( volatile atomic_address* __a__, ptrdiff_t __m__, memory_order __x__ )
-{ void* volatile* __p__ = &((__a__)->__f__);
- volatile atomic_flag* __g__ = __atomic_flag_for_address__( __p__ );
- __atomic_flag_wait_explicit__( __g__, __x__ );
- void* __r__ = *__p__;
- *__p__ = (void*)((char*)(*__p__) + __m__);
- atomic_flag_clear_explicit( __g__, __x__ );
+{
+ void* volatile* __p__ = &((__a__)->__f__);
+ void* __r__ = (void *) model_rmwr_action((void *)__p__, __x__);
+ model_rmw_action((void *)__p__, __x__, (uint64_t) ((char*)(*__p__) + __m__));
return __r__; }
inline void* atomic_fetch_add
inline void* atomic_fetch_sub_explicit
( volatile atomic_address* __a__, ptrdiff_t __m__, memory_order __x__ )
-{ void* volatile* __p__ = &((__a__)->__f__);
- volatile atomic_flag* __g__ = __atomic_flag_for_address__( __p__ );
- __atomic_flag_wait_explicit__( __g__, __x__ );
- void* __r__ = *__p__;
- *__p__ = (void*)((char*)(*__p__) - __m__);
- atomic_flag_clear_explicit( __g__, __x__ );
+{
+ void* volatile* __p__ = &((__a__)->__f__);
+ void* __r__ = (void *) model_rmwr_action((void *)__p__, __x__);
+ model_rmw_action((void *)__p__, __x__, (uint64_t)((char*)(*__p__) - __m__));
return __r__; }
inline void* atomic_fetch_sub
( volatile atomic_address* __a__, ptrdiff_t __m__ )
{ return atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); }
-
inline char atomic_fetch_add_explicit
( volatile atomic_char* __a__, char __m__, memory_order __x__ )
{ return _ATOMIC_MODIFY_( __a__, +=, __m__, __x__ ); }
inline char atomic_fetch_add
( volatile atomic_char* __a__, char __m__ )
-{ atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); }
inline char atomic_fetch_sub_explicit
inline char atomic_fetch_sub
( volatile atomic_char* __a__, char __m__ )
-{ atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); }
inline char atomic_fetch_and_explicit
inline char atomic_fetch_and
( volatile atomic_char* __a__, char __m__ )
-{ atomic_fetch_and_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_and_explicit( __a__, __m__, memory_order_seq_cst ); }
inline char atomic_fetch_or_explicit
inline char atomic_fetch_or
( volatile atomic_char* __a__, char __m__ )
-{ atomic_fetch_or_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_or_explicit( __a__, __m__, memory_order_seq_cst ); }
inline char atomic_fetch_xor_explicit
inline char atomic_fetch_xor
( volatile atomic_char* __a__, char __m__ )
-{ atomic_fetch_xor_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_xor_explicit( __a__, __m__, memory_order_seq_cst ); }
inline signed char atomic_fetch_add_explicit
inline signed char atomic_fetch_add
( volatile atomic_schar* __a__, signed char __m__ )
-{ atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); }
inline signed char atomic_fetch_sub_explicit
inline signed char atomic_fetch_sub
( volatile atomic_schar* __a__, signed char __m__ )
-{ atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); }
inline signed char atomic_fetch_and_explicit
inline signed char atomic_fetch_and
( volatile atomic_schar* __a__, signed char __m__ )
-{ atomic_fetch_and_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_and_explicit( __a__, __m__, memory_order_seq_cst ); }
inline signed char atomic_fetch_or_explicit
inline signed char atomic_fetch_or
( volatile atomic_schar* __a__, signed char __m__ )
-{ atomic_fetch_or_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_or_explicit( __a__, __m__, memory_order_seq_cst ); }
inline signed char atomic_fetch_xor_explicit
inline signed char atomic_fetch_xor
( volatile atomic_schar* __a__, signed char __m__ )
-{ atomic_fetch_xor_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_xor_explicit( __a__, __m__, memory_order_seq_cst ); }
inline unsigned char atomic_fetch_add_explicit
inline unsigned char atomic_fetch_add
( volatile atomic_uchar* __a__, unsigned char __m__ )
-{ atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); }
inline unsigned char atomic_fetch_sub_explicit
inline unsigned char atomic_fetch_sub
( volatile atomic_uchar* __a__, unsigned char __m__ )
-{ atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); }
inline unsigned char atomic_fetch_and_explicit
inline unsigned char atomic_fetch_and
( volatile atomic_uchar* __a__, unsigned char __m__ )
-{ atomic_fetch_and_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_and_explicit( __a__, __m__, memory_order_seq_cst ); }
inline unsigned char atomic_fetch_or_explicit
inline unsigned char atomic_fetch_or
( volatile atomic_uchar* __a__, unsigned char __m__ )
-{ atomic_fetch_or_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_or_explicit( __a__, __m__, memory_order_seq_cst ); }
inline unsigned char atomic_fetch_xor_explicit
inline unsigned char atomic_fetch_xor
( volatile atomic_uchar* __a__, unsigned char __m__ )
-{ atomic_fetch_xor_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_xor_explicit( __a__, __m__, memory_order_seq_cst ); }
inline short atomic_fetch_add_explicit
inline short atomic_fetch_add
( volatile atomic_short* __a__, short __m__ )
-{ atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); }
inline short atomic_fetch_sub_explicit
inline short atomic_fetch_sub
( volatile atomic_short* __a__, short __m__ )
-{ atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); }
inline short atomic_fetch_and_explicit
inline short atomic_fetch_and
( volatile atomic_short* __a__, short __m__ )
-{ atomic_fetch_and_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_and_explicit( __a__, __m__, memory_order_seq_cst ); }
inline short atomic_fetch_or_explicit
inline short atomic_fetch_or
( volatile atomic_short* __a__, short __m__ )
-{ atomic_fetch_or_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_or_explicit( __a__, __m__, memory_order_seq_cst ); }
inline short atomic_fetch_xor_explicit
inline short atomic_fetch_xor
( volatile atomic_short* __a__, short __m__ )
-{ atomic_fetch_xor_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_xor_explicit( __a__, __m__, memory_order_seq_cst ); }
inline unsigned short atomic_fetch_add_explicit
inline unsigned short atomic_fetch_add
( volatile atomic_ushort* __a__, unsigned short __m__ )
-{ atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); }
inline unsigned short atomic_fetch_sub_explicit
inline unsigned short atomic_fetch_sub
( volatile atomic_ushort* __a__, unsigned short __m__ )
-{ atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); }
inline unsigned short atomic_fetch_and_explicit
inline unsigned short atomic_fetch_and
( volatile atomic_ushort* __a__, unsigned short __m__ )
-{ atomic_fetch_and_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_and_explicit( __a__, __m__, memory_order_seq_cst ); }
inline unsigned short atomic_fetch_or_explicit
inline unsigned short atomic_fetch_or
( volatile atomic_ushort* __a__, unsigned short __m__ )
-{ atomic_fetch_or_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_or_explicit( __a__, __m__, memory_order_seq_cst ); }
inline unsigned short atomic_fetch_xor_explicit
inline unsigned short atomic_fetch_xor
( volatile atomic_ushort* __a__, unsigned short __m__ )
-{ atomic_fetch_xor_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_xor_explicit( __a__, __m__, memory_order_seq_cst ); }
inline int atomic_fetch_add_explicit
inline int atomic_fetch_add
( volatile atomic_int* __a__, int __m__ )
-{ atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); }
inline int atomic_fetch_sub_explicit
inline int atomic_fetch_sub
( volatile atomic_int* __a__, int __m__ )
-{ atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); }
inline int atomic_fetch_and_explicit
inline int atomic_fetch_and
( volatile atomic_int* __a__, int __m__ )
-{ atomic_fetch_and_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_and_explicit( __a__, __m__, memory_order_seq_cst ); }
inline int atomic_fetch_or_explicit
inline int atomic_fetch_or
( volatile atomic_int* __a__, int __m__ )
-{ atomic_fetch_or_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_or_explicit( __a__, __m__, memory_order_seq_cst ); }
inline int atomic_fetch_xor_explicit
inline int atomic_fetch_xor
( volatile atomic_int* __a__, int __m__ )
-{ atomic_fetch_xor_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_xor_explicit( __a__, __m__, memory_order_seq_cst ); }
inline unsigned int atomic_fetch_add_explicit
inline unsigned int atomic_fetch_add
( volatile atomic_uint* __a__, unsigned int __m__ )
-{ atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); }
inline unsigned int atomic_fetch_sub_explicit
inline unsigned int atomic_fetch_sub
( volatile atomic_uint* __a__, unsigned int __m__ )
-{ atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); }
inline unsigned int atomic_fetch_and_explicit
inline unsigned int atomic_fetch_and
( volatile atomic_uint* __a__, unsigned int __m__ )
-{ atomic_fetch_and_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_and_explicit( __a__, __m__, memory_order_seq_cst ); }
inline unsigned int atomic_fetch_or_explicit
inline unsigned int atomic_fetch_or
( volatile atomic_uint* __a__, unsigned int __m__ )
-{ atomic_fetch_or_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_or_explicit( __a__, __m__, memory_order_seq_cst ); }
inline unsigned int atomic_fetch_xor_explicit
inline unsigned int atomic_fetch_xor
( volatile atomic_uint* __a__, unsigned int __m__ )
-{ atomic_fetch_xor_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_xor_explicit( __a__, __m__, memory_order_seq_cst ); }
inline long atomic_fetch_add_explicit
inline long atomic_fetch_add
( volatile atomic_long* __a__, long __m__ )
-{ atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); }
inline long atomic_fetch_sub_explicit
inline long atomic_fetch_sub
( volatile atomic_long* __a__, long __m__ )
-{ atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); }
inline long atomic_fetch_and_explicit
inline long atomic_fetch_and
( volatile atomic_long* __a__, long __m__ )
-{ atomic_fetch_and_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_and_explicit( __a__, __m__, memory_order_seq_cst ); }
inline long atomic_fetch_or_explicit
inline long atomic_fetch_or
( volatile atomic_long* __a__, long __m__ )
-{ atomic_fetch_or_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_or_explicit( __a__, __m__, memory_order_seq_cst ); }
inline long atomic_fetch_xor_explicit
inline long atomic_fetch_xor
( volatile atomic_long* __a__, long __m__ )
-{ atomic_fetch_xor_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_xor_explicit( __a__, __m__, memory_order_seq_cst ); }
inline unsigned long atomic_fetch_add_explicit
inline unsigned long atomic_fetch_add
( volatile atomic_ulong* __a__, unsigned long __m__ )
-{ atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); }
inline unsigned long atomic_fetch_sub_explicit
inline unsigned long atomic_fetch_sub
( volatile atomic_ulong* __a__, unsigned long __m__ )
-{ atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); }
inline unsigned long atomic_fetch_and_explicit
inline unsigned long atomic_fetch_and
( volatile atomic_ulong* __a__, unsigned long __m__ )
-{ atomic_fetch_and_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_and_explicit( __a__, __m__, memory_order_seq_cst ); }
inline unsigned long atomic_fetch_or_explicit
inline unsigned long atomic_fetch_or
( volatile atomic_ulong* __a__, unsigned long __m__ )
-{ atomic_fetch_or_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_or_explicit( __a__, __m__, memory_order_seq_cst ); }
inline unsigned long atomic_fetch_xor_explicit
inline unsigned long atomic_fetch_xor
( volatile atomic_ulong* __a__, unsigned long __m__ )
-{ atomic_fetch_xor_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_xor_explicit( __a__, __m__, memory_order_seq_cst ); }
inline long long atomic_fetch_add_explicit
inline long long atomic_fetch_add
( volatile atomic_llong* __a__, long long __m__ )
-{ atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); }
inline long long atomic_fetch_sub_explicit
inline long long atomic_fetch_sub
( volatile atomic_llong* __a__, long long __m__ )
-{ atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); }
inline long long atomic_fetch_and_explicit
inline long long atomic_fetch_and
( volatile atomic_llong* __a__, long long __m__ )
-{ atomic_fetch_and_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_and_explicit( __a__, __m__, memory_order_seq_cst ); }
inline long long atomic_fetch_or_explicit
inline long long atomic_fetch_or
( volatile atomic_llong* __a__, long long __m__ )
-{ atomic_fetch_or_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_or_explicit( __a__, __m__, memory_order_seq_cst ); }
inline long long atomic_fetch_xor_explicit
inline long long atomic_fetch_xor
( volatile atomic_llong* __a__, long long __m__ )
-{ atomic_fetch_xor_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_xor_explicit( __a__, __m__, memory_order_seq_cst ); }
inline unsigned long long atomic_fetch_add_explicit
inline unsigned long long atomic_fetch_add
( volatile atomic_ullong* __a__, unsigned long long __m__ )
-{ atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); }
inline unsigned long long atomic_fetch_sub_explicit
inline unsigned long long atomic_fetch_sub
( volatile atomic_ullong* __a__, unsigned long long __m__ )
-{ atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); }
inline unsigned long long atomic_fetch_and_explicit
inline unsigned long long atomic_fetch_and
( volatile atomic_ullong* __a__, unsigned long long __m__ )
-{ atomic_fetch_and_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_and_explicit( __a__, __m__, memory_order_seq_cst ); }
inline unsigned long long atomic_fetch_or_explicit
inline unsigned long long atomic_fetch_or
( volatile atomic_ullong* __a__, unsigned long long __m__ )
-{ atomic_fetch_or_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_or_explicit( __a__, __m__, memory_order_seq_cst ); }
inline unsigned long long atomic_fetch_xor_explicit
inline unsigned long long atomic_fetch_xor
( volatile atomic_ullong* __a__, unsigned long long __m__ )
-{ atomic_fetch_xor_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_xor_explicit( __a__, __m__, memory_order_seq_cst ); }
inline wchar_t atomic_fetch_add_explicit
inline wchar_t atomic_fetch_add
( volatile atomic_wchar_t* __a__, wchar_t __m__ )
-{ atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); }
inline wchar_t atomic_fetch_sub_explicit
inline wchar_t atomic_fetch_sub
( volatile atomic_wchar_t* __a__, wchar_t __m__ )
-{ atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); }
inline wchar_t atomic_fetch_and_explicit
inline wchar_t atomic_fetch_and
( volatile atomic_wchar_t* __a__, wchar_t __m__ )
-{ atomic_fetch_and_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_and_explicit( __a__, __m__, memory_order_seq_cst ); }
inline wchar_t atomic_fetch_or_explicit
inline wchar_t atomic_fetch_or
( volatile atomic_wchar_t* __a__, wchar_t __m__ )
-{ atomic_fetch_or_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_or_explicit( __a__, __m__, memory_order_seq_cst ); }
inline wchar_t atomic_fetch_xor_explicit
inline wchar_t atomic_fetch_xor
( volatile atomic_wchar_t* __a__, wchar_t __m__ )
-{ atomic_fetch_xor_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_xor_explicit( __a__, __m__, memory_order_seq_cst ); }
#else
#define atomic_load_explicit( __a__, __x__ ) \
_ATOMIC_LOAD_( __a__, __x__ )
+#define atomic_init( __a__, __m__ ) \
+_ATOMIC_INIT_( __a__, __m__ )
+
#define atomic_store( __a__, __m__ ) \
_ATOMIC_STORE_( __a__, __m__, memory_order_seq_cst )
#define atomic_store_explicit( __a__, __m__, __x__ ) \
_ATOMIC_STORE_( __a__, __m__, __x__ )
-#define atomic_swap( __a__, __m__ ) \
+#define atomic_exchange( __a__, __m__ ) \
_ATOMIC_MODIFY_( __a__, =, __m__, memory_order_seq_cst )
-#define atomic_swap_explicit( __a__, __m__, __x__ ) \
+#define atomic_exchange_explicit( __a__, __m__, __x__ ) \
_ATOMIC_MODIFY_( __a__, =, __m__, __x__ )
-#define atomic_compare_swap( __a__, __e__, __m__ ) \
+#define atomic_compare_exchange_weak( __a__, __e__, __m__ ) \
+_ATOMIC_CMPSWP_WEAK_( __a__, __e__, __m__, memory_order_seq_cst )
+
+#define atomic_compare_exchange_strong( __a__, __e__, __m__ ) \
_ATOMIC_CMPSWP_( __a__, __e__, __m__, memory_order_seq_cst )
-#define atomic_compare_swap_explicit( __a__, __e__, __m__, __x__, __y__ ) \
+#define atomic_compare_exchange_weak_explicit( __a__, __e__, __m__, __x__, __y__ ) \
+_ATOMIC_CMPSWP_WEAK_( __a__, __e__, __m__, __x__ )
+
+#define atomic_compare_exchange_strong_explicit( __a__, __e__, __m__, __x__, __y__ ) \
_ATOMIC_CMPSWP_( __a__, __e__, __m__, __x__ )
#define atomic_fence( __a__, __x__ ) \
( memory_order __x__ ) volatile
{ return atomic_load_explicit( this, __x__ ); }
-inline bool atomic_bool::swap
+inline bool atomic_bool::exchange
( bool __m__, memory_order __x__ ) volatile
-{ return atomic_swap_explicit( this, __m__, __x__ ); }
+{ return atomic_exchange_explicit( this, __m__, __x__ ); }
+
+inline bool atomic_bool::compare_exchange_weak
+( bool& __e__, bool __m__,
+ memory_order __x__, memory_order __y__ ) volatile
+{ return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__, __y__ ); }
-inline bool atomic_bool::compare_swap
+inline bool atomic_bool::compare_exchange_strong
( bool& __e__, bool __m__,
memory_order __x__, memory_order __y__ ) volatile
-{ return atomic_compare_swap_explicit( this, &__e__, __m__, __x__, __y__ ); }
+{ return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__, __y__ ); }
-inline bool atomic_bool::compare_swap
+inline bool atomic_bool::compare_exchange_weak
( bool& __e__, bool __m__, memory_order __x__ ) volatile
-{ return atomic_compare_swap_explicit( this, &__e__, __m__, __x__,
+{ return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__,
+ __x__ == memory_order_acq_rel ? memory_order_acquire :
+ __x__ == memory_order_release ? memory_order_relaxed : __x__ ); }
+
+inline bool atomic_bool::compare_exchange_strong
+( bool& __e__, bool __m__, memory_order __x__ ) volatile
+{ return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__,
__x__ == memory_order_acq_rel ? memory_order_acquire :
__x__ == memory_order_release ? memory_order_relaxed : __x__ ); }
( memory_order __x__ ) volatile
{ return atomic_load_explicit( this, __x__ ); }
-inline void* atomic_address::swap
+inline void* atomic_address::exchange
( void* __m__, memory_order __x__ ) volatile
-{ return atomic_swap_explicit( this, __m__, __x__ ); }
+{ return atomic_exchange_explicit( this, __m__, __x__ ); }
+
+inline bool atomic_address::compare_exchange_weak
+( void*& __e__, void* __m__,
+ memory_order __x__, memory_order __y__ ) volatile
+{ return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__, __y__ ); }
-inline bool atomic_address::compare_swap
+inline bool atomic_address::compare_exchange_strong
( void*& __e__, void* __m__,
memory_order __x__, memory_order __y__ ) volatile
-{ return atomic_compare_swap_explicit( this, &__e__, __m__, __x__, __y__ ); }
+{ return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__, __y__ ); }
-inline bool atomic_address::compare_swap
+inline bool atomic_address::compare_exchange_weak
( void*& __e__, void* __m__, memory_order __x__ ) volatile
-{ return atomic_compare_swap_explicit( this, &__e__, __m__, __x__,
+{ return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__,
+ __x__ == memory_order_acq_rel ? memory_order_acquire :
+ __x__ == memory_order_release ? memory_order_relaxed : __x__ ); }
+
+inline bool atomic_address::compare_exchange_strong
+( void*& __e__, void* __m__, memory_order __x__ ) volatile
+{ return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__,
__x__ == memory_order_acq_rel ? memory_order_acquire :
__x__ == memory_order_release ? memory_order_relaxed : __x__ ); }
( memory_order __x__ ) volatile
{ return atomic_load_explicit( this, __x__ ); }
-inline char atomic_char::swap
+inline char atomic_char::exchange
( char __m__, memory_order __x__ ) volatile
-{ return atomic_swap_explicit( this, __m__, __x__ ); }
+{ return atomic_exchange_explicit( this, __m__, __x__ ); }
-inline bool atomic_char::compare_swap
+inline bool atomic_char::compare_exchange_weak
( char& __e__, char __m__,
memory_order __x__, memory_order __y__ ) volatile
-{ return atomic_compare_swap_explicit( this, &__e__, __m__, __x__, __y__ ); }
+{ return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__, __y__ ); }
+
+inline bool atomic_char::compare_exchange_strong
+( char& __e__, char __m__,
+ memory_order __x__, memory_order __y__ ) volatile
+{ return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__, __y__ ); }
+
+inline bool atomic_char::compare_exchange_weak
+( char& __e__, char __m__, memory_order __x__ ) volatile
+{ return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__,
+ __x__ == memory_order_acq_rel ? memory_order_acquire :
+ __x__ == memory_order_release ? memory_order_relaxed : __x__ ); }
-inline bool atomic_char::compare_swap
+inline bool atomic_char::compare_exchange_strong
( char& __e__, char __m__, memory_order __x__ ) volatile
-{ return atomic_compare_swap_explicit( this, &__e__, __m__, __x__,
+{ return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__,
__x__ == memory_order_acq_rel ? memory_order_acquire :
__x__ == memory_order_release ? memory_order_relaxed : __x__ ); }
( memory_order __x__ ) volatile
{ return atomic_load_explicit( this, __x__ ); }
-inline signed char atomic_schar::swap
+inline signed char atomic_schar::exchange
( signed char __m__, memory_order __x__ ) volatile
-{ return atomic_swap_explicit( this, __m__, __x__ ); }
+{ return atomic_exchange_explicit( this, __m__, __x__ ); }
+
+inline bool atomic_schar::compare_exchange_weak
+( signed char& __e__, signed char __m__,
+ memory_order __x__, memory_order __y__ ) volatile
+{ return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__, __y__ ); }
-inline bool atomic_schar::compare_swap
+inline bool atomic_schar::compare_exchange_strong
( signed char& __e__, signed char __m__,
memory_order __x__, memory_order __y__ ) volatile
-{ return atomic_compare_swap_explicit( this, &__e__, __m__, __x__, __y__ ); }
+{ return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__, __y__ ); }
+
+inline bool atomic_schar::compare_exchange_weak
+( signed char& __e__, signed char __m__, memory_order __x__ ) volatile
+{ return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__,
+ __x__ == memory_order_acq_rel ? memory_order_acquire :
+ __x__ == memory_order_release ? memory_order_relaxed : __x__ ); }
-inline bool atomic_schar::compare_swap
+inline bool atomic_schar::compare_exchange_strong
( signed char& __e__, signed char __m__, memory_order __x__ ) volatile
-{ return atomic_compare_swap_explicit( this, &__e__, __m__, __x__,
+{ return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__,
__x__ == memory_order_acq_rel ? memory_order_acquire :
__x__ == memory_order_release ? memory_order_relaxed : __x__ ); }
( memory_order __x__ ) volatile
{ return atomic_load_explicit( this, __x__ ); }
-inline unsigned char atomic_uchar::swap
+inline unsigned char atomic_uchar::exchange
( unsigned char __m__, memory_order __x__ ) volatile
-{ return atomic_swap_explicit( this, __m__, __x__ ); }
+{ return atomic_exchange_explicit( this, __m__, __x__ ); }
-inline bool atomic_uchar::compare_swap
+inline bool atomic_uchar::compare_exchange_weak
( unsigned char& __e__, unsigned char __m__,
memory_order __x__, memory_order __y__ ) volatile
-{ return atomic_compare_swap_explicit( this, &__e__, __m__, __x__, __y__ ); }
+{ return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__, __y__ ); }
-inline bool atomic_uchar::compare_swap
+inline bool atomic_uchar::compare_exchange_strong
+( unsigned char& __e__, unsigned char __m__,
+ memory_order __x__, memory_order __y__ ) volatile
+{ return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__, __y__ ); }
+
+inline bool atomic_uchar::compare_exchange_weak
( unsigned char& __e__, unsigned char __m__, memory_order __x__ ) volatile
-{ return atomic_compare_swap_explicit( this, &__e__, __m__, __x__,
+{ return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__,
+ __x__ == memory_order_acq_rel ? memory_order_acquire :
+ __x__ == memory_order_release ? memory_order_relaxed : __x__ ); }
+
+inline bool atomic_uchar::compare_exchange_strong
+( unsigned char& __e__, unsigned char __m__, memory_order __x__ ) volatile
+{ return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__,
__x__ == memory_order_acq_rel ? memory_order_acquire :
__x__ == memory_order_release ? memory_order_relaxed : __x__ ); }
( memory_order __x__ ) volatile
{ return atomic_load_explicit( this, __x__ ); }
-inline short atomic_short::swap
+inline short atomic_short::exchange
( short __m__, memory_order __x__ ) volatile
-{ return atomic_swap_explicit( this, __m__, __x__ ); }
+{ return atomic_exchange_explicit( this, __m__, __x__ ); }
-inline bool atomic_short::compare_swap
+inline bool atomic_short::compare_exchange_weak
( short& __e__, short __m__,
memory_order __x__, memory_order __y__ ) volatile
-{ return atomic_compare_swap_explicit( this, &__e__, __m__, __x__, __y__ ); }
+{ return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__, __y__ ); }
-inline bool atomic_short::compare_swap
+inline bool atomic_short::compare_exchange_strong
+( short& __e__, short __m__,
+ memory_order __x__, memory_order __y__ ) volatile
+{ return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__, __y__ ); }
+
+inline bool atomic_short::compare_exchange_weak
+( short& __e__, short __m__, memory_order __x__ ) volatile
+{ return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__,
+ __x__ == memory_order_acq_rel ? memory_order_acquire :
+ __x__ == memory_order_release ? memory_order_relaxed : __x__ ); }
+
+inline bool atomic_short::compare_exchange_strong
( short& __e__, short __m__, memory_order __x__ ) volatile
-{ return atomic_compare_swap_explicit( this, &__e__, __m__, __x__,
+{ return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__,
__x__ == memory_order_acq_rel ? memory_order_acquire :
__x__ == memory_order_release ? memory_order_relaxed : __x__ ); }
( memory_order __x__ ) volatile
{ return atomic_load_explicit( this, __x__ ); }
-inline unsigned short atomic_ushort::swap
+inline unsigned short atomic_ushort::exchange
( unsigned short __m__, memory_order __x__ ) volatile
-{ return atomic_swap_explicit( this, __m__, __x__ ); }
+{ return atomic_exchange_explicit( this, __m__, __x__ ); }
+
+inline bool atomic_ushort::compare_exchange_weak
+( unsigned short& __e__, unsigned short __m__,
+ memory_order __x__, memory_order __y__ ) volatile
+{ return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__, __y__ ); }
-inline bool atomic_ushort::compare_swap
+inline bool atomic_ushort::compare_exchange_strong
( unsigned short& __e__, unsigned short __m__,
memory_order __x__, memory_order __y__ ) volatile
-{ return atomic_compare_swap_explicit( this, &__e__, __m__, __x__, __y__ ); }
+{ return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__, __y__ ); }
-inline bool atomic_ushort::compare_swap
+inline bool atomic_ushort::compare_exchange_weak
( unsigned short& __e__, unsigned short __m__, memory_order __x__ ) volatile
-{ return atomic_compare_swap_explicit( this, &__e__, __m__, __x__,
+{ return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__,
+ __x__ == memory_order_acq_rel ? memory_order_acquire :
+ __x__ == memory_order_release ? memory_order_relaxed : __x__ ); }
+
+inline bool atomic_ushort::compare_exchange_strong
+( unsigned short& __e__, unsigned short __m__, memory_order __x__ ) volatile
+{ return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__,
__x__ == memory_order_acq_rel ? memory_order_acquire :
__x__ == memory_order_release ? memory_order_relaxed : __x__ ); }
( memory_order __x__ ) volatile
{ return atomic_load_explicit( this, __x__ ); }
-inline int atomic_int::swap
+inline int atomic_int::exchange
( int __m__, memory_order __x__ ) volatile
-{ return atomic_swap_explicit( this, __m__, __x__ ); }
+{ return atomic_exchange_explicit( this, __m__, __x__ ); }
-inline bool atomic_int::compare_swap
+inline bool atomic_int::compare_exchange_weak
( int& __e__, int __m__,
memory_order __x__, memory_order __y__ ) volatile
-{ return atomic_compare_swap_explicit( this, &__e__, __m__, __x__, __y__ ); }
+{ return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__, __y__ ); }
+
+inline bool atomic_int::compare_exchange_strong
+( int& __e__, int __m__,
+ memory_order __x__, memory_order __y__ ) volatile
+{ return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__, __y__ ); }
+
+inline bool atomic_int::compare_exchange_weak
+( int& __e__, int __m__, memory_order __x__ ) volatile
+{ return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__,
+ __x__ == memory_order_acq_rel ? memory_order_acquire :
+ __x__ == memory_order_release ? memory_order_relaxed : __x__ ); }
-inline bool atomic_int::compare_swap
+inline bool atomic_int::compare_exchange_strong
( int& __e__, int __m__, memory_order __x__ ) volatile
-{ return atomic_compare_swap_explicit( this, &__e__, __m__, __x__,
+{ return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__,
__x__ == memory_order_acq_rel ? memory_order_acquire :
__x__ == memory_order_release ? memory_order_relaxed : __x__ ); }
( memory_order __x__ ) volatile
{ return atomic_load_explicit( this, __x__ ); }
-inline unsigned int atomic_uint::swap
+inline unsigned int atomic_uint::exchange
( unsigned int __m__, memory_order __x__ ) volatile
-{ return atomic_swap_explicit( this, __m__, __x__ ); }
+{ return atomic_exchange_explicit( this, __m__, __x__ ); }
+
+inline bool atomic_uint::compare_exchange_weak
+( unsigned int& __e__, unsigned int __m__,
+ memory_order __x__, memory_order __y__ ) volatile
+{ return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__, __y__ ); }
-inline bool atomic_uint::compare_swap
+inline bool atomic_uint::compare_exchange_strong
( unsigned int& __e__, unsigned int __m__,
memory_order __x__, memory_order __y__ ) volatile
-{ return atomic_compare_swap_explicit( this, &__e__, __m__, __x__, __y__ ); }
+{ return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__, __y__ ); }
+
+inline bool atomic_uint::compare_exchange_weak
+( unsigned int& __e__, unsigned int __m__, memory_order __x__ ) volatile
+{ return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__,
+ __x__ == memory_order_acq_rel ? memory_order_acquire :
+ __x__ == memory_order_release ? memory_order_relaxed : __x__ ); }
-inline bool atomic_uint::compare_swap
+inline bool atomic_uint::compare_exchange_strong
( unsigned int& __e__, unsigned int __m__, memory_order __x__ ) volatile
-{ return atomic_compare_swap_explicit( this, &__e__, __m__, __x__,
+{ return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__,
__x__ == memory_order_acq_rel ? memory_order_acquire :
__x__ == memory_order_release ? memory_order_relaxed : __x__ ); }
( memory_order __x__ ) volatile
{ return atomic_load_explicit( this, __x__ ); }
-inline long atomic_long::swap
+inline long atomic_long::exchange
( long __m__, memory_order __x__ ) volatile
-{ return atomic_swap_explicit( this, __m__, __x__ ); }
+{ return atomic_exchange_explicit( this, __m__, __x__ ); }
-inline bool atomic_long::compare_swap
+inline bool atomic_long::compare_exchange_weak
( long& __e__, long __m__,
memory_order __x__, memory_order __y__ ) volatile
-{ return atomic_compare_swap_explicit( this, &__e__, __m__, __x__, __y__ ); }
+{ return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__, __y__ ); }
-inline bool atomic_long::compare_swap
+inline bool atomic_long::compare_exchange_strong
+( long& __e__, long __m__,
+ memory_order __x__, memory_order __y__ ) volatile
+{ return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__, __y__ ); }
+
+inline bool atomic_long::compare_exchange_weak
( long& __e__, long __m__, memory_order __x__ ) volatile
-{ return atomic_compare_swap_explicit( this, &__e__, __m__, __x__,
+{ return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__,
+ __x__ == memory_order_acq_rel ? memory_order_acquire :
+ __x__ == memory_order_release ? memory_order_relaxed : __x__ ); }
+
+inline bool atomic_long::compare_exchange_strong
+( long& __e__, long __m__, memory_order __x__ ) volatile
+{ return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__,
__x__ == memory_order_acq_rel ? memory_order_acquire :
__x__ == memory_order_release ? memory_order_relaxed : __x__ ); }
( memory_order __x__ ) volatile
{ return atomic_load_explicit( this, __x__ ); }
-inline unsigned long atomic_ulong::swap
+inline unsigned long atomic_ulong::exchange
( unsigned long __m__, memory_order __x__ ) volatile
-{ return atomic_swap_explicit( this, __m__, __x__ ); }
+{ return atomic_exchange_explicit( this, __m__, __x__ ); }
-inline bool atomic_ulong::compare_swap
+inline bool atomic_ulong::compare_exchange_weak
( unsigned long& __e__, unsigned long __m__,
memory_order __x__, memory_order __y__ ) volatile
-{ return atomic_compare_swap_explicit( this, &__e__, __m__, __x__, __y__ ); }
+{ return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__, __y__ ); }
-inline bool atomic_ulong::compare_swap
+inline bool atomic_ulong::compare_exchange_strong
+( unsigned long& __e__, unsigned long __m__,
+ memory_order __x__, memory_order __y__ ) volatile
+{ return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__, __y__ ); }
+
+inline bool atomic_ulong::compare_exchange_weak
+( unsigned long& __e__, unsigned long __m__, memory_order __x__ ) volatile
+{ return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__,
+ __x__ == memory_order_acq_rel ? memory_order_acquire :
+ __x__ == memory_order_release ? memory_order_relaxed : __x__ ); }
+
+inline bool atomic_ulong::compare_exchange_strong
( unsigned long& __e__, unsigned long __m__, memory_order __x__ ) volatile
-{ return atomic_compare_swap_explicit( this, &__e__, __m__, __x__,
+{ return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__,
__x__ == memory_order_acq_rel ? memory_order_acquire :
__x__ == memory_order_release ? memory_order_relaxed : __x__ ); }
( memory_order __x__ ) volatile
{ return atomic_load_explicit( this, __x__ ); }
-inline long long atomic_llong::swap
+inline long long atomic_llong::exchange
( long long __m__, memory_order __x__ ) volatile
-{ return atomic_swap_explicit( this, __m__, __x__ ); }
+{ return atomic_exchange_explicit( this, __m__, __x__ ); }
+
+inline bool atomic_llong::compare_exchange_weak
+( long long& __e__, long long __m__,
+ memory_order __x__, memory_order __y__ ) volatile
+{ return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__, __y__ ); }
-inline bool atomic_llong::compare_swap
+inline bool atomic_llong::compare_exchange_strong
( long long& __e__, long long __m__,
memory_order __x__, memory_order __y__ ) volatile
-{ return atomic_compare_swap_explicit( this, &__e__, __m__, __x__, __y__ ); }
+{ return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__, __y__ ); }
-inline bool atomic_llong::compare_swap
+inline bool atomic_llong::compare_exchange_weak
( long long& __e__, long long __m__, memory_order __x__ ) volatile
-{ return atomic_compare_swap_explicit( this, &__e__, __m__, __x__,
+{ return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__,
+ __x__ == memory_order_acq_rel ? memory_order_acquire :
+ __x__ == memory_order_release ? memory_order_relaxed : __x__ ); }
+
+inline bool atomic_llong::compare_exchange_strong
+( long long& __e__, long long __m__, memory_order __x__ ) volatile
+{ return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__,
__x__ == memory_order_acq_rel ? memory_order_acquire :
__x__ == memory_order_release ? memory_order_relaxed : __x__ ); }
( memory_order __x__ ) volatile
{ return atomic_load_explicit( this, __x__ ); }
-inline unsigned long long atomic_ullong::swap
+inline unsigned long long atomic_ullong::exchange
( unsigned long long __m__, memory_order __x__ ) volatile
-{ return atomic_swap_explicit( this, __m__, __x__ ); }
+{ return atomic_exchange_explicit( this, __m__, __x__ ); }
-inline bool atomic_ullong::compare_swap
+inline bool atomic_ullong::compare_exchange_weak
( unsigned long long& __e__, unsigned long long __m__,
memory_order __x__, memory_order __y__ ) volatile
-{ return atomic_compare_swap_explicit( this, &__e__, __m__, __x__, __y__ ); }
+{ return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__, __y__ ); }
+
+inline bool atomic_ullong::compare_exchange_strong
+( unsigned long long& __e__, unsigned long long __m__,
+ memory_order __x__, memory_order __y__ ) volatile
+{ return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__, __y__ ); }
+
+inline bool atomic_ullong::compare_exchange_weak
+( unsigned long long& __e__, unsigned long long __m__, memory_order __x__ ) volatile
+{ return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__,
+ __x__ == memory_order_acq_rel ? memory_order_acquire :
+ __x__ == memory_order_release ? memory_order_relaxed : __x__ ); }
-inline bool atomic_ullong::compare_swap
+inline bool atomic_ullong::compare_exchange_strong
( unsigned long long& __e__, unsigned long long __m__, memory_order __x__ ) volatile
-{ return atomic_compare_swap_explicit( this, &__e__, __m__, __x__,
+{ return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__,
__x__ == memory_order_acq_rel ? memory_order_acquire :
__x__ == memory_order_release ? memory_order_relaxed : __x__ ); }
( memory_order __x__ ) volatile
{ return atomic_load_explicit( this, __x__ ); }
-inline wchar_t atomic_wchar_t::swap
+inline wchar_t atomic_wchar_t::exchange
( wchar_t __m__, memory_order __x__ ) volatile
-{ return atomic_swap_explicit( this, __m__, __x__ ); }
+{ return atomic_exchange_explicit( this, __m__, __x__ ); }
+
+inline bool atomic_wchar_t::compare_exchange_weak
+( wchar_t& __e__, wchar_t __m__,
+ memory_order __x__, memory_order __y__ ) volatile
+{ return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__, __y__ ); }
-inline bool atomic_wchar_t::compare_swap
+inline bool atomic_wchar_t::compare_exchange_strong
( wchar_t& __e__, wchar_t __m__,
memory_order __x__, memory_order __y__ ) volatile
-{ return atomic_compare_swap_explicit( this, &__e__, __m__, __x__, __y__ ); }
+{ return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__, __y__ ); }
+
+inline bool atomic_wchar_t::compare_exchange_weak
+( wchar_t& __e__, wchar_t __m__, memory_order __x__ ) volatile
+{ return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__,
+ __x__ == memory_order_acq_rel ? memory_order_acquire :
+ __x__ == memory_order_release ? memory_order_relaxed : __x__ ); }
-inline bool atomic_wchar_t::compare_swap
+inline bool atomic_wchar_t::compare_exchange_strong
( wchar_t& __e__, wchar_t __m__, memory_order __x__ ) volatile
-{ return atomic_compare_swap_explicit( this, &__e__, __m__, __x__,
+{ return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__,
__x__ == memory_order_acq_rel ? memory_order_acquire :
__x__ == memory_order_release ? memory_order_relaxed : __x__ ); }
{ return _ATOMIC_LOAD_( this, __x__ ); }
template< typename T >
-inline T atomic<T>::swap( T __v__, memory_order __x__ ) volatile
+inline T atomic<T>::exchange( T __v__, memory_order __x__ ) volatile
{ return _ATOMIC_MODIFY_( this, =, __v__, __x__ ); }
template< typename T >
-inline bool atomic<T>::compare_swap
+inline bool atomic<T>::compare_exchange_weak
+( T& __r__, T __v__, memory_order __x__, memory_order __y__ ) volatile
+{ return _ATOMIC_CMPSWP_WEAK_( this, &__r__, __v__, __x__ ); }
+
+template< typename T >
+inline bool atomic<T>::compare_exchange_strong
( T& __r__, T __v__, memory_order __x__, memory_order __y__ ) volatile
{ return _ATOMIC_CMPSWP_( this, &__r__, __v__, __x__ ); }
template< typename T >
-inline bool atomic<T>::compare_swap
+inline bool atomic<T>::compare_exchange_weak
( T& __r__, T __v__, memory_order __x__ ) volatile
-{ return compare_swap( __r__, __v__, __x__,
+{ return compare_exchange_weak( __r__, __v__, __x__,
+ __x__ == memory_order_acq_rel ? memory_order_acquire :
+ __x__ == memory_order_release ? memory_order_relaxed : __x__ ); }
+
+template< typename T >
+inline bool atomic<T>::compare_exchange_strong
+( T& __r__, T __v__, memory_order __x__ ) volatile
+{ return compare_exchange_strong( __r__, __v__, __x__,
__x__ == memory_order_acq_rel ? memory_order_acquire :
__x__ == memory_order_release ? memory_order_relaxed : __x__ ); }
{ return static_cast<T*>( atomic_address::load( __x__ ) ); }
template< typename T >
-T* atomic<T*>::swap( T* __v__, memory_order __x__ ) volatile
-{ return static_cast<T*>( atomic_address::swap( __v__, __x__ ) ); }
+T* atomic<T*>::exchange( T* __v__, memory_order __x__ ) volatile
+{ return static_cast<T*>( atomic_address::exchange( __v__, __x__ ) ); }
template< typename T >
-bool atomic<T*>::compare_swap
+bool atomic<T*>::compare_exchange_weak
( T*& __r__, T* __v__, memory_order __x__, memory_order __y__) volatile
-{ return atomic_address::compare_swap( *reinterpret_cast<void**>( &__r__ ),
+{ return atomic_address::compare_exchange_weak( *reinterpret_cast<void**>( &__r__ ),
+ static_cast<void*>( __v__ ), __x__, __y__ ); }
+//{ return _ATOMIC_CMPSWP_WEAK_( this, &__r__, __v__, __x__ ); }
+
+template< typename T >
+bool atomic<T*>::compare_exchange_strong
+( T*& __r__, T* __v__, memory_order __x__, memory_order __y__) volatile
+{ return atomic_address::compare_exchange_strong( *reinterpret_cast<void**>( &__r__ ),
static_cast<void*>( __v__ ), __x__, __y__ ); }
//{ return _ATOMIC_CMPSWP_( this, &__r__, __v__, __x__ ); }
template< typename T >
-bool atomic<T*>::compare_swap
+bool atomic<T*>::compare_exchange_weak
+( T*& __r__, T* __v__, memory_order __x__ ) volatile
+{ return compare_exchange_weak( __r__, __v__, __x__,
+ __x__ == memory_order_acq_rel ? memory_order_acquire :
+ __x__ == memory_order_release ? memory_order_relaxed : __x__ ); }
+
+template< typename T >
+bool atomic<T*>::compare_exchange_strong
( T*& __r__, T* __v__, memory_order __x__ ) volatile
-{ return compare_swap( __r__, __v__, __x__,
+{ return compare_exchange_strong( __r__, __v__, __x__,
__x__ == memory_order_acq_rel ? memory_order_acquire :
__x__ == memory_order_release ? memory_order_relaxed : __x__ ); }
} // namespace std
#endif
+#endif /* __IMPATOMIC_H__ */