+#include "memoryorder.h"
+#include "cmodelint.h"
#ifdef __cplusplus
-#include <cstddef>
namespace std {
-#else
-#include <stddef.h>
-#include <stdbool.h>
#endif
-
#define CPP0X( feature )
-
-typedef enum memory_order {
- memory_order_relaxed, memory_order_acquire, memory_order_release,
- memory_order_acq_rel, memory_order_seq_cst
-} memory_order;
-
-
typedef struct atomic_flag
{
#ifdef __cplusplus
( volatile atomic_flag* );
extern void __atomic_flag_wait_explicit__
( volatile atomic_flag*, memory_order );
-extern volatile atomic_flag* __atomic_flag_for_address__
-( const volatile void* __z__ )
-__attribute__((const));
#ifdef __cplusplus
}
#endif
-#define _ATOMIC_LOAD_( __a__, __x__ ) \
-({ volatile __typeof__((__a__)->__f__)* __p__ = &((__a__)->__f__); \
- volatile atomic_flag* __g__ = __atomic_flag_for_address__( __p__ ); \
- __atomic_flag_wait_explicit__( __g__, __x__ ); \
- __typeof__((__a__)->__f__) __r__ = *__p__; \
- atomic_flag_clear_explicit( __g__, __x__ ); \
- __r__; })
-
-#define _ATOMIC_STORE_( __a__, __m__, __x__ ) \
-({ volatile __typeof__((__a__)->__f__)* __p__ = &((__a__)->__f__); \
- __typeof__(__m__) __v__ = (__m__); \
- volatile atomic_flag* __g__ = __atomic_flag_for_address__( __p__ ); \
- __atomic_flag_wait_explicit__( __g__, __x__ ); \
- *__p__ = __v__; \
- atomic_flag_clear_explicit( __g__, __x__ ); \
- __v__; })
-
-#define _ATOMIC_MODIFY_( __a__, __o__, __m__, __x__ ) \
-({ volatile __typeof__((__a__)->__f__)* __p__ = &((__a__)->__f__); \
- __typeof__(__m__) __v__ = (__m__); \
- volatile atomic_flag* __g__ = __atomic_flag_for_address__( __p__ ); \
- __atomic_flag_wait_explicit__( __g__, __x__ ); \
- __typeof__((__a__)->__f__) __r__ = *__p__; \
- *__p__ __o__ __v__; \
- atomic_flag_clear_explicit( __g__, __x__ ); \
- __r__; })
-
-#define _ATOMIC_CMPSWP_( __a__, __e__, __m__, __x__ ) \
-({ volatile __typeof__((__a__)->__f__)* __p__ = &((__a__)->__f__); \
- __typeof__(__e__) __q__ = (__e__); \
- __typeof__(__m__) __v__ = (__m__); \
- bool __r__; \
- volatile atomic_flag* __g__ = __atomic_flag_for_address__( __p__ ); \
- __atomic_flag_wait_explicit__( __g__, __x__ ); \
- __typeof__((__a__)->__f__) __t__ = *__p__; \
- if ( __t__ == *__q__ ) { *__p__ = __v__; __r__ = true; } \
- else { *__q__ = __t__; __r__ = false; } \
- atomic_flag_clear_explicit( __g__, __x__ ); \
- __r__; })
+/*
+ The remainder of the example implementation uses the following
+ macros. These macros exploit GNU extensions for value-returning
+ blocks (AKA statement expressions) and __typeof__.
+
+ The macros rely on data fields of atomic structs being named __f__.
+ Other symbols used are __a__=atomic, __e__=expected, __f__=field,
+ __g__=flag, __m__=modified, __o__=operation, __r__=result,
+ __p__=pointer to field, __v__=value (for single evaluation),
+ __x__=memory-ordering, and __y__=a second (distinct) memory-ordering.
+*/
+
+/* Atomic load: reads the atomic object's data field __f__ through the
+   model checker (model_read_action) with memory order __x__, and yields
+   the read value cast back to the field's type. */
+#define _ATOMIC_LOAD_( __a__, __x__ ) \
+ ({ volatile __typeof__((__a__)->__f__)* __p__ = & ((__a__)->__f__); \
+ __typeof__((__a__)->__f__) __r__ = (__typeof__((__a__)->__f__))model_read_action((void *)__p__, __x__); \
+ __r__; })
+
+/* Atomic store: evaluates __m__ exactly once into __v__, writes it (widened
+   to uint64_t) through the model checker (model_write_action) with memory
+   order __x__, and yields the stored value. */
+#define _ATOMIC_STORE_( __a__, __m__, __x__ ) \
+ ({ volatile __typeof__((__a__)->__f__)* __p__ = & ((__a__)->__f__); \
+ __typeof__(__m__) __v__ = (__m__); \
+ model_write_action((void *) __p__, __x__, (uint64_t) __v__); \
+ __v__; })
+
+
+/* Non-atomic initialization: like _ATOMIC_STORE_ but routed through
+   model_init_action and taking no memory-order argument; yields the
+   initializing value. */
+#define _ATOMIC_INIT_( __a__, __m__ ) \
+ ({ volatile __typeof__((__a__)->__f__)* __p__ = & ((__a__)->__f__); \
+ __typeof__(__m__) __v__ = (__m__); \
+ model_init_action((void *) __p__, (uint64_t) __v__); \
+ __v__; })
+
+/* Atomic read-modify-write: reads the old value via model_rmwr_action,
+   applies the compound-assignment operator __o__ (e.g. +=, ^=) with
+   operand __v__ to a private copy, publishes the copy via
+   model_rmw_action, and yields the OLD value (fetch-op semantics). */
+#define _ATOMIC_MODIFY_( __a__, __o__, __m__, __x__ ) \
+ ({ volatile __typeof__((__a__)->__f__)* __p__ = & ((__a__)->__f__); \
+ __typeof__((__a__)->__f__) __old__=(__typeof__((__a__)->__f__)) model_rmwr_action((void *)__p__, __x__); \
+ __typeof__(__m__) __v__ = (__m__); \
+ __typeof__((__a__)->__f__) __copy__= __old__; \
+ __copy__ __o__ __v__; \
+ model_rmw_action((void *)__p__, __x__, (uint64_t) __copy__); \
+ __old__; })
+
+/* Atomic compare-and-swap: reads the current value via model_rmwr_action;
+   if it equals *__e__ (expected), commits __v__ with model_rmw_action and
+   yields true.  Otherwise cancels the RMW with model_rmwc_action, stores
+   the observed value back into *__e__, and yields false. */
+#define _ATOMIC_CMPSWP_( __a__, __e__, __m__, __x__ ) \
+ ({ volatile __typeof__((__a__)->__f__)* __p__ = & ((__a__)->__f__); \
+ __typeof__(__e__) __q__ = (__e__); \
+ __typeof__(__m__) __v__ = (__m__); \
+ bool __r__; \
+ __typeof__((__a__)->__f__) __t__=(__typeof__((__a__)->__f__)) model_rmwr_action((void *)__p__, __x__); \
+ if (__t__ == * __q__ ) { \
+ model_rmw_action((void *)__p__, __x__, (uint64_t) __v__); __r__ = true; } \
+ else { model_rmwc_action((void *)__p__, __x__); *__q__ = __t__; __r__ = false;} \
+ __r__; })
+/* Fence: delegates to the model checker's fence action with memory order
+   __x__.  The object argument __a__ is accepted only for signature
+   compatibility with the flag-based implementation and is unused here. */
#define _ATOMIC_FENCE_( __a__, __x__ ) \
-({ volatile __typeof__((__a__)->__f__)* __p__ = &((__a__)->__f__); \
- volatile atomic_flag* __g__ = __atomic_flag_for_address__( __p__ ); \
- atomic_flag_fence( __g__, __x__ ); \
- })
-
-
-#define ATOMIC_INTEGRAL_LOCK_FREE 0
-#define ATOMIC_ADDRESS_LOCK_FREE 0
-
+ ({ model_fence_action(__x__);})
+
+
+/* Under the model checker every atomic type is reported lock-free (1),
+   replacing the previous blanket lock-based setting of 0. */
+#define ATOMIC_CHAR_LOCK_FREE 1
+#define ATOMIC_CHAR16_T_LOCK_FREE 1
+#define ATOMIC_CHAR32_T_LOCK_FREE 1
+#define ATOMIC_WCHAR_T_LOCK_FREE 1
+#define ATOMIC_SHORT_LOCK_FREE 1
+#define ATOMIC_INT_LOCK_FREE 1
+#define ATOMIC_LONG_LOCK_FREE 1
+#define ATOMIC_LLONG_LOCK_FREE 1
+#define ATOMIC_ADDRESS_LOCK_FREE 1
typedef struct atomic_bool
{
#ifdef __cplusplus
-inline bool atomic_is_lock_free( const volatile atomic_bool* __a__ )
+inline bool atomic_is_lock_free
+( const volatile atomic_bool* __a__ )
{ return false; }
inline bool atomic_load_explicit
( volatile atomic_bool* __a__, memory_order __x__ )
{ return _ATOMIC_LOAD_( __a__, __x__ ); }
-inline bool atomic_load( volatile atomic_bool* __a__ )
-{ return atomic_load_explicit( __a__, memory_order_seq_cst ); }
+inline bool atomic_load
+( volatile atomic_bool* __a__ ) { return atomic_load_explicit( __a__, memory_order_seq_cst ); }
inline void atomic_store_explicit
( volatile atomic_bool* __a__, bool __m__, memory_order __x__ )
inline void* atomic_swap_explicit
( volatile atomic_address* __a__, void* __m__, memory_order __x__ )
-{ return _ATOMIC_MODIFY_( __a__, =, __m__, __x__ ); }
+{ return _ATOMIC_MODIFY_( __a__, =, __m__, __x__ ); }
inline void* atomic_swap
( volatile atomic_address* __a__, void* __m__ )
inline void* atomic_fetch_add_explicit
( volatile atomic_address* __a__, ptrdiff_t __m__, memory_order __x__ )
-{ void* volatile* __p__ = &((__a__)->__f__);
- volatile atomic_flag* __g__ = __atomic_flag_for_address__( __p__ );
- __atomic_flag_wait_explicit__( __g__, __x__ );
- void* __r__ = *__p__;
- *__p__ = (void*)((char*)(*__p__) + __m__);
- atomic_flag_clear_explicit( __g__, __x__ );
+{
+ void* volatile* __p__ = &((__a__)->__f__);
+ void* __r__ = (void *) model_rmwr_action((void *)__p__, __x__);
+ model_rmw_action((void *)__p__, __x__, (uint64_t) ((char*)(*__p__) + __m__));
return __r__; }
inline void* atomic_fetch_add
inline void* atomic_fetch_sub_explicit
( volatile atomic_address* __a__, ptrdiff_t __m__, memory_order __x__ )
-{ void* volatile* __p__ = &((__a__)->__f__);
- volatile atomic_flag* __g__ = __atomic_flag_for_address__( __p__ );
- __atomic_flag_wait_explicit__( __g__, __x__ );
- void* __r__ = *__p__;
- *__p__ = (void*)((char*)(*__p__) - __m__);
- atomic_flag_clear_explicit( __g__, __x__ );
+{
+ void* volatile* __p__ = &((__a__)->__f__);
+ void* __r__ = (void *) model_rmwr_action((void *)__p__, __x__);
+ model_rmw_action((void *)__p__, __x__, (uint64_t)((char*)(*__p__) - __m__));
return __r__; }
inline void* atomic_fetch_sub
( volatile atomic_address* __a__, ptrdiff_t __m__ )
{ return atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); }
-
inline char atomic_fetch_add_explicit
( volatile atomic_char* __a__, char __m__, memory_order __x__ )
{ return _ATOMIC_MODIFY_( __a__, +=, __m__, __x__ ); }
inline char atomic_fetch_add
( volatile atomic_char* __a__, char __m__ )
-{ atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); }
inline char atomic_fetch_sub_explicit
inline char atomic_fetch_sub
( volatile atomic_char* __a__, char __m__ )
-{ atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); }
inline char atomic_fetch_and_explicit
inline char atomic_fetch_and
( volatile atomic_char* __a__, char __m__ )
-{ atomic_fetch_and_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_and_explicit( __a__, __m__, memory_order_seq_cst ); }
inline char atomic_fetch_or_explicit
inline char atomic_fetch_or
( volatile atomic_char* __a__, char __m__ )
-{ atomic_fetch_or_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_or_explicit( __a__, __m__, memory_order_seq_cst ); }
inline char atomic_fetch_xor_explicit
inline char atomic_fetch_xor
( volatile atomic_char* __a__, char __m__ )
-{ atomic_fetch_xor_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_xor_explicit( __a__, __m__, memory_order_seq_cst ); }
inline signed char atomic_fetch_add_explicit
inline signed char atomic_fetch_add
( volatile atomic_schar* __a__, signed char __m__ )
-{ atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); }
inline signed char atomic_fetch_sub_explicit
inline signed char atomic_fetch_sub
( volatile atomic_schar* __a__, signed char __m__ )
-{ atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); }
inline signed char atomic_fetch_and_explicit
inline signed char atomic_fetch_and
( volatile atomic_schar* __a__, signed char __m__ )
-{ atomic_fetch_and_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_and_explicit( __a__, __m__, memory_order_seq_cst ); }
inline signed char atomic_fetch_or_explicit
inline signed char atomic_fetch_or
( volatile atomic_schar* __a__, signed char __m__ )
-{ atomic_fetch_or_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_or_explicit( __a__, __m__, memory_order_seq_cst ); }
inline signed char atomic_fetch_xor_explicit
inline signed char atomic_fetch_xor
( volatile atomic_schar* __a__, signed char __m__ )
-{ atomic_fetch_xor_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_xor_explicit( __a__, __m__, memory_order_seq_cst ); }
inline unsigned char atomic_fetch_add_explicit
inline unsigned char atomic_fetch_add
( volatile atomic_uchar* __a__, unsigned char __m__ )
-{ atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); }
inline unsigned char atomic_fetch_sub_explicit
inline unsigned char atomic_fetch_sub
( volatile atomic_uchar* __a__, unsigned char __m__ )
-{ atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); }
inline unsigned char atomic_fetch_and_explicit
inline unsigned char atomic_fetch_and
( volatile atomic_uchar* __a__, unsigned char __m__ )
-{ atomic_fetch_and_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_and_explicit( __a__, __m__, memory_order_seq_cst ); }
inline unsigned char atomic_fetch_or_explicit
inline unsigned char atomic_fetch_or
( volatile atomic_uchar* __a__, unsigned char __m__ )
-{ atomic_fetch_or_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_or_explicit( __a__, __m__, memory_order_seq_cst ); }
inline unsigned char atomic_fetch_xor_explicit
inline unsigned char atomic_fetch_xor
( volatile atomic_uchar* __a__, unsigned char __m__ )
-{ atomic_fetch_xor_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_xor_explicit( __a__, __m__, memory_order_seq_cst ); }
inline short atomic_fetch_add_explicit
inline short atomic_fetch_add
( volatile atomic_short* __a__, short __m__ )
-{ atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); }
inline short atomic_fetch_sub_explicit
inline short atomic_fetch_sub
( volatile atomic_short* __a__, short __m__ )
-{ atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); }
inline short atomic_fetch_and_explicit
inline short atomic_fetch_and
( volatile atomic_short* __a__, short __m__ )
-{ atomic_fetch_and_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_and_explicit( __a__, __m__, memory_order_seq_cst ); }
inline short atomic_fetch_or_explicit
inline short atomic_fetch_or
( volatile atomic_short* __a__, short __m__ )
-{ atomic_fetch_or_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_or_explicit( __a__, __m__, memory_order_seq_cst ); }
inline short atomic_fetch_xor_explicit
inline short atomic_fetch_xor
( volatile atomic_short* __a__, short __m__ )
-{ atomic_fetch_xor_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_xor_explicit( __a__, __m__, memory_order_seq_cst ); }
inline unsigned short atomic_fetch_add_explicit
inline unsigned short atomic_fetch_add
( volatile atomic_ushort* __a__, unsigned short __m__ )
-{ atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); }
inline unsigned short atomic_fetch_sub_explicit
inline unsigned short atomic_fetch_sub
( volatile atomic_ushort* __a__, unsigned short __m__ )
-{ atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); }
inline unsigned short atomic_fetch_and_explicit
inline unsigned short atomic_fetch_and
( volatile atomic_ushort* __a__, unsigned short __m__ )
-{ atomic_fetch_and_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_and_explicit( __a__, __m__, memory_order_seq_cst ); }
inline unsigned short atomic_fetch_or_explicit
inline unsigned short atomic_fetch_or
( volatile atomic_ushort* __a__, unsigned short __m__ )
-{ atomic_fetch_or_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_or_explicit( __a__, __m__, memory_order_seq_cst ); }
inline unsigned short atomic_fetch_xor_explicit
inline unsigned short atomic_fetch_xor
( volatile atomic_ushort* __a__, unsigned short __m__ )
-{ atomic_fetch_xor_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_xor_explicit( __a__, __m__, memory_order_seq_cst ); }
inline int atomic_fetch_add_explicit
inline int atomic_fetch_add
( volatile atomic_int* __a__, int __m__ )
-{ atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); }
inline int atomic_fetch_sub_explicit
inline int atomic_fetch_sub
( volatile atomic_int* __a__, int __m__ )
-{ atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); }
inline int atomic_fetch_and_explicit
inline int atomic_fetch_and
( volatile atomic_int* __a__, int __m__ )
-{ atomic_fetch_and_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_and_explicit( __a__, __m__, memory_order_seq_cst ); }
inline int atomic_fetch_or_explicit
inline int atomic_fetch_or
( volatile atomic_int* __a__, int __m__ )
-{ atomic_fetch_or_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_or_explicit( __a__, __m__, memory_order_seq_cst ); }
inline int atomic_fetch_xor_explicit
inline int atomic_fetch_xor
( volatile atomic_int* __a__, int __m__ )
-{ atomic_fetch_xor_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_xor_explicit( __a__, __m__, memory_order_seq_cst ); }
inline unsigned int atomic_fetch_add_explicit
inline unsigned int atomic_fetch_add
( volatile atomic_uint* __a__, unsigned int __m__ )
-{ atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); }
inline unsigned int atomic_fetch_sub_explicit
inline unsigned int atomic_fetch_sub
( volatile atomic_uint* __a__, unsigned int __m__ )
-{ atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); }
inline unsigned int atomic_fetch_and_explicit
inline unsigned int atomic_fetch_and
( volatile atomic_uint* __a__, unsigned int __m__ )
-{ atomic_fetch_and_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_and_explicit( __a__, __m__, memory_order_seq_cst ); }
inline unsigned int atomic_fetch_or_explicit
inline unsigned int atomic_fetch_or
( volatile atomic_uint* __a__, unsigned int __m__ )
-{ atomic_fetch_or_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_or_explicit( __a__, __m__, memory_order_seq_cst ); }
inline unsigned int atomic_fetch_xor_explicit
inline unsigned int atomic_fetch_xor
( volatile atomic_uint* __a__, unsigned int __m__ )
-{ atomic_fetch_xor_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_xor_explicit( __a__, __m__, memory_order_seq_cst ); }
inline long atomic_fetch_add_explicit
inline long atomic_fetch_add
( volatile atomic_long* __a__, long __m__ )
-{ atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); }
inline long atomic_fetch_sub_explicit
inline long atomic_fetch_sub
( volatile atomic_long* __a__, long __m__ )
-{ atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); }
inline long atomic_fetch_and_explicit
inline long atomic_fetch_and
( volatile atomic_long* __a__, long __m__ )
-{ atomic_fetch_and_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_and_explicit( __a__, __m__, memory_order_seq_cst ); }
inline long atomic_fetch_or_explicit
inline long atomic_fetch_or
( volatile atomic_long* __a__, long __m__ )
-{ atomic_fetch_or_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_or_explicit( __a__, __m__, memory_order_seq_cst ); }
inline long atomic_fetch_xor_explicit
inline long atomic_fetch_xor
( volatile atomic_long* __a__, long __m__ )
-{ atomic_fetch_xor_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_xor_explicit( __a__, __m__, memory_order_seq_cst ); }
inline unsigned long atomic_fetch_add_explicit
inline unsigned long atomic_fetch_add
( volatile atomic_ulong* __a__, unsigned long __m__ )
-{ atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); }
inline unsigned long atomic_fetch_sub_explicit
inline unsigned long atomic_fetch_sub
( volatile atomic_ulong* __a__, unsigned long __m__ )
-{ atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); }
inline unsigned long atomic_fetch_and_explicit
inline unsigned long atomic_fetch_and
( volatile atomic_ulong* __a__, unsigned long __m__ )
-{ atomic_fetch_and_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_and_explicit( __a__, __m__, memory_order_seq_cst ); }
inline unsigned long atomic_fetch_or_explicit
inline unsigned long atomic_fetch_or
( volatile atomic_ulong* __a__, unsigned long __m__ )
-{ atomic_fetch_or_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_or_explicit( __a__, __m__, memory_order_seq_cst ); }
inline unsigned long atomic_fetch_xor_explicit
inline unsigned long atomic_fetch_xor
( volatile atomic_ulong* __a__, unsigned long __m__ )
-{ atomic_fetch_xor_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_xor_explicit( __a__, __m__, memory_order_seq_cst ); }
inline long long atomic_fetch_add_explicit
inline long long atomic_fetch_add
( volatile atomic_llong* __a__, long long __m__ )
-{ atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); }
inline long long atomic_fetch_sub_explicit
inline long long atomic_fetch_sub
( volatile atomic_llong* __a__, long long __m__ )
-{ atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); }
inline long long atomic_fetch_and_explicit
inline long long atomic_fetch_and
( volatile atomic_llong* __a__, long long __m__ )
-{ atomic_fetch_and_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_and_explicit( __a__, __m__, memory_order_seq_cst ); }
inline long long atomic_fetch_or_explicit
inline long long atomic_fetch_or
( volatile atomic_llong* __a__, long long __m__ )
-{ atomic_fetch_or_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_or_explicit( __a__, __m__, memory_order_seq_cst ); }
inline long long atomic_fetch_xor_explicit
inline long long atomic_fetch_xor
( volatile atomic_llong* __a__, long long __m__ )
-{ atomic_fetch_xor_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_xor_explicit( __a__, __m__, memory_order_seq_cst ); }
inline unsigned long long atomic_fetch_add_explicit
inline unsigned long long atomic_fetch_add
( volatile atomic_ullong* __a__, unsigned long long __m__ )
-{ atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); }
inline unsigned long long atomic_fetch_sub_explicit
inline unsigned long long atomic_fetch_sub
( volatile atomic_ullong* __a__, unsigned long long __m__ )
-{ atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); }
inline unsigned long long atomic_fetch_and_explicit
inline unsigned long long atomic_fetch_and
( volatile atomic_ullong* __a__, unsigned long long __m__ )
-{ atomic_fetch_and_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_and_explicit( __a__, __m__, memory_order_seq_cst ); }
inline unsigned long long atomic_fetch_or_explicit
inline unsigned long long atomic_fetch_or
( volatile atomic_ullong* __a__, unsigned long long __m__ )
-{ atomic_fetch_or_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_or_explicit( __a__, __m__, memory_order_seq_cst ); }
inline unsigned long long atomic_fetch_xor_explicit
inline unsigned long long atomic_fetch_xor
( volatile atomic_ullong* __a__, unsigned long long __m__ )
-{ atomic_fetch_xor_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_xor_explicit( __a__, __m__, memory_order_seq_cst ); }
inline wchar_t atomic_fetch_add_explicit
inline wchar_t atomic_fetch_add
( volatile atomic_wchar_t* __a__, wchar_t __m__ )
-{ atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); }
inline wchar_t atomic_fetch_sub_explicit
inline wchar_t atomic_fetch_sub
( volatile atomic_wchar_t* __a__, wchar_t __m__ )
-{ atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); }
inline wchar_t atomic_fetch_and_explicit
inline wchar_t atomic_fetch_and
( volatile atomic_wchar_t* __a__, wchar_t __m__ )
-{ atomic_fetch_and_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_and_explicit( __a__, __m__, memory_order_seq_cst ); }
inline wchar_t atomic_fetch_or_explicit
inline wchar_t atomic_fetch_or
( volatile atomic_wchar_t* __a__, wchar_t __m__ )
-{ atomic_fetch_or_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_or_explicit( __a__, __m__, memory_order_seq_cst ); }
inline wchar_t atomic_fetch_xor_explicit
inline wchar_t atomic_fetch_xor
( volatile atomic_wchar_t* __a__, wchar_t __m__ )
-{ atomic_fetch_xor_explicit( __a__, __m__, memory_order_seq_cst ); }
+{ return atomic_fetch_xor_explicit( __a__, __m__, memory_order_seq_cst ); }
#else
#define atomic_load_explicit( __a__, __x__ ) \
_ATOMIC_LOAD_( __a__, __x__ )
+#define atomic_init( __a__, __m__ ) \
+_ATOMIC_INIT_( __a__, __m__ )
+
#define atomic_store( __a__, __m__ ) \
_ATOMIC_STORE_( __a__, __m__, memory_order_seq_cst )