X-Git-Url: http://demsky.eecs.uci.edu/git/?a=blobdiff_plain;f=include%2Fimpatomic.h;h=70b77de2ddc28cf2c3a5c356f718666eb0ea5a65;hb=dfb47d6bc6aacb0ce93a6e497d8abd027c12ec50;hp=cf62f3960231d37c955aa0f22913005a2d4348a7;hpb=aa5eb7b5cb804a25b547fc0ae733498cbb167276;p=c11tester.git

diff --git a/include/impatomic.h b/include/impatomic.h
index cf62f396..70b77de2 100644
--- a/include/impatomic.h
+++ b/include/impatomic.h
@@ -1,3 +1,4 @@
+#include
 /**
  * @file impatomic.h
  * @brief Common header for C11/C++11 atomics
@@ -14,6 +15,8 @@
 #ifdef __cplusplus
 namespace std {
+#else
+#include <stdbool.h>
 #endif
 
 #define CPP0X( feature )
@@ -86,14 +89,16 @@ inline void atomic_flag::clear( memory_order __x__ ) volatile
 ({ volatile __typeof__((__a__)->__f__)* __p__ = & ((__a__)->__f__); \
    __typeof__(__m__) __v__ = (__m__); \
    model_write_action((void *) __p__, __x__, (uint64_t) __v__); \
-   __v__; })
+   __v__ = __v__; /* Silence clang (-Wunused-value) */ \
+   })
 
 #define _ATOMIC_INIT_( __a__, __m__ ) \
 ({ volatile __typeof__((__a__)->__f__)* __p__ = & ((__a__)->__f__); \
    __typeof__(__m__) __v__ = (__m__); \
    model_init_action((void *) __p__, (uint64_t) __v__); \
-   __v__; })
+   __v__ = __v__; /* Silence clang (-Wunused-value) */ \
+   })
 
 #define _ATOMIC_MODIFY_( __a__, __o__, __m__, __x__ ) \
 ({ volatile __typeof__((__a__)->__f__)* __p__ = & ((__a__)->__f__); \
@@ -102,7 +107,8 @@ inline void atomic_flag::clear( memory_order __x__ ) volatile
    __typeof__((__a__)->__f__) __copy__= __old__; \
    __copy__ __o__ __v__; \
    model_rmw_action((void *)__p__, __x__, (uint64_t) __copy__); \
-   __old__; })
+   __old__ = __old__; /* Silence clang (-Wunused-value) */ \
+   })
 
 /* No spurious failure for now */
 #define _ATOMIC_CMPSWP_WEAK_ _ATOMIC_CMPSWP_
@@ -112,8 +118,8 @@ inline void atomic_flag::clear( memory_order __x__ )
    volatile __typeof__(__e__) __q__ = (__e__); \
    __typeof__(__m__) __v__ = (__m__); \
    bool __r__; \
-   __typeof__((__a__)->__f__) __t__=(__typeof__((__a__)->__f__)) model_rmwr_action((void *)__p__, __x__); \
-   if (__t__ == * __q__ ) { \
+   __typeof__((__a__)->__f__) __t__=(__typeof__((__a__)->__f__)) model_rmwrcas_action((void *)__p__, __x__, (uint64_t) * __q__, sizeof((__a__)->__f__)); \
+   if (__t__ == * __q__ ) {; \
    model_rmw_action((void *)__p__, __x__, (uint64_t) __v__); __r__ = true; } \
    else { model_rmwc_action((void *)__p__, __x__); *__q__ = __t__; __r__ = false;} \
    __r__; })
@@ -1618,6 +1624,10 @@ inline bool atomic_load_explicit
 inline bool atomic_load
 ( volatile atomic_bool* __a__ )
 { return atomic_load_explicit( __a__, memory_order_seq_cst ); }
 
+inline void atomic_init
+( volatile atomic_bool* __a__, bool __m__ )
+{ _ATOMIC_INIT_( __a__, __m__ ); }
+
 inline void atomic_store_explicit
 ( volatile atomic_bool* __a__, bool __m__, memory_order __x__ )
 { _ATOMIC_STORE_( __a__, __m__, __x__ ); }
@@ -1665,6 +1675,10 @@ inline void* atomic_load_explicit
 inline void* atomic_load( volatile atomic_address* __a__ )
 { return atomic_load_explicit( __a__, memory_order_seq_cst ); }
 
+inline void atomic_init
+( volatile atomic_address* __a__, void* __m__ )
+{ _ATOMIC_INIT_( __a__, __m__ ); }
+
 inline void atomic_store_explicit
 ( volatile atomic_address* __a__, void* __m__, memory_order __x__ )
 { _ATOMIC_STORE_( __a__, __m__, __x__ ); }
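The _ATOMIC_CMPSWP_ hunk above switches the read half of a compare-and-swap from model_rmwr_action to model_rmwrcas_action, which additionally receives the expected value and the operand size. A minimal usage sketch of the contract the macro implements (an illustration, not part of the patch; it assumes the three-argument atomic_compare_exchange_strong overload this header provides and a linked model-checker runtime supplying the model_* hooks):

    #include "impatomic.h"

    void cas_sketch(volatile atomic_int *flag)
    {
        int expected = 0;
        /* Success branch of _ATOMIC_CMPSWP_: the RMW is recorded and *flag
         * becomes 1.  Failure branch: model_rmwc_action is recorded and the
         * observed value is written back through &expected. */
        if (atomic_compare_exchange_strong(flag, &expected, 1)) {
            /* won the race: *flag is now 1 */
        } else {
            /* lost: expected now holds the value actually seen in *flag */
        }
    }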
@@ -1712,6 +1726,10 @@ inline char atomic_load_explicit
 inline char atomic_load( volatile atomic_char* __a__ )
 { return atomic_load_explicit( __a__, memory_order_seq_cst ); }
 
+inline void atomic_init
+( volatile atomic_char* __a__, char __m__ )
+{ _ATOMIC_INIT_( __a__, __m__ ); }
+
 inline void atomic_store_explicit
 ( volatile atomic_char* __a__, char __m__, memory_order __x__ )
 { _ATOMIC_STORE_( __a__, __m__, __x__ ); }
@@ -1759,6 +1777,10 @@ inline signed char atomic_load_explicit
 inline signed char atomic_load( volatile atomic_schar* __a__ )
 { return atomic_load_explicit( __a__, memory_order_seq_cst ); }
 
+inline void atomic_init
+( volatile atomic_schar* __a__, signed char __m__ )
+{ _ATOMIC_INIT_( __a__, __m__ ); }
+
 inline void atomic_store_explicit
 ( volatile atomic_schar* __a__, signed char __m__, memory_order __x__ )
 { _ATOMIC_STORE_( __a__, __m__, __x__ ); }
@@ -1806,6 +1828,10 @@ inline unsigned char atomic_load_explicit
 inline unsigned char atomic_load( volatile atomic_uchar* __a__ )
 { return atomic_load_explicit( __a__, memory_order_seq_cst ); }
 
+inline void atomic_init
+( volatile atomic_uchar* __a__, unsigned char __m__ )
+{ _ATOMIC_INIT_( __a__, __m__ ); }
+
 inline void atomic_store_explicit
 ( volatile atomic_uchar* __a__, unsigned char __m__, memory_order __x__ )
 { _ATOMIC_STORE_( __a__, __m__, __x__ ); }
@@ -1853,6 +1879,10 @@ inline short atomic_load_explicit
 inline short atomic_load( volatile atomic_short* __a__ )
 { return atomic_load_explicit( __a__, memory_order_seq_cst ); }
 
+inline void atomic_init
+( volatile atomic_short* __a__, short __m__ )
+{ _ATOMIC_INIT_( __a__, __m__ ); }
+
 inline void atomic_store_explicit
 ( volatile atomic_short* __a__, short __m__, memory_order __x__ )
 { _ATOMIC_STORE_( __a__, __m__, __x__ ); }
@@ -1900,6 +1930,10 @@ inline unsigned short atomic_load_explicit
 inline unsigned short atomic_load( volatile atomic_ushort* __a__ )
 { return atomic_load_explicit( __a__, memory_order_seq_cst ); }
 
+inline void atomic_init
+( volatile atomic_ushort* __a__, unsigned short __m__ )
+{ _ATOMIC_INIT_( __a__, __m__ ); }
+
 inline void atomic_store_explicit
 ( volatile atomic_ushort* __a__, unsigned short __m__, memory_order __x__ )
 { _ATOMIC_STORE_( __a__, __m__, __x__ ); }
@@ -1947,6 +1981,10 @@ inline int atomic_load_explicit
 inline int atomic_load( volatile atomic_int* __a__ )
 { return atomic_load_explicit( __a__, memory_order_seq_cst ); }
 
+inline void atomic_init
+( volatile atomic_int* __a__, int __m__ )
+{ _ATOMIC_INIT_( __a__, __m__ ); }
+
 inline void atomic_store_explicit
 ( volatile atomic_int* __a__, int __m__, memory_order __x__ )
 { _ATOMIC_STORE_( __a__, __m__, __x__ ); }
@@ -1994,6 +2032,10 @@ inline unsigned int atomic_load_explicit
 inline unsigned int atomic_load( volatile atomic_uint* __a__ )
 { return atomic_load_explicit( __a__, memory_order_seq_cst ); }
 
+inline void atomic_init
+( volatile atomic_uint* __a__, unsigned int __m__ )
+{ _ATOMIC_INIT_( __a__, __m__ ); }
+
 inline void atomic_store_explicit
 ( volatile atomic_uint* __a__, unsigned int __m__, memory_order __x__ )
 { _ATOMIC_STORE_( __a__, __m__, __x__ ); }
@@ -2041,6 +2083,10 @@ inline long atomic_load_explicit
 inline long atomic_load( volatile atomic_long* __a__ )
 { return atomic_load_explicit( __a__, memory_order_seq_cst ); }
 
+inline void atomic_init
+( volatile atomic_long* __a__, long __m__ )
+{ _ATOMIC_INIT_( __a__, __m__ ); }
+
 inline void atomic_store_explicit
 ( volatile atomic_long* __a__, long __m__, memory_order __x__ )
 { _ATOMIC_STORE_( __a__, __m__, __x__ ); }
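Every hunk in this stretch adds the same three-line atomic_init overload for another builtin atomic type, giving C callers the unordered initializing store (model_init_action) that is distinct from an ordered write. A minimal calling sketch (illustration only, assuming the header is on the include path and the model-checker runtime is linked):

    #include "impatomic.h"

    void init_sketch(void)
    {
        atomic_int counter;
        /* atomic_init records an initialization, not an ordered write ... */
        atomic_init(&counter, 0);
        /* ... whereas a store goes through _ATOMIC_STORE_/model_write_action. */
        atomic_store_explicit(&counter, 1, memory_order_relaxed);
    }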
@@ -2088,6 +2134,10 @@ inline unsigned long atomic_load_explicit
 inline unsigned long atomic_load( volatile atomic_ulong* __a__ )
 { return atomic_load_explicit( __a__, memory_order_seq_cst ); }
 
+inline void atomic_init
+( volatile atomic_ulong* __a__, unsigned long __m__ )
+{ _ATOMIC_INIT_( __a__, __m__ ); }
+
 inline void atomic_store_explicit
 ( volatile atomic_ulong* __a__, unsigned long __m__, memory_order __x__ )
 { _ATOMIC_STORE_( __a__, __m__, __x__ ); }
@@ -2135,6 +2185,10 @@ inline long long atomic_load_explicit
 inline long long atomic_load( volatile atomic_llong* __a__ )
 { return atomic_load_explicit( __a__, memory_order_seq_cst ); }
 
+inline void atomic_init
+( volatile atomic_llong* __a__, long long __m__ )
+{ _ATOMIC_INIT_( __a__, __m__ ); }
+
 inline void atomic_store_explicit
 ( volatile atomic_llong* __a__, long long __m__, memory_order __x__ )
 { _ATOMIC_STORE_( __a__, __m__, __x__ ); }
@@ -2182,6 +2236,10 @@ inline unsigned long long atomic_load_explicit
 inline unsigned long long atomic_load( volatile atomic_ullong* __a__ )
 { return atomic_load_explicit( __a__, memory_order_seq_cst ); }
 
+inline void atomic_init
+( volatile atomic_ullong* __a__, unsigned long long __m__ )
+{ _ATOMIC_INIT_( __a__, __m__ ); }
+
 inline void atomic_store_explicit
 ( volatile atomic_ullong* __a__, unsigned long long __m__, memory_order __x__ )
 { _ATOMIC_STORE_( __a__, __m__, __x__ ); }
@@ -2229,6 +2287,10 @@ inline wchar_t atomic_load_explicit
 inline wchar_t atomic_load( volatile atomic_wchar_t* __a__ )
 { return atomic_load_explicit( __a__, memory_order_seq_cst ); }
 
+inline void atomic_init
+( volatile atomic_wchar_t* __a__, wchar_t __m__ )
+{ _ATOMIC_INIT_( __a__, __m__ ); }
+
 inline void atomic_store_explicit
 ( volatile atomic_wchar_t* __a__, wchar_t __m__, memory_order __x__ )
 { _ATOMIC_STORE_( __a__, __m__, __x__ ); }
@@ -2269,23 +2331,28 @@ inline bool atomic_compare_exchange_strong
 inline void* atomic_fetch_add_explicit
 ( volatile atomic_address* __a__, ptrdiff_t __m__, memory_order __x__ )
 {
-   void* volatile* __p__ = &((__a__)->__f__);
-   void* __r__ = (void *) model_rmwr_action((void *)__p__, __x__);
-   model_rmw_action((void *)__p__, __x__, (uint64_t) ((char*)(*__p__) + __m__));
-   return __r__; }
+   volatile __typeof__((__a__)->__f__)* __p__ = & ((__a__)->__f__);
+   __typeof__((__a__)->__f__) __old__=(__typeof__((__a__)->__f__)) model_rmwr_action((void *)__p__, __x__);
+   __typeof__((__a__)->__f__) __copy__= __old__;
+   __copy__ = (void *) (((char *)__copy__) + __m__);
+   model_rmw_action((void *)__p__, __x__, (uint64_t) __copy__);
+   return __old__;
+}
 
-inline void* atomic_fetch_add
+ inline void* atomic_fetch_add
 ( volatile atomic_address* __a__, ptrdiff_t __m__ )
 { return atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 inline void* atomic_fetch_sub_explicit
 ( volatile atomic_address* __a__, ptrdiff_t __m__, memory_order __x__ )
-{
-   void* volatile* __p__ = &((__a__)->__f__);
-   void* __r__ = (void *) model_rmwr_action((void *)__p__, __x__);
-   model_rmw_action((void *)__p__, __x__, (uint64_t)((char*)(*__p__) - __m__));
-   return __r__; }
+{ volatile __typeof__((__a__)->__f__)* __p__ = & ((__a__)->__f__);
+   __typeof__((__a__)->__f__) __old__=(__typeof__((__a__)->__f__)) model_rmwr_action((void *)__p__, __x__);
+   __typeof__((__a__)->__f__) __copy__= __old__;
+   __copy__ = (void *) (((char *)__copy__) - __m__);
+   model_rmw_action((void *)__p__, __x__, (uint64_t) __copy__);
+   return __old__;
+}
 
 inline void* atomic_fetch_sub
 ( volatile atomic_address* __a__, ptrdiff_t __m__ )
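The rewritten atomic_fetch_add_explicit/atomic_fetch_sub_explicit bodies above read the old pointer once through model_rmwr_action, offset it by __m__ bytes of char-pointer arithmetic, record the new value with model_rmw_action, and return the old pointer instead of re-reading *__p__. A usage sketch of that behavior (illustration only):

    #include "impatomic.h"

    void fetch_add_sketch(volatile atomic_address *addr, char *buf)
    {
        atomic_store_explicit(addr, buf, memory_order_relaxed);
        /* Offsets the stored pointer by 8 bytes; returns the previous one. */
        void *prev = atomic_fetch_add_explicit(addr, 8, memory_order_relaxed);
        /* Here prev == buf and the stored value is buf + 8. */
        (void) prev;
    }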
@@ -3844,11 +3911,11 @@ T* atomic<T*>::fetch_sub( ptrdiff_t __v__, memory_order __x__ ) volatile
 #ifdef __cplusplus
 extern "C" {
 #endif
-inline void atomic_thread_fence(memory_order order)
+static inline void atomic_thread_fence(memory_order order)
 { _ATOMIC_FENCE_(order); }
 
 /** @todo Do we want to try to support a user's signal-handler?
  */
-inline void atomic_signal_fence(memory_order order)
+static inline void atomic_signal_fence(memory_order order)
 { /* No-op? */ }
 
 #ifdef __cplusplus
 }
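Marking the C-linkage fences static inline keeps each translation unit self-contained and avoids multiple-definition link errors when several C files include this header; usage is unchanged. A sketch of the standard publication idiom they enable (illustration only):

    #include "impatomic.h"

    int payload;          /* plain data published via the flag below */
    atomic_int ready;

    void publisher(void)
    {
        payload = 42;
        /* Release fence: orders the plain write before the flag store. */
        atomic_thread_fence(memory_order_release);
        atomic_store_explicit(&ready, 1, memory_order_relaxed);
    }

    void consumer(void)
    {
        if (atomic_load_explicit(&ready, memory_order_relaxed)) {
            /* Acquire fence: orders the flag load before reading payload. */
            atomic_thread_fence(memory_order_acquire);
            /* payload is guaranteed to be 42 here. */
        }
    }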