From 9fc455aa88e0fe0415081e282bd1bda4c633fa8f Mon Sep 17 00:00:00 2001 From: Brian Norris Date: Tue, 31 Jul 2012 17:25:38 -0700 Subject: [PATCH] pull in most of atomic header file --- Makefile | 8 +- action.cc | 14 +-- action.h | 2 +- cmodelint.cc | 19 ++++ cmodelint.h | 19 ++++ include/impatomic.c | 37 -------- include/impatomic.h | 210 ++++++++++++++++++++++---------------------- libatomic.cc | 22 ----- libatomic.h | 29 ++---- libthreads.cc | 4 +- model.h | 1 - threads.cc | 2 +- userprog.c | 2 +- 13 files changed, 166 insertions(+), 203 deletions(-) create mode 100644 cmodelint.cc create mode 100644 cmodelint.h delete mode 100644 include/impatomic.c diff --git a/Makefile b/Makefile index 206d76d..7e3f84a 100644 --- a/Makefile +++ b/Makefile @@ -8,15 +8,15 @@ LIB_SO=lib$(LIB_NAME).so USER_O=userprog.o USER_H=libthreads.h libatomic.h -MODEL_CC=libthreads.cc schedule.cc libatomic.cc model.cc threads.cc librace.cc action.cc nodestack.cc clockvector.cc main.cc snapshot-interface.cc cyclegraph.cc datarace.cc -MODEL_O=libthreads.o schedule.o libatomic.o model.o threads.o librace.o action.o nodestack.o clockvector.o main.o snapshot-interface.o cyclegraph.o datarace.o -MODEL_H=libthreads.h schedule.h common.h libatomic.h model.h threads.h librace.h action.h nodestack.h clockvector.h snapshot-interface.h cyclegraph.h hashtable.h datarace.h config.h +MODEL_CC=libthreads.cc schedule.cc model.cc threads.cc librace.cc action.cc nodestack.cc clockvector.cc main.cc snapshot-interface.cc cyclegraph.cc datarace.cc impatomic.cc cmodelint.cc +MODEL_O=libthreads.o schedule.o model.o threads.o librace.o action.o nodestack.o clockvector.o main.o snapshot-interface.o cyclegraph.o datarace.o impatomic.o cmodelint.o +MODEL_H=libthreads.h schedule.h common.h libatomic.h model.h threads.h librace.h action.h nodestack.h clockvector.h snapshot-interface.h cyclegraph.h hashtable.h datarace.h config.h include/impatomic.h include/cstdatomic include/stdatomic.h cmodelint.h SHMEM_CC=snapshot.cc malloc.c mymemory.cc SHMEM_O=snapshot.o malloc.o mymemory.o SHMEM_H=snapshot.h snapshotimp.h mymemory.h -CPPFLAGS=-Wall -g -O0 +CPPFLAGS=-Wall -g -O0 -Iinclude -I. LDFLAGS=-ldl -lrt SHARED=-shared diff --git a/action.cc b/action.cc index c617f0d..8793337 100644 --- a/action.cc +++ b/action.cc @@ -49,9 +49,9 @@ bool ModelAction::is_initialization() const bool ModelAction::is_acquire() const { switch (order) { - case memory_order_acquire: - case memory_order_acq_rel: - case memory_order_seq_cst: + case std::memory_order_acquire: + case std::memory_order_acq_rel: + case std::memory_order_seq_cst: return true; default: return false; @@ -61,9 +61,9 @@ bool ModelAction::is_acquire() const bool ModelAction::is_release() const { switch (order) { - case memory_order_release: - case memory_order_acq_rel: - case memory_order_seq_cst: + case std::memory_order_release: + case std::memory_order_acq_rel: + case std::memory_order_seq_cst: return true; default: return false; @@ -72,7 +72,7 @@ bool ModelAction::is_release() const bool ModelAction::is_seqcst() const { - return order==memory_order_seq_cst; + return order==std::memory_order_seq_cst; } bool ModelAction::same_var(const ModelAction *act) const diff --git a/action.h b/action.h index fff6bc8..e43955d 100644 --- a/action.h +++ b/action.h @@ -9,9 +9,9 @@ #include #include "threads.h" -#include "libatomic.h" #include "mymemory.h" #include "clockvector.h" +#include "libatomic.h" /** Note that this value can be legitimately used by a program, and hence by iteself does not indicate no value. 
*/ diff --git a/cmodelint.cc b/cmodelint.cc new file mode 100644 index 0000000..20f257c --- /dev/null +++ b/cmodelint.cc @@ -0,0 +1,19 @@ +#include "model.h" +#include "cmodelint.h" + +uint64_t model_read_action(void * obj, memory_order ord) { + model->switch_to_master(new ModelAction(ATOMIC_READ, ord, obj)); + return thread_current()->get_return_value(); +} + +void model_write_action(void * obj, memory_order ord, uint64_t val) { + model->switch_to_master(new ModelAction(ATOMIC_WRITE, ord, obj, val)); +} + +void model_init_action(void * obj, uint64_t val) { + model->switch_to_master(new ModelAction(ATOMIC_INIT, memory_order_relaxed, obj, val)); +} + +void model_rmw_action(void *obj, memory_order ord, uint64_t val) { + model->switch_to_master(new ModelAction(ATOMIC_RMW, ord, obj, val)); +} diff --git a/cmodelint.h b/cmodelint.h new file mode 100644 index 0000000..0f65327 --- /dev/null +++ b/cmodelint.h @@ -0,0 +1,19 @@ +#ifndef CMODELINT_H +#define CMODELINT_H +#include + +#if __cplusplus +using std::memory_order; +extern "C" { +#endif + +uint64_t model_read_action(void * obj, memory_order ord); +void model_write_action(void * obj, memory_order ord, uint64_t val); +void model_rmw_action(void *obj, memory_order ord, uint64_t val); +void model_init_action(void * obj, uint64_t val); + +#if __cplusplus +} +#endif + +#endif diff --git a/include/impatomic.c b/include/impatomic.c deleted file mode 100644 index 05de31c..0000000 --- a/include/impatomic.c +++ /dev/null @@ -1,37 +0,0 @@ -#include -#include "impatomic.h" - -bool atomic_flag_test_and_set_explicit -( volatile atomic_flag* __a__, memory_order __x__ ) -{ - bool * __p__ = &((__a__)->__f__); - model->switch_to_master(new ModelAction(ATOMIC_READ, __x__, __p__)); - bool result = (void *) thread_current()->get_return_value(); - model->switch_to_master(new ModelAction(ATOMIC_RMW, __x__, __p__, true)); - return result; -} - -bool atomic_flag_test_and_set( volatile atomic_flag* __a__ ) -{ return atomic_flag_test_and_set_explicit( __a__, memory_order_seq_cst ); } - -void atomic_flag_clear_explicit -( volatile atomic_flag* __a__, memory_order __x__ ) -{ - bool * __p__ = &((__a__)->__f__); - model->switch_to_master(new ModelAction(ATOMIC_WRITE, __x__, __p__, false)); -} - -void atomic_flag_clear( volatile atomic_flag* __a__ ) -{ atomic_flag_clear_explicit( __a__, memory_order_seq_cst ); } - -void atomic_flag_fence( const volatile atomic_flag* __a__, memory_order __x__ ) -{ - ASSERT(0); -} - -void __atomic_flag_wait__( volatile atomic_flag* __a__ ) -{ while ( atomic_flag_test_and_set( __a__ ) ); } - -void __atomic_flag_wait_explicit__( volatile atomic_flag* __a__, - memory_order __x__ ) -{ while ( atomic_flag_test_and_set_explicit( __a__, __x__ ) ); } diff --git a/include/impatomic.h b/include/impatomic.h index 472e663..4d842d6 100644 --- a/include/impatomic.h +++ b/include/impatomic.h @@ -1,22 +1,12 @@ +#include "memoryorder.h" +#include "cmodelint.h" #ifdef __cplusplus -#include namespace std { -#else -#include -#include #endif - #define CPP0X( feature ) - -typedef enum memory_order { - memory_order_relaxed, memory_order_acquire, memory_order_release, - memory_order_acq_rel, memory_order_seq_cst -} memory_order; - - typedef struct atomic_flag { #ifdef __cplusplus @@ -82,43 +72,48 @@ inline void atomic_flag::fence( memory_order __x__ ) const volatile __x__=memory-ordering, and __y__=memory-ordering. 
*/ -#define _ATOMIC_LOAD_( __a__, __x__ ) \ - ({ volatile __typeof__((__a__)->__f__)* __p__ = ((__a__)->__f__); \ - model->switch_to_master(new ModelAction(ATOMIC_READ, __x__, __p__)); \ - ((__typeof__((__a__)->__f__)) (thread_current()->get_return_value())); \ - }) - +#define _ATOMIC_LOAD_( __a__, __x__ ) \ + ({ volatile __typeof__((__a__)->__f__)* __p__ = & ((__a__)->__f__); \ + __typeof__((__a__)->__f__) __r__ = (__typeof__((__a__)->__f__))model_read_action((void *)__p__, __x__); \ + __r__; }) #define _ATOMIC_STORE_( __a__, __m__, __x__ ) \ - ({ volatile __typeof__((__a__)->__f__)* __p__ = ((__a__)->__f__); \ + ({ volatile __typeof__((__a__)->__f__)* __p__ = & ((__a__)->__f__); \ __typeof__(__m__) __v__ = (__m__); \ - model->switch_to_master(new ModelAction(ATOMIC_WRITE, __x__, __p__, __v__)); \ + model_write_action((void *) __p__, __x__, (uint64_t) __v__); \ __v__; }) -#define _ATOMIC_MODIFY_( __a__, __o__, __m__, __x__ ) \ - ({ volatile __typeof__((__a__)->__f__)* __p__ = ((__a__)->__f__); \ - model->switch_to_master(new ModelAction(ATOMIC_READ, __x__, __p__)); \ - __typeof__((__a__)->__f__) __old__=(__typeof__((__a__)->__f__)) thread_current()->get_return_value(); \ + +#define _ATOMIC_INIT_( __a__, __m__ ) \ + ({ volatile __typeof__((__a__)->__f__)* __p__ = & ((__a__)->__f__); \ __typeof__(__m__) __v__ = (__m__); \ - __typeof__((__a__)->__f__) __copy__= __old__; \ - __copy__ __o__ __v__; \ - model->switch_to_master(new ModelAction(ATOMIC_RMW, __x__, __p__, __copy__)); \ - __old__; }) + model_init_action((void *) __p__, (uint64_t) __v__); \ + __v__; }) + +#define _ATOMIC_MODIFY_( __a__, __o__, __m__, __x__ ) \ + ({ volatile __typeof__((__a__)->__f__)* __p__ = & ((__a__)->__f__); \ + __typeof__((__a__)->__f__) __old__=(__typeof__((__a__)->__f__)) model_read_action((void *)__p__, __x__); \ + __typeof__(__m__) __v__ = (__m__); \ + __typeof__((__a__)->__f__) __copy__= __old__; \ + __copy__ __o__ __v__; \ + model_rmw_action((void *)__p__, __x__, (uint64_t) __copy__); \ + __old__; }) #define _ATOMIC_CMPSWP_( __a__, __e__, __m__, __x__ ) \ - ({ volatile __typeof__((__a__)->__f__)* __p__ = ((__a__)->__f__); \ + ({ volatile __typeof__((__a__)->__f__)* __p__ = & ((__a__)->__f__); \ __typeof__(__e__) __q__ = (__e__); \ __typeof__(__m__) __v__ = (__m__); \ bool __r__; \ - model->switch_to_master(new ModelAction(ATOMIC_READ, __x__, __p__)); \ - __typeof__((__a__)->__f__) __t__=(__typeof__((__a__)->__f__)) thread_current()->get_return_value(); \ + __typeof__((__a__)->__f__) __t__=(__typeof__((__a__)->__f__)) model_read_action((void *)__p__, __x__);\ if (__t__ == * __q__ ) { \ - model->switch_to_master(new ModelAction(ATOMIC_RMW, __x__, __p__, __v__)); __r__ = true; } \ + model_rmw_action((void *)__p__, __x__, (uint64_t) __v__); __r__ = true; } \ else { *__q__ = __t__; __r__ = false;} \ __r__; }) +//TODO #define _ATOMIC_FENCE_( __a__, __x__ ) \ - ({ ASSERT(0);}) +({ ;}) + #define ATOMIC_CHAR_LOCK_FREE 1 #define ATOMIC_CHAR16_T_LOCK_FREE 1 @@ -1547,15 +1542,16 @@ template<> struct atomic< wchar_t > : atomic_wchar_t #ifdef __cplusplus -inline bool atomic_is_lock_free( const volatile atomic_bool* __a__ ) +inline bool atomic_is_lock_free +( const volatile atomic_bool* __a__ ) { return false; } inline bool atomic_load_explicit ( volatile atomic_bool* __a__, memory_order __x__ ) { return _ATOMIC_LOAD_( __a__, __x__ ); } -inline bool atomic_load( volatile atomic_bool* __a__ ) -{ return atomic_load_explicit( __a__, memory_order_seq_cst ); } +inline bool atomic_load +( volatile atomic_bool* __a__ ) { return 
atomic_load_explicit( __a__, memory_order_seq_cst ); } inline void atomic_store_explicit ( volatile atomic_bool* __a__, bool __m__, memory_order __x__ ) @@ -1608,7 +1604,7 @@ inline void atomic_store inline void* atomic_swap_explicit ( volatile atomic_address* __a__, void* __m__, memory_order __x__ ) -{ return _ATOMIC_MODIFY_( __a__, =, __m__, __x__ ); } +{ return _ATOMIC_MODIFY_( __a__, =, __m__, __x__ ); } inline void* atomic_swap ( volatile atomic_address* __a__, void* __m__ ) @@ -2123,10 +2119,10 @@ inline void atomic_fence inline void* atomic_fetch_add_explicit ( volatile atomic_address* __a__, ptrdiff_t __m__, memory_order __x__ ) -{ void* volatile* __p__ = &((__a__)->__f__); - model->switch_to_master(new ModelAction(ATOMIC_READ, __x__, __p__)); - void* __r__ = (void *) thread_current()->get_return_value(); - model->switch_to_master(new ModelAction(ATOMIC_RMW, __x__, __p__, (void*)((char*)(*__p__) + __m__))); +{ + void* volatile* __p__ = &((__a__)->__f__); + void* __r__ = (void *) model_read_action((void *)__p__, __x__); + model_rmw_action((void *)__p__, __x__, (uint64_t) ((char*)(*__p__) + __m__)); return __r__; } inline void* atomic_fetch_add @@ -2136,24 +2132,23 @@ inline void* atomic_fetch_add inline void* atomic_fetch_sub_explicit ( volatile atomic_address* __a__, ptrdiff_t __m__, memory_order __x__ ) -{ void* volatile* __p__ = &((__a__)->__f__); - model->switch_to_master(new ModelAction(ATOMIC_READ, __x__, __p__)); - void* __r__ = (void *) thread_current()->get_return_value(); - model->switch_to_master(new ModelAction(ATOMIC_RMW, __x__, __p__, (void*)((char*)(*__p__) - __m__))); +{ + void* volatile* __p__ = &((__a__)->__f__); + void* __r__ = (void *) model_read_action((void *)__p__, __x__); + model_rmw_action((void *)__p__, __x__, (uint64_t)((char*)(*__p__) - __m__)); return __r__; } inline void* atomic_fetch_sub ( volatile atomic_address* __a__, ptrdiff_t __m__ ) { return atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); } - inline char atomic_fetch_add_explicit ( volatile atomic_char* __a__, char __m__, memory_order __x__ ) { return _ATOMIC_MODIFY_( __a__, +=, __m__, __x__ ); } inline char atomic_fetch_add ( volatile atomic_char* __a__, char __m__ ) -{ atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); } +{ return atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); } inline char atomic_fetch_sub_explicit @@ -2162,7 +2157,7 @@ inline char atomic_fetch_sub_explicit inline char atomic_fetch_sub ( volatile atomic_char* __a__, char __m__ ) -{ atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); } +{ return atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); } inline char atomic_fetch_and_explicit @@ -2171,7 +2166,7 @@ inline char atomic_fetch_and_explicit inline char atomic_fetch_and ( volatile atomic_char* __a__, char __m__ ) -{ atomic_fetch_and_explicit( __a__, __m__, memory_order_seq_cst ); } +{ return atomic_fetch_and_explicit( __a__, __m__, memory_order_seq_cst ); } inline char atomic_fetch_or_explicit @@ -2180,7 +2175,7 @@ inline char atomic_fetch_or_explicit inline char atomic_fetch_or ( volatile atomic_char* __a__, char __m__ ) -{ atomic_fetch_or_explicit( __a__, __m__, memory_order_seq_cst ); } +{ return atomic_fetch_or_explicit( __a__, __m__, memory_order_seq_cst ); } inline char atomic_fetch_xor_explicit @@ -2189,7 +2184,7 @@ inline char atomic_fetch_xor_explicit inline char atomic_fetch_xor ( volatile atomic_char* __a__, char __m__ ) -{ atomic_fetch_xor_explicit( __a__, __m__, memory_order_seq_cst ); } 
+{ return atomic_fetch_xor_explicit( __a__, __m__, memory_order_seq_cst ); } inline signed char atomic_fetch_add_explicit @@ -2198,7 +2193,7 @@ inline signed char atomic_fetch_add_explicit inline signed char atomic_fetch_add ( volatile atomic_schar* __a__, signed char __m__ ) -{ atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); } +{ return atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); } inline signed char atomic_fetch_sub_explicit @@ -2207,7 +2202,7 @@ inline signed char atomic_fetch_sub_explicit inline signed char atomic_fetch_sub ( volatile atomic_schar* __a__, signed char __m__ ) -{ atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); } +{ return atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); } inline signed char atomic_fetch_and_explicit @@ -2216,7 +2211,7 @@ inline signed char atomic_fetch_and_explicit inline signed char atomic_fetch_and ( volatile atomic_schar* __a__, signed char __m__ ) -{ atomic_fetch_and_explicit( __a__, __m__, memory_order_seq_cst ); } +{ return atomic_fetch_and_explicit( __a__, __m__, memory_order_seq_cst ); } inline signed char atomic_fetch_or_explicit @@ -2225,7 +2220,7 @@ inline signed char atomic_fetch_or_explicit inline signed char atomic_fetch_or ( volatile atomic_schar* __a__, signed char __m__ ) -{ atomic_fetch_or_explicit( __a__, __m__, memory_order_seq_cst ); } +{ return atomic_fetch_or_explicit( __a__, __m__, memory_order_seq_cst ); } inline signed char atomic_fetch_xor_explicit @@ -2234,7 +2229,7 @@ inline signed char atomic_fetch_xor_explicit inline signed char atomic_fetch_xor ( volatile atomic_schar* __a__, signed char __m__ ) -{ atomic_fetch_xor_explicit( __a__, __m__, memory_order_seq_cst ); } +{ return atomic_fetch_xor_explicit( __a__, __m__, memory_order_seq_cst ); } inline unsigned char atomic_fetch_add_explicit @@ -2243,7 +2238,7 @@ inline unsigned char atomic_fetch_add_explicit inline unsigned char atomic_fetch_add ( volatile atomic_uchar* __a__, unsigned char __m__ ) -{ atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); } +{ return atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); } inline unsigned char atomic_fetch_sub_explicit @@ -2252,7 +2247,7 @@ inline unsigned char atomic_fetch_sub_explicit inline unsigned char atomic_fetch_sub ( volatile atomic_uchar* __a__, unsigned char __m__ ) -{ atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); } +{ return atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); } inline unsigned char atomic_fetch_and_explicit @@ -2261,7 +2256,7 @@ inline unsigned char atomic_fetch_and_explicit inline unsigned char atomic_fetch_and ( volatile atomic_uchar* __a__, unsigned char __m__ ) -{ atomic_fetch_and_explicit( __a__, __m__, memory_order_seq_cst ); } +{ return atomic_fetch_and_explicit( __a__, __m__, memory_order_seq_cst ); } inline unsigned char atomic_fetch_or_explicit @@ -2270,7 +2265,7 @@ inline unsigned char atomic_fetch_or_explicit inline unsigned char atomic_fetch_or ( volatile atomic_uchar* __a__, unsigned char __m__ ) -{ atomic_fetch_or_explicit( __a__, __m__, memory_order_seq_cst ); } +{ return atomic_fetch_or_explicit( __a__, __m__, memory_order_seq_cst ); } inline unsigned char atomic_fetch_xor_explicit @@ -2279,7 +2274,7 @@ inline unsigned char atomic_fetch_xor_explicit inline unsigned char atomic_fetch_xor ( volatile atomic_uchar* __a__, unsigned char __m__ ) -{ atomic_fetch_xor_explicit( __a__, __m__, memory_order_seq_cst ); } +{ return atomic_fetch_xor_explicit( __a__, __m__, 
memory_order_seq_cst ); } inline short atomic_fetch_add_explicit @@ -2288,7 +2283,7 @@ inline short atomic_fetch_add_explicit inline short atomic_fetch_add ( volatile atomic_short* __a__, short __m__ ) -{ atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); } +{ return atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); } inline short atomic_fetch_sub_explicit @@ -2297,7 +2292,7 @@ inline short atomic_fetch_sub_explicit inline short atomic_fetch_sub ( volatile atomic_short* __a__, short __m__ ) -{ atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); } +{ return atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); } inline short atomic_fetch_and_explicit @@ -2306,7 +2301,7 @@ inline short atomic_fetch_and_explicit inline short atomic_fetch_and ( volatile atomic_short* __a__, short __m__ ) -{ atomic_fetch_and_explicit( __a__, __m__, memory_order_seq_cst ); } +{ return atomic_fetch_and_explicit( __a__, __m__, memory_order_seq_cst ); } inline short atomic_fetch_or_explicit @@ -2315,7 +2310,7 @@ inline short atomic_fetch_or_explicit inline short atomic_fetch_or ( volatile atomic_short* __a__, short __m__ ) -{ atomic_fetch_or_explicit( __a__, __m__, memory_order_seq_cst ); } +{ return atomic_fetch_or_explicit( __a__, __m__, memory_order_seq_cst ); } inline short atomic_fetch_xor_explicit @@ -2324,7 +2319,7 @@ inline short atomic_fetch_xor_explicit inline short atomic_fetch_xor ( volatile atomic_short* __a__, short __m__ ) -{ atomic_fetch_xor_explicit( __a__, __m__, memory_order_seq_cst ); } +{ return atomic_fetch_xor_explicit( __a__, __m__, memory_order_seq_cst ); } inline unsigned short atomic_fetch_add_explicit @@ -2333,7 +2328,7 @@ inline unsigned short atomic_fetch_add_explicit inline unsigned short atomic_fetch_add ( volatile atomic_ushort* __a__, unsigned short __m__ ) -{ atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); } +{ return atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); } inline unsigned short atomic_fetch_sub_explicit @@ -2342,7 +2337,7 @@ inline unsigned short atomic_fetch_sub_explicit inline unsigned short atomic_fetch_sub ( volatile atomic_ushort* __a__, unsigned short __m__ ) -{ atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); } +{ return atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); } inline unsigned short atomic_fetch_and_explicit @@ -2351,7 +2346,7 @@ inline unsigned short atomic_fetch_and_explicit inline unsigned short atomic_fetch_and ( volatile atomic_ushort* __a__, unsigned short __m__ ) -{ atomic_fetch_and_explicit( __a__, __m__, memory_order_seq_cst ); } +{ return atomic_fetch_and_explicit( __a__, __m__, memory_order_seq_cst ); } inline unsigned short atomic_fetch_or_explicit @@ -2360,7 +2355,7 @@ inline unsigned short atomic_fetch_or_explicit inline unsigned short atomic_fetch_or ( volatile atomic_ushort* __a__, unsigned short __m__ ) -{ atomic_fetch_or_explicit( __a__, __m__, memory_order_seq_cst ); } +{ return atomic_fetch_or_explicit( __a__, __m__, memory_order_seq_cst ); } inline unsigned short atomic_fetch_xor_explicit @@ -2369,7 +2364,7 @@ inline unsigned short atomic_fetch_xor_explicit inline unsigned short atomic_fetch_xor ( volatile atomic_ushort* __a__, unsigned short __m__ ) -{ atomic_fetch_xor_explicit( __a__, __m__, memory_order_seq_cst ); } +{ return atomic_fetch_xor_explicit( __a__, __m__, memory_order_seq_cst ); } inline int atomic_fetch_add_explicit @@ -2378,7 +2373,7 @@ inline int atomic_fetch_add_explicit inline int atomic_fetch_add 
( volatile atomic_int* __a__, int __m__ ) -{ atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); } +{ return atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); } inline int atomic_fetch_sub_explicit @@ -2387,7 +2382,7 @@ inline int atomic_fetch_sub_explicit inline int atomic_fetch_sub ( volatile atomic_int* __a__, int __m__ ) -{ atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); } +{ return atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); } inline int atomic_fetch_and_explicit @@ -2396,7 +2391,7 @@ inline int atomic_fetch_and_explicit inline int atomic_fetch_and ( volatile atomic_int* __a__, int __m__ ) -{ atomic_fetch_and_explicit( __a__, __m__, memory_order_seq_cst ); } +{ return atomic_fetch_and_explicit( __a__, __m__, memory_order_seq_cst ); } inline int atomic_fetch_or_explicit @@ -2405,7 +2400,7 @@ inline int atomic_fetch_or_explicit inline int atomic_fetch_or ( volatile atomic_int* __a__, int __m__ ) -{ atomic_fetch_or_explicit( __a__, __m__, memory_order_seq_cst ); } +{ return atomic_fetch_or_explicit( __a__, __m__, memory_order_seq_cst ); } inline int atomic_fetch_xor_explicit @@ -2414,7 +2409,7 @@ inline int atomic_fetch_xor_explicit inline int atomic_fetch_xor ( volatile atomic_int* __a__, int __m__ ) -{ atomic_fetch_xor_explicit( __a__, __m__, memory_order_seq_cst ); } +{ return atomic_fetch_xor_explicit( __a__, __m__, memory_order_seq_cst ); } inline unsigned int atomic_fetch_add_explicit @@ -2423,7 +2418,7 @@ inline unsigned int atomic_fetch_add_explicit inline unsigned int atomic_fetch_add ( volatile atomic_uint* __a__, unsigned int __m__ ) -{ atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); } +{ return atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); } inline unsigned int atomic_fetch_sub_explicit @@ -2432,7 +2427,7 @@ inline unsigned int atomic_fetch_sub_explicit inline unsigned int atomic_fetch_sub ( volatile atomic_uint* __a__, unsigned int __m__ ) -{ atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); } +{ return atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); } inline unsigned int atomic_fetch_and_explicit @@ -2441,7 +2436,7 @@ inline unsigned int atomic_fetch_and_explicit inline unsigned int atomic_fetch_and ( volatile atomic_uint* __a__, unsigned int __m__ ) -{ atomic_fetch_and_explicit( __a__, __m__, memory_order_seq_cst ); } +{ return atomic_fetch_and_explicit( __a__, __m__, memory_order_seq_cst ); } inline unsigned int atomic_fetch_or_explicit @@ -2450,7 +2445,7 @@ inline unsigned int atomic_fetch_or_explicit inline unsigned int atomic_fetch_or ( volatile atomic_uint* __a__, unsigned int __m__ ) -{ atomic_fetch_or_explicit( __a__, __m__, memory_order_seq_cst ); } +{ return atomic_fetch_or_explicit( __a__, __m__, memory_order_seq_cst ); } inline unsigned int atomic_fetch_xor_explicit @@ -2459,7 +2454,7 @@ inline unsigned int atomic_fetch_xor_explicit inline unsigned int atomic_fetch_xor ( volatile atomic_uint* __a__, unsigned int __m__ ) -{ atomic_fetch_xor_explicit( __a__, __m__, memory_order_seq_cst ); } +{ return atomic_fetch_xor_explicit( __a__, __m__, memory_order_seq_cst ); } inline long atomic_fetch_add_explicit @@ -2468,7 +2463,7 @@ inline long atomic_fetch_add_explicit inline long atomic_fetch_add ( volatile atomic_long* __a__, long __m__ ) -{ atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); } +{ return atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); } inline long atomic_fetch_sub_explicit @@ -2477,7 
+2472,7 @@ inline long atomic_fetch_sub_explicit inline long atomic_fetch_sub ( volatile atomic_long* __a__, long __m__ ) -{ atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); } +{ return atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); } inline long atomic_fetch_and_explicit @@ -2486,7 +2481,7 @@ inline long atomic_fetch_and_explicit inline long atomic_fetch_and ( volatile atomic_long* __a__, long __m__ ) -{ atomic_fetch_and_explicit( __a__, __m__, memory_order_seq_cst ); } +{ return atomic_fetch_and_explicit( __a__, __m__, memory_order_seq_cst ); } inline long atomic_fetch_or_explicit @@ -2495,7 +2490,7 @@ inline long atomic_fetch_or_explicit inline long atomic_fetch_or ( volatile atomic_long* __a__, long __m__ ) -{ atomic_fetch_or_explicit( __a__, __m__, memory_order_seq_cst ); } +{ return atomic_fetch_or_explicit( __a__, __m__, memory_order_seq_cst ); } inline long atomic_fetch_xor_explicit @@ -2504,7 +2499,7 @@ inline long atomic_fetch_xor_explicit inline long atomic_fetch_xor ( volatile atomic_long* __a__, long __m__ ) -{ atomic_fetch_xor_explicit( __a__, __m__, memory_order_seq_cst ); } +{ return atomic_fetch_xor_explicit( __a__, __m__, memory_order_seq_cst ); } inline unsigned long atomic_fetch_add_explicit @@ -2513,7 +2508,7 @@ inline unsigned long atomic_fetch_add_explicit inline unsigned long atomic_fetch_add ( volatile atomic_ulong* __a__, unsigned long __m__ ) -{ atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); } +{ return atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); } inline unsigned long atomic_fetch_sub_explicit @@ -2522,7 +2517,7 @@ inline unsigned long atomic_fetch_sub_explicit inline unsigned long atomic_fetch_sub ( volatile atomic_ulong* __a__, unsigned long __m__ ) -{ atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); } +{ return atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); } inline unsigned long atomic_fetch_and_explicit @@ -2531,7 +2526,7 @@ inline unsigned long atomic_fetch_and_explicit inline unsigned long atomic_fetch_and ( volatile atomic_ulong* __a__, unsigned long __m__ ) -{ atomic_fetch_and_explicit( __a__, __m__, memory_order_seq_cst ); } +{ return atomic_fetch_and_explicit( __a__, __m__, memory_order_seq_cst ); } inline unsigned long atomic_fetch_or_explicit @@ -2540,7 +2535,7 @@ inline unsigned long atomic_fetch_or_explicit inline unsigned long atomic_fetch_or ( volatile atomic_ulong* __a__, unsigned long __m__ ) -{ atomic_fetch_or_explicit( __a__, __m__, memory_order_seq_cst ); } +{ return atomic_fetch_or_explicit( __a__, __m__, memory_order_seq_cst ); } inline unsigned long atomic_fetch_xor_explicit @@ -2549,7 +2544,7 @@ inline unsigned long atomic_fetch_xor_explicit inline unsigned long atomic_fetch_xor ( volatile atomic_ulong* __a__, unsigned long __m__ ) -{ atomic_fetch_xor_explicit( __a__, __m__, memory_order_seq_cst ); } +{ return atomic_fetch_xor_explicit( __a__, __m__, memory_order_seq_cst ); } inline long long atomic_fetch_add_explicit @@ -2558,7 +2553,7 @@ inline long long atomic_fetch_add_explicit inline long long atomic_fetch_add ( volatile atomic_llong* __a__, long long __m__ ) -{ atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); } +{ return atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); } inline long long atomic_fetch_sub_explicit @@ -2567,7 +2562,7 @@ inline long long atomic_fetch_sub_explicit inline long long atomic_fetch_sub ( volatile atomic_llong* __a__, long long __m__ ) -{ atomic_fetch_sub_explicit( 
__a__, __m__, memory_order_seq_cst ); } +{ return atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); } inline long long atomic_fetch_and_explicit @@ -2576,7 +2571,7 @@ inline long long atomic_fetch_and_explicit inline long long atomic_fetch_and ( volatile atomic_llong* __a__, long long __m__ ) -{ atomic_fetch_and_explicit( __a__, __m__, memory_order_seq_cst ); } +{ return atomic_fetch_and_explicit( __a__, __m__, memory_order_seq_cst ); } inline long long atomic_fetch_or_explicit @@ -2585,7 +2580,7 @@ inline long long atomic_fetch_or_explicit inline long long atomic_fetch_or ( volatile atomic_llong* __a__, long long __m__ ) -{ atomic_fetch_or_explicit( __a__, __m__, memory_order_seq_cst ); } +{ return atomic_fetch_or_explicit( __a__, __m__, memory_order_seq_cst ); } inline long long atomic_fetch_xor_explicit @@ -2594,7 +2589,7 @@ inline long long atomic_fetch_xor_explicit inline long long atomic_fetch_xor ( volatile atomic_llong* __a__, long long __m__ ) -{ atomic_fetch_xor_explicit( __a__, __m__, memory_order_seq_cst ); } +{ return atomic_fetch_xor_explicit( __a__, __m__, memory_order_seq_cst ); } inline unsigned long long atomic_fetch_add_explicit @@ -2603,7 +2598,7 @@ inline unsigned long long atomic_fetch_add_explicit inline unsigned long long atomic_fetch_add ( volatile atomic_ullong* __a__, unsigned long long __m__ ) -{ atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); } +{ return atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); } inline unsigned long long atomic_fetch_sub_explicit @@ -2612,7 +2607,7 @@ inline unsigned long long atomic_fetch_sub_explicit inline unsigned long long atomic_fetch_sub ( volatile atomic_ullong* __a__, unsigned long long __m__ ) -{ atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); } +{ return atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); } inline unsigned long long atomic_fetch_and_explicit @@ -2621,7 +2616,7 @@ inline unsigned long long atomic_fetch_and_explicit inline unsigned long long atomic_fetch_and ( volatile atomic_ullong* __a__, unsigned long long __m__ ) -{ atomic_fetch_and_explicit( __a__, __m__, memory_order_seq_cst ); } +{ return atomic_fetch_and_explicit( __a__, __m__, memory_order_seq_cst ); } inline unsigned long long atomic_fetch_or_explicit @@ -2630,7 +2625,7 @@ inline unsigned long long atomic_fetch_or_explicit inline unsigned long long atomic_fetch_or ( volatile atomic_ullong* __a__, unsigned long long __m__ ) -{ atomic_fetch_or_explicit( __a__, __m__, memory_order_seq_cst ); } +{ return atomic_fetch_or_explicit( __a__, __m__, memory_order_seq_cst ); } inline unsigned long long atomic_fetch_xor_explicit @@ -2639,7 +2634,7 @@ inline unsigned long long atomic_fetch_xor_explicit inline unsigned long long atomic_fetch_xor ( volatile atomic_ullong* __a__, unsigned long long __m__ ) -{ atomic_fetch_xor_explicit( __a__, __m__, memory_order_seq_cst ); } +{ return atomic_fetch_xor_explicit( __a__, __m__, memory_order_seq_cst ); } inline wchar_t atomic_fetch_add_explicit @@ -2648,7 +2643,7 @@ inline wchar_t atomic_fetch_add_explicit inline wchar_t atomic_fetch_add ( volatile atomic_wchar_t* __a__, wchar_t __m__ ) -{ atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); } +{ return atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); } inline wchar_t atomic_fetch_sub_explicit @@ -2657,7 +2652,7 @@ inline wchar_t atomic_fetch_sub_explicit inline wchar_t atomic_fetch_sub ( volatile atomic_wchar_t* __a__, wchar_t __m__ ) -{ atomic_fetch_sub_explicit( 
__a__, __m__, memory_order_seq_cst ); } +{ return atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); } inline wchar_t atomic_fetch_and_explicit @@ -2666,7 +2661,7 @@ inline wchar_t atomic_fetch_and_explicit inline wchar_t atomic_fetch_and ( volatile atomic_wchar_t* __a__, wchar_t __m__ ) -{ atomic_fetch_and_explicit( __a__, __m__, memory_order_seq_cst ); } +{ return atomic_fetch_and_explicit( __a__, __m__, memory_order_seq_cst ); } inline wchar_t atomic_fetch_or_explicit @@ -2675,7 +2670,7 @@ inline wchar_t atomic_fetch_or_explicit inline wchar_t atomic_fetch_or ( volatile atomic_wchar_t* __a__, wchar_t __m__ ) -{ atomic_fetch_or_explicit( __a__, __m__, memory_order_seq_cst ); } +{ return atomic_fetch_or_explicit( __a__, __m__, memory_order_seq_cst ); } inline wchar_t atomic_fetch_xor_explicit @@ -2684,7 +2679,7 @@ inline wchar_t atomic_fetch_xor_explicit inline wchar_t atomic_fetch_xor ( volatile atomic_wchar_t* __a__, wchar_t __m__ ) -{ atomic_fetch_xor_explicit( __a__, __m__, memory_order_seq_cst ); } +{ return atomic_fetch_xor_explicit( __a__, __m__, memory_order_seq_cst ); } #else @@ -2699,6 +2694,9 @@ _ATOMIC_LOAD_( __a__, memory_order_seq_cst ) #define atomic_load_explicit( __a__, __x__ ) \ _ATOMIC_LOAD_( __a__, __x__ ) +#define atomic_init( __a__, __m__ ) \ +_ATOMIC_INIT_( __a__, __m__ ) + #define atomic_store( __a__, __m__ ) \ _ATOMIC_STORE_( __a__, __m__, memory_order_seq_cst ) diff --git a/libatomic.cc b/libatomic.cc index a7b1fd6..e69de29 100644 --- a/libatomic.cc +++ b/libatomic.cc @@ -1,22 +0,0 @@ -#include "libatomic.h" -#include "model.h" -#include "common.h" - -void atomic_store_explicit(struct atomic_object *obj, int value, memory_order order) -{ - DBG(); - model->switch_to_master(new ModelAction(ATOMIC_WRITE, order, obj, value)); -} - -int atomic_load_explicit(struct atomic_object *obj, memory_order order) -{ - DBG(); - model->switch_to_master(new ModelAction(ATOMIC_READ, order, obj)); - return (int) thread_current()->get_return_value(); -} - -void atomic_init(struct atomic_object *obj, int value) -{ - DBG(); - model->switch_to_master(new ModelAction(ATOMIC_INIT, memory_order_relaxed, obj, value)); -} diff --git a/libatomic.h b/libatomic.h index 71482c3..c804fbc 100644 --- a/libatomic.h +++ b/libatomic.h @@ -5,31 +5,18 @@ #ifndef __LIBATOMIC_H__ #define __LIBATOMIC_H__ +#include "memoryorder.h" + #ifdef __cplusplus +using std::memory_order; +using std::memory_order_relaxed; +using std::memory_order_acquire; +using std::memory_order_release; +using std::memory_order_acq_rel; +using std::memory_order_seq_cst; extern "C" { #endif - /** @brief The memory orders specified by the C11/C++11 memory models */ - typedef enum memory_order { - memory_order_relaxed, - memory_order_consume, - memory_order_acquire, - memory_order_release, - memory_order_acq_rel, - memory_order_seq_cst - } memory_order; - - typedef struct atomic_object { - int value; - } atomic_int; - - void atomic_store_explicit(struct atomic_object *obj, int value, memory_order order); -#define atomic_store(A, B) atomic_store_explicit((A), (B), memory_order_seq_cst) - - int atomic_load_explicit(struct atomic_object *obj, memory_order order); -#define atomic_load(A) atomic_load_explicit((A), memory_order_seq_cst) - - void atomic_init(struct atomic_object *obj, int value); #ifdef __cplusplus } diff --git a/libthreads.cc b/libthreads.cc index c54c9aa..2898258 100644 --- a/libthreads.cc +++ b/libthreads.cc @@ -17,7 +17,7 @@ int thrd_create(thrd_t *t, thrd_start_t start_routine, void *arg) ret = 
model->add_thread(thread); DEBUG("create thread %d\n", id_to_int(thrd_to_id(*t))); /* seq_cst is just a 'don't care' parameter */ - model->switch_to_master(new ModelAction(THREAD_CREATE, memory_order_seq_cst, thread, VALUE_NONE)); + model->switch_to_master(new ModelAction(THREAD_CREATE, std::memory_order_seq_cst, thread, VALUE_NONE)); return ret; } @@ -32,7 +32,7 @@ int thrd_join(thrd_t t) int thrd_yield(void) { /* seq_cst is just a 'don't care' parameter */ - return model->switch_to_master(new ModelAction(THREAD_YIELD, memory_order_seq_cst, NULL, VALUE_NONE)); + return model->switch_to_master(new ModelAction(THREAD_YIELD, std::memory_order_seq_cst, NULL, VALUE_NONE)); } thrd_t thrd_current(void) diff --git a/model.h b/model.h index a475109..76ded6c 100644 --- a/model.h +++ b/model.h @@ -14,7 +14,6 @@ #include "schedule.h" #include "mymemory.h" #include "libthreads.h" -#include "libatomic.h" #include "threads.h" #include "action.h" #include "clockvector.h" diff --git a/threads.cc b/threads.cc index e3aed76..792014e 100644 --- a/threads.cc +++ b/threads.cc @@ -35,7 +35,7 @@ void thread_startup() { CREATE event, so we don't get redundant traces... */ /* Add dummy "start" action, just to create a first clock vector */ - model->switch_to_master(new ModelAction(THREAD_START, memory_order_seq_cst, curr_thread)); + model->switch_to_master(new ModelAction(THREAD_START, std::memory_order_seq_cst, curr_thread)); /* Call the actual thread function */ curr_thread->start_routine(curr_thread->arg); diff --git a/userprog.c b/userprog.c index 33319b1..6d8b57b 100644 --- a/userprog.c +++ b/userprog.c @@ -2,8 +2,8 @@ #include #include "libthreads.h" -#include "libatomic.h" #include "librace.h" +#include "stdatomic.h" static void a(atomic_int *obj) { -- 2.34.1
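After this patch, a user program no longer calls the old libatomic.cc shims: it includes the new stdatomic.h/cstdatomic wrappers, and each atomic operation expands through the _ATOMIC_INIT_/_ATOMIC_LOAD_/_ATOMIC_STORE_/_ATOMIC_MODIFY_ macros in include/impatomic.h into the C-linkage calls declared in cmodelint.h (model_init_action, model_read_action, model_write_action, model_rmw_action), each of which builds a ModelAction and switches to the model-checker master. The following is a minimal illustrative sketch of that path from a plain-C test program; it is not part of the patch. The entry-point name user_main, the flag/writer/reader identifiers, and the assumption that memoryorder.h exposes the C-visible memory_order enumerators are illustrative only, patterned after the userprog.c and libthreads.h usage visible in the diff:

#include <stdio.h>

#include "libthreads.h"
#include "stdatomic.h"

static atomic_int flag;

static void writer(void *arg)
{
	/* atomic_store_explicit expands to _ATOMIC_STORE_, which calls
	 * model_write_action(&flag.__f__, memory_order_release, (uint64_t)1) */
	atomic_store_explicit(&flag, 1, memory_order_release);
}

static void reader(void *arg)
{
	/* atomic_load_explicit expands to _ATOMIC_LOAD_, which calls
	 * model_read_action(&flag.__f__, memory_order_acquire) and casts the
	 * returned uint64_t back to the element type */
	int v = atomic_load_explicit(&flag, memory_order_acquire);
	printf("flag = %d\n", v);
}

void user_main()
{
	thrd_t t1, t2;

	/* atomic_init expands to the new _ATOMIC_INIT_ macro, which calls
	 * model_init_action(&flag.__f__, (uint64_t)0) */
	atomic_init(&flag, 0);

	thrd_create(&t1, (thrd_start_t)&writer, NULL);
	thrd_create(&t2, (thrd_start_t)&reader, NULL);
	thrd_join(t1);
	thrd_join(t2);
}

The same macros back the C++ std::atomic operations and the atomic_fetch_* helpers, which is why the patch also adds the previously missing return statements to the non-_explicit fetch wrappers: _ATOMIC_MODIFY_ yields the old value obtained from model_read_action before issuing model_rmw_action, and the wrappers were silently discarding it.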