5 #include "threads-model.h"
7 memory_order orders[6] = {
8 memory_order_relaxed, memory_order_consume, memory_order_acquire,
9 memory_order_release, memory_order_acq_rel, memory_order_seq_cst
12 /** Performs a read action.*/
13 uint64_t model_read_action(void * obj, memory_order ord) {
14 return model->switch_to_master(new ModelAction(ATOMIC_READ, ord, obj));
17 /** Performs a write action.*/
18 void model_write_action(void * obj, memory_order ord, uint64_t val) {
19 model->switch_to_master(new ModelAction(ATOMIC_WRITE, ord, obj, val));
22 /** Performs an init action. */
23 void model_init_action(void * obj, uint64_t val) {
24 model->switch_to_master(new ModelAction(ATOMIC_INIT, memory_order_relaxed, obj, val));
28 * Performs the read part of a RMW action. The next action must either be the
29 * write part of the RMW action or an explicit close out of the RMW action w/o
32 uint64_t model_rmwr_action(void *obj, memory_order ord) {
33 return model->switch_to_master(new ModelAction(ATOMIC_RMWR, ord, obj));
37 * Performs the read part of a RMW CAS action. The next action must
38 * either be the write part of the RMW action or an explicit close out
39 * of the RMW action w/o a write.
41 uint64_t model_rmwrcas_action(void *obj, memory_order ord, uint64_t oldval, int size) {
42 return model->switch_to_master(new ModelAction(ATOMIC_RMWRCAS, ord, obj, oldval, size));
45 /** Performs the write part of a RMW action. */
46 void model_rmw_action(void *obj, memory_order ord, uint64_t val) {
47 model->switch_to_master(new ModelAction(ATOMIC_RMW, ord, obj, val));
50 /** Closes out a RMW action without doing a write. */
51 void model_rmwc_action(void *obj, memory_order ord) {
52 model->switch_to_master(new ModelAction(ATOMIC_RMWC, ord, obj));
55 /** Issues a fence operation. */
56 void model_fence_action(memory_order ord) {
57 model->switch_to_master(new ModelAction(ATOMIC_FENCE, ord, FENCE_LOCATION));
60 // --------------------- helper functions --------------------------------
61 uint64_t model_rmwr_action_helper(void *obj, int atomic_index) {
62 return model->switch_to_master(new ModelAction(ATOMIC_RMWR, orders[atomic_index], obj));
65 void model_rmw_action_helper(void *obj, int atomic_index, uint64_t val) {
66 model->switch_to_master(new ModelAction(ATOMIC_RMW, orders[atomic_index], obj, val));
69 void model_rmwc_action_helper(void *obj, int atomic_index) {
70 model->switch_to_master(new ModelAction(ATOMIC_RMWC, orders[atomic_index], obj));
73 void model_fence_action_helper(int atomic_index) {
74 model->switch_to_master(new ModelAction(ATOMIC_FENCE, orders[atomic_index], FENCE_LOCATION));
78 uint8_t cds_atomic_load8(void * obj, int atomic_index) {
79 return (uint8_t) ( model->switch_to_master(new ModelAction(ATOMIC_READ, orders[atomic_index], obj)) );
81 uint16_t cds_atomic_load16(void * obj, int atomic_index) {
82 return (uint16_t) ( model->switch_to_master(new ModelAction(ATOMIC_READ, orders[atomic_index], obj)) );
84 uint32_t cds_atomic_load32(void * obj, int atomic_index) {
85 return (uint32_t) ( model->switch_to_master(new ModelAction(ATOMIC_READ, orders[atomic_index], obj)) );
87 uint64_t cds_atomic_load64(void * obj, int atomic_index) {
88 return model->switch_to_master(new ModelAction(ATOMIC_READ, orders[atomic_index], obj));
92 void cds_atomic_store8(void * obj, int atomic_index, uint8_t val) {
93 model->switch_to_master(new ModelAction(ATOMIC_WRITE, orders[atomic_index], obj, (uint64_t) val));
95 void cds_atomic_store16(void * obj, int atomic_index, uint16_t val) {
96 model->switch_to_master(new ModelAction(ATOMIC_WRITE, orders[atomic_index], obj, (uint64_t) val));
98 void cds_atomic_store32(void * obj, int atomic_index, uint32_t val) {
99 model->switch_to_master(new ModelAction(ATOMIC_WRITE, orders[atomic_index], obj, (uint64_t) val));
101 void cds_atomic_store64(void * obj, int atomic_index, uint64_t val) {
102 model->switch_to_master(new ModelAction(ATOMIC_WRITE, orders[atomic_index], obj, val));
/*
 * Expands to the body of a fetch-op entry point: perform the RMW read,
 * apply __op__ with the (width-truncated) operand, issue the RMW write of
 * the result, and return the OLD value (fetch-* semantics).
 *
 * This macro was previously defined twice without an intervening #undef
 * (a redefinition warning), and the first definition referenced `_val`
 * without declaring it (a compile error at expansion). The two are merged
 * into the single correct definition below.
 *
 * Note: `return` inside the statement expression returns from the
 * enclosing function, which is intentional here.
 */
#define _ATOMIC_RMW_(__op__, size, addr, atomic_index, val)                     \
({                                                                              \
	uint ## size ## _t _old = model_rmwr_action_helper(addr, atomic_index); \
	uint ## size ## _t _copy = _old;                                        \
	uint ## size ## _t _val = val;                                          \
	_copy __op__ _val;                                                      \
	model_rmw_action_helper(addr, atomic_index, (uint64_t) _copy);          \
	return _old;                                                            \
})
125 // cds atomic exchange
126 uint8_t cds_atomic_exchange8(void* addr, int atomic_index, uint8_t val) {
127 _ATOMIC_RMW_( =, 8, addr, atomic_index, val);
129 uint16_t cds_atomic_exchange16(void* addr, int atomic_index, uint16_t val) {
130 _ATOMIC_RMW_( =, 16, addr, atomic_index, val);
132 uint32_t cds_atomic_exchange32(void* addr, int atomic_index, uint32_t val) {
133 _ATOMIC_RMW_( =, 32, addr, atomic_index, val);
135 uint64_t cds_atomic_exchange64(void* addr, int atomic_index, uint64_t val) {
136 _ATOMIC_RMW_( =, 64, addr, atomic_index, val);
139 // cds atomic fetch add
140 uint8_t cds_atomic_fetch_add8(void* addr, int atomic_index, uint8_t val) {
141 _ATOMIC_RMW_( +=, 8, addr, atomic_index, val);
143 uint16_t cds_atomic_fetch_add16(void* addr, int atomic_index, uint16_t val) {
144 _ATOMIC_RMW_( +=, 16, addr, atomic_index, val);
146 uint32_t cds_atomic_fetch_add32(void* addr, int atomic_index, uint32_t val) {
147 _ATOMIC_RMW_( +=, 32, addr, atomic_index, val);
149 uint64_t cds_atomic_fetch_add64(void* addr, int atomic_index, uint64_t val) {
150 _ATOMIC_RMW_( +=, 64, addr, atomic_index, val);
153 // cds atomic fetch sub
154 uint8_t cds_atomic_fetch_sub8(void* addr, int atomic_index, uint8_t val) {
155 _ATOMIC_RMW_( -=, 8, addr, atomic_index, val);
157 uint16_t cds_atomic_fetch_sub16(void* addr, int atomic_index, uint16_t val) {
158 _ATOMIC_RMW_( -=, 16, addr, atomic_index, val);
160 uint32_t cds_atomic_fetch_sub32(void* addr, int atomic_index, uint32_t val) {
161 _ATOMIC_RMW_( -=, 32, addr, atomic_index, val);
163 uint64_t cds_atomic_fetch_sub64(void* addr, int atomic_index, uint64_t val) {
164 _ATOMIC_RMW_( -=, 64, addr, atomic_index, val);
167 // cds atomic fetch and
168 uint8_t cds_atomic_fetch_and8(void* addr, int atomic_index, uint8_t val) {
169 _ATOMIC_RMW_( &=, 8, addr, atomic_index, val);
171 uint16_t cds_atomic_fetch_and16(void* addr, int atomic_index, uint16_t val) {
172 _ATOMIC_RMW_( &=, 16, addr, atomic_index, val);
174 uint32_t cds_atomic_fetch_and32(void* addr, int atomic_index, uint32_t val) {
175 _ATOMIC_RMW_( &=, 32, addr, atomic_index, val);
177 uint64_t cds_atomic_fetch_and64(void* addr, int atomic_index, uint64_t val) {
178 _ATOMIC_RMW_( &=, 64, addr, atomic_index, val);
181 // cds atomic fetch or
182 uint8_t cds_atomic_fetch_or8(void* addr, int atomic_index, uint8_t val) {
183 _ATOMIC_RMW_( |=, 8, addr, atomic_index, val);
185 uint16_t cds_atomic_fetch_or16(void* addr, int atomic_index, uint16_t val) {
186 _ATOMIC_RMW_( |=, 16, addr, atomic_index, val);
188 uint32_t cds_atomic_fetch_or32(void* addr, int atomic_index, uint32_t val) {
189 _ATOMIC_RMW_( |=, 32, addr, atomic_index, val);
191 uint64_t cds_atomic_fetch_or64(void* addr, int atomic_index, uint64_t val) {
192 _ATOMIC_RMW_( |=, 64, addr, atomic_index, val);
195 // cds atomic fetch xor
196 uint8_t cds_atomic_fetch_xor8(void* addr, int atomic_index, uint8_t val) {
197 _ATOMIC_RMW_( ^=, 8, addr, atomic_index, val);
199 uint16_t cds_atomic_fetch_xor16(void* addr, int atomic_index, uint16_t val) {
200 _ATOMIC_RMW_( ^=, 16, addr, atomic_index, val);
202 uint32_t cds_atomic_fetch_xor32(void* addr, int atomic_index, uint32_t val) {
203 _ATOMIC_RMW_( ^=, 32, addr, atomic_index, val);
205 uint64_t cds_atomic_fetch_xor64(void* addr, int atomic_index, uint64_t val) {
206 _ATOMIC_RMW_( ^=, 64, addr, atomic_index, val);
/*
 * cds atomic compare and exchange.
 * To accommodate the LLVM pass, the return value is NOT a true/false
 * success flag: on success the expected value is returned, on failure the
 * value actually observed in memory is returned.
 *
 * On success: RMW-read, then RMW-write of the desired value.
 * On failure: RMW-read, then close out the RMW without a write.
 *
 * `return` inside the statement expression exits the enclosing function.
 */
#define _ATOMIC_CMPSWP_WEAK_ _ATOMIC_CMPSWP_
#define _ATOMIC_CMPSWP_(size, addr, expected, desired, atomic_index)            \
({                                                                              \
	uint ## size ## _t _desired = desired;                                  \
	uint ## size ## _t _expected = expected;                                \
	uint ## size ## _t _old = model_rmwr_action_helper(addr, atomic_index); \
	if (_old == _expected) {                                                \
		model_rmw_action_helper(addr, atomic_index, (uint64_t) _desired); \
		return _expected;                                                 \
	} else {                                                                \
		model_rmwc_action_helper(addr, atomic_index);                     \
		_expected = _old;                                                 \
		return _old;                                                      \
	}                                                                       \
})
224 // expected is supposed to be a pointer to an address, but the CmpOperand
225 // extracted from LLVM IR is an integer type.
227 uint8_t cds_atomic_compare_exchange8(void* addr, uint8_t expected,
228 uint8_t desired, int atomic_index_succ, int atomic_index_fail ) {
229 _ATOMIC_CMPSWP_(8, addr, expected, desired, atomic_index_succ );
231 uint16_t cds_atomic_compare_exchange16(void* addr, uint16_t expected,
232 uint16_t desired, int atomic_index_succ, int atomic_index_fail ) {
233 _ATOMIC_CMPSWP_(16, addr, expected, desired, atomic_index_succ );
235 uint32_t cds_atomic_compare_exchange32(void* addr, uint32_t expected,
236 uint32_t desired, int atomic_index_succ, int atomic_index_fail ) {
237 _ATOMIC_CMPSWP_(32, addr, expected, desired, atomic_index_succ );
239 uint64_t cds_atomic_compare_exchange64(void* addr, uint64_t expected,
240 uint64_t desired, int atomic_index_succ, int atomic_index_fail ) {
241 _ATOMIC_CMPSWP_(64, addr, expected, desired, atomic_index_succ );
244 // cds atomic thread fence
246 void cds_atomic_thread_fence(int atomic_index) {
247 model->switch_to_master(new ModelAction(ATOMIC_FENCE, orders[atomic_index], FENCE_LOCATION));
/*
 * Compare-and-swap for the wrapper types used by the <atomic> shim: __a__
 * is a struct whose payload member is __f__. Evaluates to a bool: true on
 * success; on failure *__e__ is updated with the observed value.
 *
 * This deliberately redefines _ATOMIC_CMPSWP_ for the section below, so
 * the earlier integer-width variant is #undef'd first to avoid a macro
 * redefinition warning. (bool __r__ is declared here — it was assigned
 * without a visible declaration before.)
 */
#undef _ATOMIC_CMPSWP_
#define _ATOMIC_CMPSWP_( __a__, __e__, __m__, __x__ )                         \
	({ volatile __typeof__((__a__)->__f__)* __p__ = & ((__a__)->__f__);   \
		__typeof__(__e__) __q__ = (__e__);                                \
		__typeof__(__m__) __v__ = (__m__);                                \
		bool __r__;                                                       \
		__typeof__((__a__)->__f__) __t__=(__typeof__((__a__)->__f__)) model_rmwr_action((void *)__p__, __x__); \
		if (__t__ == * __q__ ) {                                          \
			model_rmw_action((void *)__p__, __x__, (uint64_t) __v__); __r__ = true; } \
		else { model_rmwc_action((void *)__p__, __x__); *__q__ = __t__; __r__ = false;} \
		__r__; })
/* Issues a fence operation with ordering __x__. */
#define _ATOMIC_FENCE_( __x__ ) \
	({ model_fence_action(__x__); })
268 #define _ATOMIC_MODIFY_( __a__, __o__, __m__, __x__ ) \
269 ({ volatile __typeof__((__a__)->__f__)* __p__ = & ((__a__)->__f__); \
270 __typeof__((__a__)->__f__) __old__=(__typeof__((__a__)->__f__)) model_rmwr_action((void *)__p__, __x__); \
271 __typeof__(__m__) __v__ = (__m__); \
272 __typeof__((__a__)->__f__) __copy__= __old__; \
273 __copy__ __o__ __v__; \
274 model_rmw_action((void *)__p__, __x__, (uint64_t) __copy__); \
275 __old__ = __old__; Silence clang (-Wunused-value) \