#define CDSATOMICLOAD(size) \
uint ## size ## _t cds_atomic_load ## size(void * obj, int atomic_index, const char * position) { \
ensureModel(); \
- return (uint ## size ## _t) model->switch_to_master( \
+ return (uint ## size ## _t)model->switch_to_master( \
new ModelAction(ATOMIC_READ, position, orders[atomic_index], obj)); \
}
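For reference, the expansion is mechanical: the ## operator pastes the size into both the return type and the function name, so CDSATOMICLOAD(32) produces (modulo whitespace):

uint32_t cds_atomic_load32(void * obj, int atomic_index, const char * position) {
	ensureModel();
	return (uint32_t)model->switch_to_master(
		new ModelAction(ATOMIC_READ, position, orders[atomic_index], obj));
}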
// Do we have a non-atomic write with a non-zero clock?
return ((WRITEVECTOR(shadowval) != 0) && !(ATOMICMASK & shadowval));
} else {
+ if (shadowval == 0)
+ return false;
struct RaceRecord *record = (struct RaceRecord *)shadowval;
return !record->isAtomic && record->writeClock != 0;
}
if (ISSHORTRECORD(shadowval)) {
*shadow = shadowval | ATOMICMASK;
} else {
+ if (shadowval == 0)
+ return;
struct RaceRecord *record = (struct RaceRecord *)shadowval;
record->isAtomic = 1;
}
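Both added guards handle the same case: a shadow word that is still zero describes a location with no recorded accesses, so it is neither a short record nor a valid RaceRecord pointer, and casting it would dereference NULL. A standalone sketch of the hazard; RaceRecord and ISSHORTRECORD below are simplified stand-ins, not the checker's real layout:

#include <stdint.h>

struct RaceRecord { unsigned int isAtomic; };	/* simplified stand-in */
#define ISSHORTRECORD(x) ((x) & 0x1ULL)	/* illustrative tag bit only */

static void set_atomic(uint64_t *shadow) {
	uint64_t shadowval = *shadow;
	if (ISSHORTRECORD(shadowval)) {
		/* short record: the atomic flag lives in the word itself */
	} else {
		if (shadowval == 0)
			return;	/* fresh entry: not a pointer, nothing to mark */
		((struct RaceRecord *)(uintptr_t)shadowval)->isAtomic = 1;
	}
}

int main(void) {
	uint64_t shadow = 0;	/* never-accessed location */
	set_atomic(&shadow);	/* without the guard: NULL dereference */
	return 0;
}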
thrd_last_action(1),
thrd_last_fence_release(),
priv(new struct model_snapshot_members ()),
- mo_graph(new CycleGraph()),
+ mo_graph(new CycleGraph()),
fuzzer(new Fuzzer()),
thrd_func_list(),
thrd_func_inst_lists()
void insertIntoActionList(action_list_t *list, ModelAction *act) {
action_list_t::reverse_iterator rit = list->rbegin();
modelclock_t next_seq = act->get_seq_number();
- if ((*rit)->get_seq_number() == next_seq)
+ if (rit == list->rend() || (*rit)->get_seq_number() == next_seq)
list->push_back(act);
else {
		for(;rit != list->rend();rit++) {
			if ((*rit)->get_seq_number() == next_seq) {
				// rit.base() refers to the element just past the match,
				// so inserting there keeps the list ordered by seq number.
				list->insert(rit.base(), act);
				break;
			}
		}
	}
}
+void insertIntoActionListAndSetCV(action_list_t *list, ModelAction *act) {
+ action_list_t::reverse_iterator rit = list->rbegin();
+ modelclock_t next_seq = act->get_seq_number();
+	if (rit == list->rend()) {
+		act->create_cv(NULL);
+		list->push_back(act);
+ } else if ((*rit)->get_seq_number() == next_seq) {
+ act->create_cv((*rit));
+ list->push_back(act);
+ } else {
+ for(;rit != list->rend();rit++) {
+ if ((*rit)->get_seq_number() == next_seq) {
+ act->create_cv((*rit));
+				// rit.base() already refers to the element just past the
+				// matched action, so the new action is inserted right there.
+				list->insert(rit.base(), act);
+ break;
+ }
+ }
+ }
+}
+
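The insertion position above comes from std::reverse_iterator::base(), whose contract is &*(rit.base() - 1) == &*rit: base() already refers to the element just past the one rit points at, which is why no extra increment is needed before the insert. A minimal standalone check:

#include <cassert>
#include <list>

int main() {
	std::list<int> l;
	l.push_back(1); l.push_back(2); l.push_back(4);
	// Walk backwards to the element 2, as the insertion loop above does.
	std::list<int>::reverse_iterator rit = l.rbegin();
	while (*rit != 2)
		rit++;
	// base() points just past *rit, so inserting there places the new
	// element immediately after 2 and keeps the list ordered.
	l.insert(rit.base(), 3);
	int expect = 1;
	for (std::list<int>::iterator it = l.begin();it != l.end();it++)
		assert(*it == expect++);	// 1, 2, 3, 4
	return 0;
}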
/**
* Performs various bookkeeping operations for a normal write. The
* complication is that we are typically inserting a normal write
* lazily, so it must be spliced into the middle of the action lists
* rather than simply appended.
*/
int tid = id_to_int(act->get_tid());
action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
insertIntoActionList(list, act);
- insertIntoActionList(&action_trace, act);
+ insertIntoActionListAndSetCV(&action_trace, act);
// Update obj_thrd_map, a per location, per thread, order of actions
SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
HashSet <_Key, _KeyInt, _Shift, _malloc, _calloc, _free, hash_function, equals> * set;
};
-template<typename _Key, typename _KeyInt, int _Shift = 0, void * (* _malloc)(size_t) = snapshot_malloc, void * (* _calloc)(size_t, size_t) = snapshot_calloc, void (*_free)(void *) = snapshot_free, unsigned int (*hash_function)(_Key) = default_hash_function<_Key, _Shift, _KeyInt>, bool (*equals)(_Key, _Key) = default_equals<_Key> >
+template<typename _Key, typename _KeyInt, int _Shift = 0, void * (*_malloc)(size_t) = snapshot_malloc, void * (*_calloc)(size_t, size_t) = snapshot_calloc, void (*_free)(void *) = snapshot_free, unsigned int (*hash_function)(_Key) = default_hash_function<_Key, _Shift, _KeyInt>, bool (*equals)(_Key, _Key) = default_equals<_Key> >
class HashSet {
public:
HashSet(unsigned int initialcapacity = 16, double factor = 0.5) :
* @tparam _free Provide your own 'free' for the table, or default to
* snapshotting.
*/
-template<typename _Key, typename _Val, typename _KeyInt, int _Shift = 0, void * (* _malloc)(size_t) = snapshot_malloc, void * (* _calloc)(size_t, size_t) = snapshot_calloc, void (*_free)(void *) = snapshot_free, unsigned int (*hash_function)(_Key) = default_hash_function<_Key, _Shift, _KeyInt>, bool (*equals)(_Key, _Key) = default_equals<_Key> >
+template<typename _Key, typename _Val, typename _KeyInt, int _Shift = 0, void * (*_malloc)(size_t) = snapshot_malloc, void * (*_calloc)(size_t, size_t) = snapshot_calloc, void (*_free)(void *) = snapshot_free, unsigned int (*hash_function)(_Key) = default_hash_function<_Key, _Shift, _KeyInt>, bool (*equals)(_Key, _Key) = default_equals<_Key> >
class HashTable {
public:
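As a usage sketch grounded in how the model checker instantiates this table elsewhere: keys are object addresses, so _KeyInt is uintptr_t and a _Shift of 4 drops low-order bits that alignment keeps at zero, while the allocator parameters default to the snapshot-aware versions. The put/get calls below assume the table's usual accessor names:

HashTable<const void *, action_list_t *, uintptr_t, 4> obj_map;

// Fetch the per-address action list, creating it on first use.
action_list_t * get_list(const void *addr) {
	action_list_t *list = obj_map.get(addr);
	if (!list) {
		list = new action_list_t();
		obj_map.put(addr, list);
	}
	return list;
}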
#define _ATOMIC_LOAD_( __a__, __x__ ) \
({ volatile __typeof__((__a__)->__f__)* __p__ = &((__a__)->__f__); \
- __typeof__((__a__)->__f__) __r__ = (__typeof__((__a__)->__f__))model_read_action((void *)__p__, __x__); \
+ __typeof__((__a__)->__f__)__r__ = (__typeof__((__a__)->__f__))model_read_action((void *)__p__, __x__); \
__r__; })
#define _ATOMIC_STORE_( __a__, __m__, __x__ ) \
({ volatile __typeof__((__a__)->__f__)* __p__ = &((__a__)->__f__); \
- __typeof__(__m__) __v__ = (__m__); \
+ __typeof__(__m__)__v__ = (__m__); \
model_write_action((void *) __p__, __x__, (uint64_t) __v__); \
__v__ = __v__; /* Silence clang (-Wunused-value) */ \
})
#define _ATOMIC_INIT_( __a__, __m__ ) \
({ volatile __typeof__((__a__)->__f__)* __p__ = &((__a__)->__f__); \
- __typeof__(__m__) __v__ = (__m__); \
+ __typeof__(__m__)__v__ = (__m__); \
model_init_action((void *) __p__, (uint64_t) __v__); \
__v__ = __v__; /* Silence clang (-Wunused-value) */ \
})
#define _ATOMIC_MODIFY_( __a__, __o__, __m__, __x__ ) \
({ volatile __typeof__((__a__)->__f__)* __p__ = &((__a__)->__f__); \
- __typeof__((__a__)->__f__) __old__=(__typeof__((__a__)->__f__))model_rmwr_action((void *)__p__, __x__); \
- __typeof__(__m__) __v__ = (__m__); \
- __typeof__((__a__)->__f__) __copy__= __old__; \
+ __typeof__((__a__)->__f__)__old__=(__typeof__((__a__)->__f__))model_rmwr_action((void *)__p__, __x__); \
+ __typeof__(__m__)__v__ = (__m__); \
+ __typeof__((__a__)->__f__)__copy__= __old__; \
__copy__ __o__ __v__; \
model_rmw_action((void *)__p__, __x__, (uint64_t) __copy__); \
	__old__ = __old__; /* Silence clang (-Wunused-value) */ \
	__old__; })
#define _ATOMIC_CMPSWP_( __a__, __e__, __m__, __x__ ) \
({ volatile __typeof__((__a__)->__f__)* __p__ = &((__a__)->__f__); \
- __typeof__(__e__) __q__ = (__e__); \
- __typeof__(__m__) __v__ = (__m__); \
+ __typeof__(__e__)__q__ = (__e__); \
+ __typeof__(__m__)__v__ = (__m__); \
bool __r__; \
- __typeof__((__a__)->__f__) __t__=(__typeof__((__a__)->__f__))model_rmwrcas_action((void *)__p__, __x__, (uint64_t) *__q__, sizeof((__a__)->__f__)); \
+ __typeof__((__a__)->__f__)__t__=(__typeof__((__a__)->__f__))model_rmwrcas_action((void *)__p__, __x__, (uint64_t) *__q__, sizeof((__a__)->__f__)); \
if (__t__ == *__q__ ) {; \
model_rmw_action((void *)__p__, __x__, (uint64_t) __v__); __r__ = true; } \
	else { model_rmwc_action((void *)__p__, __x__); *__q__ = __t__; __r__ = false;} \
	__r__; })
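De-sugared, the compare-and-swap macro splits one CAS into model-checker events: model_rmwrcas_action supplies the value chosen for the read half of the RMW, after which the write half is either committed with model_rmw_action or cancelled with model_rmwc_action. With T, p, q, v and order standing in for the __typeof__/__p__/__q__/__v__/__x__ plumbing:

T t = (T)model_rmwrcas_action((void *)p, order, (uint64_t)*q, sizeof(*p));
bool r;
if (t == *q) {
	/* observed value matches: commit the write half with the desired value */
	model_rmw_action((void *)p, order, (uint64_t)v);
	r = true;
} else {
	/* mismatch: cancel the pending RMW and report what was observed */
	model_rmwc_action((void *)p, order);
	*q = t;
	r = false;
}
/* r is the macro's result */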
( volatile atomic_address* __a__, ptrdiff_t __m__, memory_order __x__ )
{
volatile __typeof__((__a__)->__f__)* __p__ = &((__a__)->__f__);
- __typeof__((__a__)->__f__) __old__=(__typeof__((__a__)->__f__))model_rmwr_action((void *)__p__, __x__);
- __typeof__((__a__)->__f__) __copy__= __old__;
+ __typeof__((__a__)->__f__)__old__=(__typeof__((__a__)->__f__))model_rmwr_action((void *)__p__, __x__);
+ __typeof__((__a__)->__f__)__copy__= __old__;
__copy__ = (void *) (((char *)__copy__) + __m__);
model_rmw_action((void *)__p__, __x__, (uint64_t) __copy__);
return __old__;
( volatile atomic_address* __a__, ptrdiff_t __m__, memory_order __x__ )
{
volatile __typeof__((__a__)->__f__)* __p__ = &((__a__)->__f__);
- __typeof__((__a__)->__f__) __old__=(__typeof__((__a__)->__f__))model_rmwr_action((void *)__p__, __x__);
- __typeof__((__a__)->__f__) __copy__= __old__;
+ __typeof__((__a__)->__f__)__old__=(__typeof__((__a__)->__f__))model_rmwr_action((void *)__p__, __x__);
+ __typeof__((__a__)->__f__)__copy__= __old__;
__copy__ = (void *) (((char *)__copy__) - __m__);
model_rmw_action((void *)__p__, __x__, (uint64_t) __copy__);
return __old__;
#define is_normal_mo(x) ((x >= memory_order_relaxed && x <= memory_order_seq_cst) || x == memory_order_normal)
#define assert_infer(x) for (int i = 0;i <= wildcardNum;i++) \
- ASSERT(is_normal_mo_infer((x[i])));
+ ASSERT(is_normal_mo_infer((x[i])));
#define assert_infers(x) for (ModelList<memory_order *>::iterator iter = \
(x)->begin();iter != (x)->end();iter++) \
- assert_infer((*iter));
+ assert_infer((*iter));
#define relaxed memory_order_relaxed
#define release memory_order_release
bugs->size(),
bugs->size() > 1 ? "s" : "");
for (unsigned int i = 0;i < bugs->size();i++)
- (*bugs)[i]->print();
+ (*bugs)[i] -> print();
}
/**
* @brief Record end-of-execution statistics for the current execution.
*/
void ModelChecker::record_stats()
{
- stats.num_total++;
+ stats.num_total ++;
if (!execution->isfeasibleprefix())
- stats.num_infeasible++;
+ stats.num_infeasible ++;
else if (execution->have_bug_reports())
- stats.num_buggy_executions++;
+ stats.num_buggy_executions ++;
else if (execution->is_complete_execution())
- stats.num_complete++;
+ stats.num_complete ++;
else {
- stats.num_redundant++;
+ stats.num_redundant ++;
/**
* @todo We can violate this ASSERT() when fairness/sleep sets
* interact to terminate an execution early.
*/
return true;
}
// Set up the next execution: bump the count and restore the initial snapshot
- execution_number++;
+ execution_number ++;
reset_to_initial_state();
return false;
}
/** @brief Run trace analyses on complete trace */
void ModelChecker::run_trace_analyses() {
- for (unsigned int i = 0;i < trace_analyses.size();i++)
- trace_analyses[i]->analyze(execution->get_action_trace());
+ for (unsigned int i = 0;i < trace_analyses.size();i ++)
+ trace_analyses[i] -> analyze(execution->get_action_trace());
}
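Each registered analysis gets the full action trace once an execution completes. A hypothetical plugin sketch, assuming only the analyze(action_list_t *) hook that this loop calls (a real TraceAnalysis subclass would also have to implement the interface's other virtual members):

// Hypothetical example: reports the length of each completed trace.
class TraceLengthAnalysis : public TraceAnalysis {
public:
	void analyze(action_list_t *trace) {
		unsigned int len = 0;
		for (action_list_t::iterator it = trace->begin();it != trace->end();it++)
			len++;
		model_print("trace length: %u\n", len);
	}
};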
/**