thrd_last_action(1),
thrd_last_fence_release(),
priv(new struct model_snapshot_members ()),
- mo_graph(new CycleGraph()),
+ mo_graph(new CycleGraph()),
fuzzer(new Fuzzer()),
thrd_func_list(),
- thrd_func_inst_lists()
+ thrd_func_inst_lists(),
+ isfinished(false)
{
/* Initialize a model-checker thread, for special ModelActions */
model_thread = new Thread(get_next_id());
* @param curr The current action
- * @return True if synchronization was updated or a thread completed
*/
-bool ModelExecution::process_thread_action(ModelAction *curr)
+void ModelExecution::process_thread_action(ModelAction *curr)
{
- bool updated = false;
-
switch (curr->get_type()) {
case THREAD_CREATE: {
thrd_t *thrd = (thrd_t *)curr->get_location();
Thread *blocking = curr->get_thread_operand();
ModelAction *act = get_last_action(blocking->get_id());
synchronize(act, curr);
- updated = true; /* trigger rel-seq checks */
break;
}
case PTHREAD_JOIN: {
Thread *blocking = curr->get_thread_operand();
ModelAction *act = get_last_action(blocking->get_id());
synchronize(act, curr);
- updated = true; /* trigger rel-seq checks */
break;	// WL: to be added (modified)
}
+ case THREADONLY_FINISH:
case THREAD_FINISH: {
Thread *th = get_thread(curr);
+ if (curr->get_type() == THREAD_FINISH &&
+ th == model->getInitThread()) {
+ th->complete();
+ setFinished();
+ break;
+ }
+
/* Wake up any joining threads */
for (unsigned int i = 0;i < get_num_threads();i++) {
Thread *waiting = get_thread(int_to_id(i));
scheduler->wake(waiting);
}
th->complete();
- updated = true; /* trigger rel-seq checks */
break;
}
case THREAD_START: {
default:
break;
}
-
- return updated;
}
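
With the return type changed to void, ModelExecution no longer folds a result from thread actions into an "updated" flag. A minimal sketch of the new calling pattern, assuming a hypothetical helper (handle_thread_op_example is an illustration, not the real dispatcher in execution.cc):

/* Hypothetical member, shown only to illustrate the void-return call pattern. */
void ModelExecution::handle_thread_op_example(ModelAction *curr)
{
	switch (curr->get_type()) {
	case THREAD_CREATE:
	case PTHREAD_JOIN:
	case THREAD_START:
	case THREADONLY_FINISH:
	case THREAD_FINISH:
		process_thread_action(curr);	/* void now: no "updated" result to trigger rel-seq checks */
		break;
	default:
		break;
	}
}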
/**
SnapVector<func_id_list_t> * get_thrd_func_list() { return &thrd_func_list; }
SnapVector< SnapList<func_inst_list_t *> *> * get_thrd_func_inst_lists() { return &thrd_func_inst_lists; }
+ bool isFinished() {return isfinished;}
+ void setFinished() {isfinished = true;}
SNAPSHOTALLOC
private:
bool process_fence(ModelAction *curr);
bool process_mutex(ModelAction *curr);
- bool process_thread_action(ModelAction *curr);
+ void process_thread_action(ModelAction *curr);
void read_from(ModelAction *act, ModelAction *rf);
bool synchronize(const ModelAction *first, ModelAction *second);
* This data structure is handled by ModelHistory
*/
SnapVector< SnapList< func_inst_list_t *> *> thrd_func_inst_lists;
+ bool isfinished;
};
#endif /* __EXECUTION_H__ */
bool FuncInst::is_read() const
{
- return type == ATOMIC_READ || type == ATOMIC_RMWR || type == ATOMIC_RMWRCAS; /* type == ATOMIC_RMW ? */
+ return type == ATOMIC_READ || type == ATOMIC_RMWR || type == ATOMIC_RMWRCAS; /* type == ATOMIC_RMW ? */
}
bool FuncInst::is_write() const
{
- return type == ATOMIC_WRITE || type == ATOMIC_RMW || type == ATOMIC_INIT || type == ATOMIC_UNINIT || type == NONATOMIC_WRITE;
+ return type == ATOMIC_WRITE || type == ATOMIC_RMW || type == ATOMIC_INIT || type == ATOMIC_UNINIT || type == NONATOMIC_WRITE;
}
func_inst_list_mt successors;
};
-#endif /* __FUNCINST_H__ */
+#endif /* __FUNCINST_H__ */
uint32_t old_size = thrd_read_map.size();
if (old_size <= tid) {
thrd_read_map.resize(tid + 1);
- for (uint32_t i = old_size; i < tid + 1; i++)
+ for (uint32_t i = old_size;i < tid + 1;i++)
thrd_read_map[i] = new read_map_t();
}
void FuncNode::generate_predicate(FuncInst *func_inst)
{
-
+
}
/* @param tid thread id
ModelList<void *> read_locations;
};
-#endif /* __FUNCNODE_H__ */
+#endif /* __FUNCNODE_H__ */
HashTable<void *, write_set_t *, uintptr_t, 4, model_malloc, model_calloc, model_free> write_history;
};
-#endif /* __HISTORY_H__ */
+#endif /* __HISTORY_H__ */
#define _ATOMIC_LOAD_( __a__, __x__ ) \
({ volatile __typeof__((__a__)->__f__)* __p__ = &((__a__)->__f__); \
- __typeof__((__a__)->__f__)__r__ = (__typeof__((__a__)->__f__))model_read_action((void *)__p__, __x__); \
+ __typeof__((__a__)->__f__) __r__ = (__typeof__((__a__)->__f__))model_read_action((void *)__p__, __x__); \
__r__; })
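
Each of these macros funnels the program's atomic operation into a model_* hook so the checker observes it. As a hand-expanded illustration (assuming an atomic object a whose payload field __f__ has type int, following the macro text), _ATOMIC_LOAD_(&a, memory_order_acquire) expands roughly to:

({ volatile int *__p__ = &((&a)->__f__);	/* address of the payload field */
	int __r__ = (int)model_read_action((void *)__p__, memory_order_acquire);	/* checker performs the read */
	__r__; })	/* value of the statement expression */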
#define _ATOMIC_STORE_( __a__, __m__, __x__ ) \
({ volatile __typeof__((__a__)->__f__)* __p__ = &((__a__)->__f__); \
- __typeof__(__m__)__v__ = (__m__); \
+ __typeof__(__m__) __v__ = (__m__); \
model_write_action((void *) __p__, __x__, (uint64_t) __v__); \
__v__ = __v__; /* Silence clang (-Wunused-value) */ \
})
#define _ATOMIC_INIT_( __a__, __m__ ) \
({ volatile __typeof__((__a__)->__f__)* __p__ = &((__a__)->__f__); \
- __typeof__(__m__)__v__ = (__m__); \
+ __typeof__(__m__) __v__ = (__m__); \
model_init_action((void *) __p__, (uint64_t) __v__); \
__v__ = __v__; /* Silence clang (-Wunused-value) */ \
})
#define _ATOMIC_MODIFY_( __a__, __o__, __m__, __x__ ) \
({ volatile __typeof__((__a__)->__f__)* __p__ = &((__a__)->__f__); \
- __typeof__((__a__)->__f__)__old__=(__typeof__((__a__)->__f__))model_rmwr_action((void *)__p__, __x__); \
- __typeof__(__m__)__v__ = (__m__); \
- __typeof__((__a__)->__f__)__copy__= __old__; \
+ __typeof__((__a__)->__f__) __old__=(__typeof__((__a__)->__f__))model_rmwr_action((void *)__p__, __x__); \
+ __typeof__(__m__) __v__ = (__m__); \
+ __typeof__((__a__)->__f__) __copy__= __old__; \
__copy__ __o__ __v__; \
model_rmw_action((void *)__p__, __x__, (uint64_t) __copy__); \
__old__ = __old__; /* Silence clang (-Wunused-value) */ \
#define _ATOMIC_CMPSWP_( __a__, __e__, __m__, __x__ ) \
({ volatile __typeof__((__a__)->__f__)* __p__ = &((__a__)->__f__); \
- __typeof__(__e__)__q__ = (__e__); \
- __typeof__(__m__)__v__ = (__m__); \
+ __typeof__(__e__) __q__ = (__e__); \
+ __typeof__(__m__) __v__ = (__m__); \
bool __r__; \
- __typeof__((__a__)->__f__)__t__=(__typeof__((__a__)->__f__))model_rmwrcas_action((void *)__p__, __x__, (uint64_t) *__q__, sizeof((__a__)->__f__)); \
+ __typeof__((__a__)->__f__) __t__=(__typeof__((__a__)->__f__))model_rmwrcas_action((void *)__p__, __x__, (uint64_t) *__q__, sizeof((__a__)->__f__)); \
if (__t__ == *__q__ ) {; \
model_rmw_action((void *)__p__, __x__, (uint64_t) __v__); __r__ = true; } \
else { model_rmwc_action((void *)__p__, __x__); *__q__ = __t__; __r__ = false;} \
( volatile atomic_address* __a__, ptrdiff_t __m__, memory_order __x__ )
{
volatile __typeof__((__a__)->__f__)* __p__ = &((__a__)->__f__);
- __typeof__((__a__)->__f__)__old__=(__typeof__((__a__)->__f__))model_rmwr_action((void *)__p__, __x__);
- __typeof__((__a__)->__f__)__copy__= __old__;
+ __typeof__((__a__)->__f__) __old__=(__typeof__((__a__)->__f__))model_rmwr_action((void *)__p__, __x__);
+ __typeof__((__a__)->__f__) __copy__= __old__;
__copy__ = (void *) (((char *)__copy__) + __m__);
model_rmw_action((void *)__p__, __x__, (uint64_t) __copy__);
return __old__;
( volatile atomic_address* __a__, ptrdiff_t __m__, memory_order __x__ )
{
volatile __typeof__((__a__)->__f__)* __p__ = &((__a__)->__f__);
- __typeof__((__a__)->__f__)__old__=(__typeof__((__a__)->__f__))model_rmwr_action((void *)__p__, __x__);
- __typeof__((__a__)->__f__)__copy__= __old__;
+ __typeof__((__a__)->__f__) __old__=(__typeof__((__a__)->__f__))model_rmwr_action((void *)__p__, __x__);
+ __typeof__((__a__)->__f__) __copy__= __old__;
__copy__ = (void *) (((char *)__copy__) - __m__);
model_rmw_action((void *)__p__, __x__, (uint64_t) __copy__);
return __old__;
#define is_normal_mo(x) ((x >= memory_order_relaxed && x <= memory_order_seq_cst) || x == memory_order_normal)
#define assert_infer(x) for (int i = 0;i <= wildcardNum;i++) \
- ASSERT(is_normal_mo_infer((x[i])));
+ ASSERT(is_normal_mo_infer((x[i])));
#define assert_infers(x) for (ModelList<memory_order *>::iterator iter = \
(x)->begin();iter != (x)->end();iter++) \
- assert_infer((*iter));
+ assert_infer((*iter));
#define relaxed memory_order_relaxed
#define release memory_order_release
bugs->size(),
bugs->size() > 1 ? "s" : "");
for (unsigned int i = 0;i < bugs->size();i++)
- (*bugs)[i] -> print();
+ (*bugs)[i]->print();
}
/**
*/
void ModelChecker::record_stats()
{
- stats.num_total ++;
+ stats.num_total++;
if (!execution->isfeasibleprefix())
- stats.num_infeasible ++;
+ stats.num_infeasible++;
else if (execution->have_bug_reports())
- stats.num_buggy_executions ++;
+ stats.num_buggy_executions++;
else if (execution->is_complete_execution())
- stats.num_complete ++;
+ stats.num_complete++;
else {
- stats.num_redundant ++;
+ stats.num_redundant++;
/**
* @todo We can violate this ASSERT() when fairness/sleep sets
return true;
}
// test code
- execution_number ++;
+ execution_number++;
reset_to_initial_state();
return false;
}
/** @brief Run trace analyses on complete trace */
void ModelChecker::run_trace_analyses() {
- for (unsigned int i = 0;i < trace_analyses.size();i ++)
- trace_analyses[i] -> analyze(execution->get_action_trace());
+ for (unsigned int i = 0;i < trace_analyses.size();i++)
+ trace_analyses[i]->analyze(execution->get_action_trace());
}
/**
else if (execution->isfeasibleprefix() && execution->have_fatal_bug_reports()) {
execution->set_assert();
return true;
+ } else if (execution->isFinished()) {
+ return true;
}
return false;
}
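
The new isFinished() branch connects the THREAD_FINISH handling above to termination: once the initial thread completes, setFinished() raises the flag and the next should_terminate_execution() call returns true. A self-contained sketch of that handshake, using simplified stand-in types (ExecutionLike and should_terminate are illustrations, not the real classes):

#include <cassert>

struct ExecutionLike {
	bool isfinished = false;
	bool isFinished() { return isfinished; }
	void setFinished() { isfinished = true; }
};

static bool should_terminate(ExecutionLike *execution)
{
	/* bug/feasibility checks would come first, as in model.cc */
	if (execution->isFinished())
		return true;
	return false;
}

int main()
{
	ExecutionLike exec;
	assert(!should_terminate(&exec));
	exec.setFinished();	/* e.g. when the init thread's THREAD_FINISH is processed */
	assert(should_terminate(&exec));
	return 0;
}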
void Predicate::add_predicate(predicate_expr predicate)
{
ModelList<predicate_expr>::iterator it;
- for (it = predicates.begin(); it != predicates.end(); it++) {
+ for (it = predicates.begin();it != predicates.end();it++) {
if (predicate == *it)
return;
}
} token_t;
/* If token is EQUALITY, then the predicate asserts whether
- * this load should read the same value as the last value
+ * this load should read the same value as the last value
* read at memory location specified in predicate_expr.
*/
struct predicate_expr {