/* --- helper functions --- */
+/* Stash an ATOMIC_RMWRCAS (the read half of a compare-exchange) with the model
+ * checker and yield to it; returns the value the model supplies for the read. */
uint64_t model_rmwrcas_action_helper(void *obj, int atomic_index, uint64_t oldval, int size, const char *position) {
	ensureModel();
-	return model->switch_to_master(new ModelAction(ATOMIC_RMWRCAS, position, orders[atomic_index], obj, oldval, size));
+	return model->switch_thread(new ModelAction(ATOMIC_RMWRCAS, position, orders[atomic_index], obj, oldval, size));
}
+/* Stash an ATOMIC_RMWR (read half of a read-modify-write) with the model
+ * checker; returns the value the model supplies for the read. */
uint64_t model_rmwr_action_helper(void *obj, int atomic_index, const char *position) {
	ensureModel();
-	return model->switch_to_master(new ModelAction(ATOMIC_RMWR, position, orders[atomic_index], obj));
+	return model->switch_thread(new ModelAction(ATOMIC_RMWR, position, orders[atomic_index], obj));
}
+/* Stash an ATOMIC_RMW (the write half of a read-modify-write) carrying the
+ * value to be written; no result is needed by the caller. */
void model_rmw_action_helper(void *obj, uint64_t val, int atomic_index, const char * position) {
	ensureModel();
-	model->switch_to_master(new ModelAction(ATOMIC_RMW, position, orders[atomic_index], obj, val));
+	model->switch_thread(new ModelAction(ATOMIC_RMW, position, orders[atomic_index], obj, val));
}
+/* Stash an ATOMIC_RMWC action — presumably the "RMW without a write" case
+ * (e.g. a failed compare-exchange); TODO confirm against ModelAction docs. */
void model_rmwc_action_helper(void *obj, int atomic_index, const char *position) {
	ensureModel();
-	model->switch_to_master(new ModelAction(ATOMIC_RMWC, position, orders[atomic_index], obj));
+	model->switch_thread(new ModelAction(ATOMIC_RMWC, position, orders[atomic_index], obj));
}
+/* VOLATILELOAD(N) defines cds_volatile_loadN(): models a volatile load as an
+ * ATOMIC_READ with memory_order_volatile_load and returns the model-chosen
+ * value cast to the right width.  Comments stay outside the macro because a
+ * '//' before the '\' line continuations would swallow the following line. */
// cds volatile loads
#define VOLATILELOAD(size) \
	uint ## size ## _t cds_volatile_load ## size(void * obj, const char * position) { \
		ensureModel(); \
-		return (uint ## size ## _t)model->switch_to_master(new ModelAction(ATOMIC_READ, position, memory_order_volatile_load, obj)); \
+		return (uint ## size ## _t)model->switch_thread(new ModelAction(ATOMIC_READ, position, memory_order_volatile_load, obj)); \
	}
VOLATILELOAD(8)
#define VOLATILESTORE(size) \
void cds_volatile_store ## size (void * obj, uint ## size ## _t val, const char * position) { \
ensureModel(); \
- model->switch_to_master(new ModelAction(ATOMIC_WRITE, position, memory_order_volatile_store, obj, (uint64_t) val)); \
+ model->switch_thread(new ModelAction(ATOMIC_WRITE, position, memory_order_volatile_store, obj, (uint64_t) val)); \
*((volatile uint ## size ## _t *)obj) = val; \
thread_id_t tid = thread_current()->get_id(); \
for(int i=0;i < size / 8;i++) { \
#define CDSATOMICINT(size) \
void cds_atomic_init ## size (void * obj, uint ## size ## _t val, const char * position) { \
ensureModel(); \
- model->switch_to_master(new ModelAction(ATOMIC_INIT, position, memory_order_relaxed, obj, (uint64_t) val)); \
+ model->switch_thread(new ModelAction(ATOMIC_INIT, position, memory_order_relaxed, obj, (uint64_t) val)); \
*((volatile uint ## size ## _t *)obj) = val; \
thread_id_t tid = thread_current()->get_id(); \
for(int i=0;i < size / 8;i++) { \
#define CDSATOMICLOAD(size) \
uint ## size ## _t cds_atomic_load ## size(void * obj, int atomic_index, const char * position) { \
ensureModel(); \
- uint ## size ## _t val = (uint ## size ## _t)model->switch_to_master( \
+ uint ## size ## _t val = (uint ## size ## _t)model->switch_thread( \
new ModelAction(ATOMIC_READ, position, orders[atomic_index], obj)); \
thread_id_t tid = thread_current()->get_id(); \
for(int i=0;i < size / 8;i++) { \
#define CDSATOMICSTORE(size) \
void cds_atomic_store ## size(void * obj, uint ## size ## _t val, int atomic_index, const char * position) { \
ensureModel(); \
- model->switch_to_master(new ModelAction(ATOMIC_WRITE, position, orders[atomic_index], obj, (uint64_t) val)); \
+ model->switch_thread(new ModelAction(ATOMIC_WRITE, position, orders[atomic_index], obj, (uint64_t) val)); \
*((volatile uint ## size ## _t *)obj) = val; \
thread_id_t tid = thread_current()->get_id(); \
for(int i=0;i < size / 8;i++) { \
// cds atomic thread fence
void cds_atomic_thread_fence(int atomic_index, const char * position) {
- model->switch_to_master(
+ model->switch_thread(
new ModelAction(ATOMIC_FENCE, position, orders[atomic_index], FENCE_LOCATION)
);
}
}
+/* Model notify-one as an ATOMIC_NOTIFY_ONE action on this condition variable;
+ * seq_cst here is just a placeholder ordering for the action. */
void condition_variable::notify_one() {
-	model->switch_to_master(new ModelAction(ATOMIC_NOTIFY_ONE, std::memory_order_seq_cst, this));
+	model->switch_thread(new ModelAction(ATOMIC_NOTIFY_ONE, std::memory_order_seq_cst, this));
}
+/* Model notify-all as an ATOMIC_NOTIFY_ALL action on this condition variable. */
void condition_variable::notify_all() {
-	model->switch_to_master(new ModelAction(ATOMIC_NOTIFY_ALL, std::memory_order_seq_cst, this));
+	model->switch_thread(new ModelAction(ATOMIC_NOTIFY_ALL, std::memory_order_seq_cst, this));
}
+/* Model a condvar wait: the ATOMIC_WAIT action carries the mutex address and
+ * releases/blocks inside the model; the lock is then re-acquired explicitly
+ * as a separate modeled action, matching condvar semantics. */
void condition_variable::wait(mutex& lock) {
-	model->switch_to_master(new ModelAction(ATOMIC_WAIT, std::memory_order_seq_cst, this, (uint64_t) &lock));
+	model->switch_thread(new ModelAction(ATOMIC_WAIT, std::memory_order_seq_cst, this, (uint64_t) &lock));
	//relock as a second action
	lock.lock();
}
/** A no-op, for now */
+/* Records a THREAD_YIELD action for the current thread so the scheduler can
+ * observe the yield, but performs no real rescheduling itself. */
void thrd_yield(void)
{
-	model->switch_to_master(new ModelAction(THREAD_YIELD, std::memory_order_seq_cst, thread_current(), VALUE_NONE));
+	model->switch_thread(new ModelAction(THREAD_YIELD, std::memory_order_seq_cst, thread_current(), VALUE_NONE));
}
thrd_t thrd_current(void)
return old->get_return_value();
}
+/* Begin (or resume) a scheduling round for the current execution.
+ * First trims the action trace when it has grown past the collection
+ * threshold, then restarts the thread scan and swaps into the first thread
+ * that still needs to stash a pending action.  `old` is the context we swap
+ * away from; NULL means we are running in the system context (top of the
+ * run loop).  If every thread already has a pending action, falls through
+ * to handleChosenThread() to actually consume one. */
+void ModelChecker::startRunExecution(Thread *old)
+{
+	/* Periodically free old model actions once past the threshold. */
+	if (params.traceminsize != 0 &&
+			execution->get_curr_seq_num() > checkfree) {
+		checkfree += params.checkthreshold;
+		execution->collectActions();
+	}
+
+	/* Reset per-round selection state; thread ids start at 1. */
+	thread_chosen = false;
+	curr_thread_num = 1;
+	Thread *thr = getNextThread();
+	if (thr != nullptr) {
+		scheduler->set_current_thread(thr);
+		/* Swap from the old thread — or from the system context — into thr
+		 * so it can stash its next pending action. */
+		if (old) {
+			if (Thread::swap(old, thr) < 0) {
+				perror("swap threads");
+				exit(EXIT_FAILURE);
+			}
+		} else {
+			if (Thread::swap(&system_context, thr) < 0) {
+				perror("swap threads");
+				exit(EXIT_FAILURE);
+			}
+		}
+	} else
+		handleChosenThread(old);
+}
+
+/* Scan threads from curr_thread_num upward and return the first live thread
+ * with no stashed pending action — that thread must run next so it can stash
+ * one.  Threads that already have a pending action are put to sleep if the
+ * action is currently disabled, and offered to chooseThread() for eager
+ * selection.  Returns nullptr when every thread already has a pending
+ * action or is complete. */
+Thread* ModelChecker::getNextThread()
+{
+	Thread *nextThread = nullptr;
+	for (unsigned int i = curr_thread_num; i < get_num_threads(); i++) {
+		thread_id_t tid = int_to_id(i);
+		Thread *thr = get_thread(tid);
+
+		/* Live thread with nothing stashed: it takes the next step. */
+		if (!thr->is_complete() && !thr->get_pending()) {
+			curr_thread_num = i;
+			nextThread = thr;
+			break;
+		}
+		ModelAction *act = thr->get_pending();
+
+		/* Don't schedule threads whose pending action can't run yet. */
+		if (act && execution->is_enabled(thr) && !execution->check_action_enabled(act)) {
+			scheduler->sleep(thr);
+		}
+		chooseThread(act, thr);
+	}
+	return nextThread;
+}
+
+/* Swap back to system_context and terminate this execution.
+ * break_execution must be set BEFORE swapping away: once control moves to
+ * system_context this (finished) thread is never resumed within this
+ * execution, so any statement after the swap is unreachable and the run
+ * loop's `if (break_execution) break;` would never fire. */
+void ModelChecker::finishRunExecution(Thread *old)
+{
+	scheduler->set_current_thread(NULL);
+	break_execution = true;
+	if (old != NULL) {
+		if (Thread::swap(old, &system_context) < 0) {
+			perror("swap threads");
+			exit(EXIT_FAILURE);
+		}
+	}
+}
+
+/* Pop and execute the pending action of chosen_thread.
+ * THREAD_FINISH is special-cased: it must be consumed from the master
+ * (system) context, so when we are currently on a user thread we swap out
+ * to system_context instead of taking the step here.  Otherwise the action
+ * is handed to the execution, which returns the next chosen thread. */
+void ModelChecker::consumeAction()
+{
+	ModelAction *curr = chosen_thread->get_pending();
+	Thread * th = thread_current();
+	if (curr->get_type() == THREAD_FINISH && th != NULL) {
+		// Thread finish must be consumed in the master context
+		scheduler->set_current_thread(NULL);
+		if (Thread::swap(th, &system_context) < 0) {
+			perror("swap threads");
+			exit(EXIT_FAILURE);
+		}
+	} else {
+		chosen_thread->set_pending(NULL);
+		chosen_thread = execution->take_step(curr);
+	}
+}
+
+/* Eagerly select thr as the next chosen_thread when its pending action can
+ * be committed immediately: relaxed/release writes and thread lifecycle
+ * actions (create/start/finish).  Only the first such thread in a round is
+ * taken (thread_chosen latches), and blocked/disabled threads are skipped.
+ * NOTE(review): selection criteria inferred from the action types checked
+ * here — confirm against the exploration strategy this mirrors. */
+void ModelChecker::chooseThread(ModelAction *act, Thread *thr)
+{
+	if (!thread_chosen && act && execution->is_enabled(thr) && (thr->get_state() != THREAD_BLOCKED) ) {
+		if (act->is_write()) {
+			std::memory_order order = act->get_mo();
+			if (order == std::memory_order_relaxed || \
+					order == std::memory_order_release) {
+				chosen_thread = thr;
+				thread_chosen = true;
+			}
+		} else if (act->get_type() == THREAD_CREATE || \
+							 act->get_type() == PTHREAD_CREATE || \
+							 act->get_type() == THREAD_START || \
+							 act->get_type() == THREAD_FINISH) {
+			chosen_thread = thr;
+			thread_chosen = true;
+		}
+	}
+}
+
+/* Entry point from instrumented program operations: stash `act` as the
+ * current thread's pending action and hand control to the next thread that
+ * still needs to stash one (or, if none, consume an action via
+ * handleChosenThread).  Returns this thread's action return value once it
+ * is scheduled again.  When modellock is held (fork handler / dead thread),
+ * the action is dropped and 0 returned. */
+uint64_t ModelChecker::switch_thread(ModelAction *act)
+{
+	if (modellock) {
+		static bool fork_message_printed = false;
+
+		if (!fork_message_printed) {
+			model_print("Fork handler or dead thread trying to call into model checker...\n");
+			fork_message_printed = true;
+		}
+		delete act;
+		return 0;
+	}
+	DBG();
+	Thread *old = thread_current();
+	ASSERT(!old->get_pending());
+
+	/* Let an attached inspection plugin observe the action first. */
+	if (inspect_plugin != NULL) {
+		inspect_plugin->inspectModelAction(act);
+	}
+
+	old->set_pending(act);
+
+	/* A thread waiting on itself can never make progress. */
+	if (old->is_waiting_on(old))
+		assert_bug("Deadlock detected (thread %u)", curr_thread_num);
+
+	/* Don't keep this thread schedulable if its action can't run yet. */
+	if (act && execution->is_enabled(old) && !execution->check_action_enabled(act)) {
+		scheduler->sleep(old);
+	}
+	chooseThread(act, old);
+
+	/* Continue the scan after this thread's slot. */
+	curr_thread_num++;
+	Thread* next = getNextThread();
+	if (next != nullptr)
+		handleNewValidThread(old, next);
+	else {
+		old->set_state(THREAD_READY);	// Just to avoid the first ASSERT in ModelExecution::take_step
+		handleChosenThread(old);
+	}
+
+	return old->get_return_value();
+}
+
+/* Make `next` the scheduler's current thread and context-switch to it from
+ * `old`; control returns here only when `old` is scheduled again. */
+void ModelChecker::handleNewValidThread(Thread *old, Thread *next)
+{
+	scheduler->set_current_thread(next);
+
+	if (Thread::swap(old, next) < 0) {
+		perror("swap threads");
+		exit(EXIT_FAILURE);
+	}
+}
+
+/* Every thread has a pending action stashed: decide what happens next.
+ * Terminates the execution on assertion failure or when no runnable thread
+ * remains; otherwise either lets a freshly-woken thread re-stash its action
+ * or consumes the chosen thread's pending action and starts the next round. */
+void ModelChecker::handleChosenThread(Thread *old)
+{
+	Thread * th = old ? old : thread_current();
+	/* Catch assertions from prior take_step or between-action bugs. */
+	if (execution->has_asserted()) {
+		finishRunExecution(th);
+		return;
+	}
+	if (!chosen_thread)
+		chosen_thread = get_next_thread();
+	if (!chosen_thread || chosen_thread->is_model_thread()) {
+		finishRunExecution(th);
+		return;
+	}
+	if (chosen_thread->just_woken_up()) {
+		chosen_thread->set_wakeup_state(false);
+		chosen_thread->set_pending(NULL);
+		chosen_thread = NULL;
+		// Allow this thread to stash the next pending action
+//		if (should_terminate_execution())
+//			finishRunExecution(th);
+//		else
+		startRunExecution(th);
+	} else {
+		/* Consume the next action for a Thread */
+		consumeAction();
+
+		if (should_terminate_execution())
+			finishRunExecution(th);
+		else
+			startRunExecution(th);
+	}
+}
+
static void runChecker() {
model->run();
delete model;
//Need to initial random number generator state to avoid resets on rollback
char random_state[256];
initstate(423121, random_state, sizeof(random_state));
- modelclock_t checkfree = params.checkthreshold;
+ checkfree = params.checkthreshold;
for(int exec = 0;exec < params.maxexecutions;exec++) {
- Thread * t = init_thread;
-
+ chosen_thread = init_thread;
+ break_execution = false;
do {
- /* Check whether we need to free model actions. */
-
- if (params.traceminsize != 0 &&
- execution->get_curr_seq_num() > checkfree) {
- checkfree += params.checkthreshold;
- execution->collectActions();
- }
-
- /*
- * Stash next pending action(s) for thread(s). There
- * should only need to stash one thread's action--the
- * thread which just took a step--plus the first step
- * for any newly-created thread
- */
- for (unsigned int i = 0;i < get_num_threads();i++) {
- thread_id_t tid = int_to_id(i);
- Thread *thr = get_thread(tid);
- if (!thr->is_model_thread() && !thr->is_complete() && !thr->get_pending()) {
- switch_from_master(thr);
- if (thr->is_waiting_on(thr))
- assert_bug("Deadlock detected (thread %u)", i);
- }
- }
-
- /* Don't schedule threads which should be disabled */
- for (unsigned int i = 0;i < get_num_threads();i++) {
- Thread *th = get_thread(int_to_id(i));
- ModelAction *act = th->get_pending();
- if (act && execution->is_enabled(th) && !execution->check_action_enabled(act)) {
- scheduler->sleep(th);
- }
- }
-
- for (unsigned int i = 1;i < get_num_threads();i++) {
- Thread *th = get_thread(int_to_id(i));
- ModelAction *act = th->get_pending();
- if (act && execution->is_enabled(th) && (th->get_state() != THREAD_BLOCKED) ) {
- if (act->is_write()) {
- std::memory_order order = act->get_mo();
- if (order == std::memory_order_relaxed || \
- order == std::memory_order_release) {
- t = th;
- break;
- }
- } else if (act->get_type() == THREAD_CREATE || \
- act->get_type() == PTHREAD_CREATE || \
- act->get_type() == THREAD_START || \
- act->get_type() == THREAD_FINISH) {
- t = th;
- break;
- }
- }
- }
-
- /* Catch assertions from prior take_step or from
- * between-ModelAction bugs (e.g., data races) */
-
- if (execution->has_asserted())
- break;
- if (!t)
- t = get_next_thread();
- if (!t || t->is_model_thread())
+ if (break_execution)
break;
- if (t->just_woken_up()) {
- t->set_wakeup_state(false);
- t->set_pending(NULL);
- t = NULL;
- continue; // Allow this thread to stash the next pending action
- }
- /* Consume the next action for a Thread */
- ModelAction *curr = t->get_pending();
- t->set_pending(NULL);
- t = execution->take_step(curr);
+ startRunExecution(NULL);
} while (!should_terminate_execution());
+
finish_execution((exec+1) < params.maxexecutions);
//restore random number generator state after rollback
setstate(random_state);
void switch_from_master(Thread *thread);
uint64_t switch_to_master(ModelAction *act);
+ uint64_t switch_thread(ModelAction *act);
+
+ void startRunExecution(Thread *old);
+ void finishRunExecution(Thread *old);
+ void consumeAction();
+ void chooseThread(ModelAction *act, Thread *thr);
+ Thread * getNextThread();
+ void handleChosenThread(Thread *old);
+ void handleNewValidThread(Thread *old, Thread *next);
void assert_bug(const char *msg, ...);
int execution_number;
+ unsigned int curr_thread_num;
+
+ Thread * chosen_thread;
+
+ bool thread_chosen;
+ bool break_execution;
+
+ modelclock_t checkfree;
+
unsigned int get_num_threads() const;
void finish_execution(bool moreexecutions);
+/* Model a mutex acquisition as an ATOMIC_LOCK action; blocks inside the
+ * model checker until the lock can be granted. */
void mutex::lock()
{
-	model->switch_to_master(new ModelAction(ATOMIC_LOCK, std::memory_order_seq_cst, this));
+	model->switch_thread(new ModelAction(ATOMIC_LOCK, std::memory_order_seq_cst, this));
}
+/* Model a non-blocking acquisition attempt; the model's return value for the
+ * ATOMIC_TRYLOCK action indicates success. */
bool mutex::try_lock()
{
-	return model->switch_to_master(new ModelAction(ATOMIC_TRYLOCK, std::memory_order_seq_cst, this));
+	return model->switch_thread(new ModelAction(ATOMIC_TRYLOCK, std::memory_order_seq_cst, this));
}
+/* Model releasing the mutex as an ATOMIC_UNLOCK action. */
void mutex::unlock()
{
-	model->switch_to_master(new ModelAction(ATOMIC_UNLOCK, std::memory_order_seq_cst, this));
+	model->switch_thread(new ModelAction(ATOMIC_UNLOCK, std::memory_order_seq_cst, this));
}
}
struct pthread_params params = { start_routine, arg };
- ModelAction *act = new ModelAction(PTHREAD_CREATE, std::memory_order_seq_cst, t, (uint64_t)¶ms);
-
/* seq_cst is just a 'don't care' parameter */
- model->switch_to_master(act);
+ model->switch_to_master(new ModelAction(PTHREAD_CREATE, std::memory_order_seq_cst, t, (uint64_t)¶ms));
return 0;
}
/* Take care of both pthread_yield and c++ thread yield */
+/* Records a THREAD_YIELD action for the scheduler; always reports success. */
int sched_yield() {
-	model->switch_to_master(new ModelAction(THREAD_YIELD, std::memory_order_seq_cst, thread_current(), VALUE_NONE));
+	model->switch_thread(new ModelAction(THREAD_YIELD, std::memory_order_seq_cst, thread_current(), VALUE_NONE));
	return 0;
}
cdsc::snapcondition_variable *v = execution->getCondMap()->get(p_cond);
cdsc::snapmutex *m = execution->getMutexMap()->get(p_mutex);
- model->switch_to_master(new ModelAction(ATOMIC_TIMEDWAIT, std::memory_order_seq_cst, v, (uint64_t) m));
+ model->switch_thread(new ModelAction(ATOMIC_TIMEDWAIT, std::memory_order_seq_cst, v, (uint64_t) m));
m->lock();
// model_print("Timed_wait is called\n");
static int swap(ucontext_t *ctxt, Thread *t);
static int swap(Thread *t, ucontext_t *ctxt);
+ static int swap(Thread *t, Thread *t2);
thread_state get_state() const { return state; }
void set_state(thread_state s);
Thread * curr_thread = thread_current();
/* Add dummy "start" action, just to create a first clock vector */
- model->switch_to_master(new ModelAction(THREAD_START, std::memory_order_seq_cst, curr_thread));
+ model->switch_thread(new ModelAction(THREAD_START, std::memory_order_seq_cst, curr_thread));
real_init_all();
return model_swapcontext(ctxt, &t->context);
}
+/* Context-switch directly from user thread t to user thread t2 without
+ * bouncing through the system context: marks t READY and t2 RUNNING, and
+ * (with TLS emulation enabled) repoints the TLS base at t2's block before
+ * swapping ucontexts.  Returns model_swapcontext's result (<0 on error). */
+int Thread::swap(Thread *t, Thread *t2)
+{
+	t->set_state(THREAD_READY);
+	t2->set_state(THREAD_RUNNING);
+#ifdef TLS
+	if (t2->tls != NULL)
+		set_tls_addr((uintptr_t)t2->tls);
+#endif
+	return model_swapcontext(&t->context, &t2->context);
+}
/** Terminate a thread and free its stack. */
void Thread::complete()