#include "cyclegraph.h"
#include "promise.h"
#include "datarace.h"
+#include "mutex.h"
#define INITIAL_THREAD_ID 0
action_trace(new action_list_t()),
thread_map(new HashTable<int, Thread *, int>()),
obj_map(new HashTable<const void *, action_list_t, uintptr_t, 4>()),
+ lock_waiters_map(new HashTable<const void *, action_list_t, uintptr_t, 4>()),
obj_thrd_map(new HashTable<void *, std::vector<action_list_t>, uintptr_t, 4 >()),
promises(new std::vector<Promise *>()),
futurevalues(new std::vector<struct PendingFutureValue>()),
delete obj_thrd_map;
delete obj_map;
+ delete lock_waiters_map;
delete action_trace;
for (unsigned int i = 0; i < promises->size(); i++)
{
thread_id_t tid;
- /* Do not split atomic actions. */
- if (curr->is_rmwr())
- return thread_current();
- /* The THREAD_CREATE action points to the created Thread */
- else if (curr->get_type() == THREAD_CREATE)
- return (Thread *)curr->get_location();
+ if (curr!=NULL) {
+ /* Do not split atomic actions. */
+ if (curr->is_rmwr())
+ return thread_current();
+ /* The THREAD_CREATE action points to the created Thread */
+ else if (curr->get_type() == THREAD_CREATE)
+ return (Thread *)curr->get_location();
+ }
/* Have we completed exploring the preselected path? */
if (diverge == NULL)
{
action_type type = act->get_type();
- switch (type) {
- case ATOMIC_READ:
- case ATOMIC_WRITE:
- case ATOMIC_RMW:
- break;
- default:
- return NULL;
- }
- /* linear search: from most recent to oldest */
- action_list_t *list = obj_map->get_safe_ptr(act->get_location());
- action_list_t::reverse_iterator rit;
- for (rit = list->rbegin(); rit != list->rend(); rit++) {
- ModelAction *prev = *rit;
- if (act->is_synchronizing(prev))
- return prev;
+ if (type==ATOMIC_READ||type==ATOMIC_WRITE||type==ATOMIC_RMW) {
+ /* linear search: from most recent to oldest */
+ action_list_t *list = obj_map->get_safe_ptr(act->get_location());
+ action_list_t::reverse_iterator rit;
+ for (rit = list->rbegin(); rit != list->rend(); rit++) {
+ ModelAction *prev = *rit;
+ if (act->is_synchronizing(prev))
+ return prev;
+ }
+ } else if (type==ATOMIC_LOCK||type==ATOMIC_TRYLOCK) {
+ /* linear search: from most recent to oldest */
+ action_list_t *list = obj_map->get_safe_ptr(act->get_location());
+ action_list_t::reverse_iterator rit;
+ for (rit = list->rbegin(); rit != list->rend(); rit++) {
+ ModelAction *prev = *rit;
+ if (prev->is_success_lock())
+ return prev;
+ }
+ } else if (type==ATOMIC_UNLOCK) {
+ /* linear search: from most recent to oldest */
+ action_list_t *list = obj_map->get_safe_ptr(act->get_location());
+ action_list_t::reverse_iterator rit;
+ for (rit = list->rbegin(); rit != list->rend(); rit++) {
+ ModelAction *prev = *rit;
+ if (prev->is_failed_trylock())
+ return prev;
+ }
}
return NULL;
}
}
}
+/**
+ * Process a mutex operation (lock, trylock, or unlock): update the mutex's
+ * state, synchronize a successful acquire with the prior unlock, and wake
+ * any threads waiting on an unlock.
+ * @param curr The mutex ModelAction; its location is the std::mutex object.
+ */
+void ModelChecker::process_mutex(ModelAction *curr) {
+ std::mutex * mutex=(std::mutex *) curr->get_location();
+ struct std::mutex_state * state=mutex->get_state();
+ switch(curr->get_type()) {
+ case ATOMIC_TRYLOCK: {
+ //trylock succeeds iff the mutex is currently unlocked
+ bool success=!state->islocked;
+ curr->set_try_lock(success);
+ if (!success)
+ break;
+ }
+ //otherwise fall into the lock case
+ case ATOMIC_LOCK: {
+ //flag accesses that may happen before the mutex was initialized
+ if (curr->get_cv()->getClock(state->alloc_tid)<=state->alloc_clock) {
+ printf("Lock access before initialization\n");
+ set_assert();
+ }
+ state->islocked=true;
+ ModelAction *unlock=get_last_unlock(curr);
+ //synchronize with the previous unlock statement
+ curr->synchronize_with(unlock);
+ break;
+ }
+ case ATOMIC_UNLOCK: {
+ //unlock the lock
+ state->islocked=false;
+ //wake up the other threads
+ action_list_t * waiters = lock_waiters_map->get_safe_ptr(curr->get_location());
+ //activate all the waiting threads
+ for(action_list_t::iterator rit = waiters->begin(); rit!=waiters->end(); rit++) {
+ add_thread(get_thread((*rit)->get_tid()));
+ }
+ waiters->clear();
+ break;
+ }
+ default:
+ ASSERT(0);
+ }
+}
+
+
/**
* Process a write ModelAction
* @param curr The ModelAction to process
return newcurr;
}
- newcurr = node_stack->explore_action(curr);
+ newcurr = node_stack->explore_action(curr, scheduler->get_enabled());
if (newcurr) {
/* First restore type and order in case of RMW operation */
if (curr->is_rmwr())
newcurr->copy_typeandorder(curr);
+ ASSERT(curr->get_location()==newcurr->get_location());
+
/* Discard duplicate ModelAction; use action from NodeStack */
delete curr;
return newcurr;
}
+/**
+ * Check whether an action can execute right now. A lock acquire on a
+ * currently-held mutex is not enabled; such an action is queued in
+ * lock_waiters_map so the matching unlock can re-enable its thread later.
+ * @param curr The action about to run
+ * @return true if the action may proceed, false if it must wait
+ */
+bool ModelChecker::check_action_enabled(ModelAction *curr) {
+ if (curr->is_lock()) {
+ std::mutex * lock=(std::mutex *) curr->get_location();
+ struct std::mutex_state * state = lock->get_state();
+ if (state->islocked) {
+ //Stick the action in the appropriate waiting queue
+ lock_waiters_map->get_safe_ptr(curr->get_location())->push_back(curr);
+ return false;
+ }
+ }
+
+ return true;
+}
+
/**
* This is the heart of the model checker routine. It performs model-checking
* actions corresponding to a given "current action." Among other processes, it
bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
+ if (!check_action_enabled(curr)) {
+ //we'll make the execution look like we chose to run this action
+ //much later...when a lock is actually available to relese
+ remove_thread(get_current_thread());
+ get_current_thread()->set_pending(curr);
+ return get_next_thread(NULL);
+ }
+
ModelAction *newcurr = initialize_curr_action(curr);
/* Add the action to lists before any other model-checking tasks */
build_reads_from_past(curr);
curr = newcurr;
+ /* Add the action to lists before any other model-checking tasks */
+ if (!second_part_of_rmw)
+ add_action_to_lists(newcurr);
+
+ /* Build may_read_from set for newly-created actions */
+ if (curr == newcurr && curr->is_read())
+ build_reads_from_past(curr);
+ curr = newcurr;
+
/* Thread specific actions */
switch (curr->get_type()) {
case THREAD_CREATE: {
if (act->is_write() && process_write(act))
updated = true;
+ if (act->is_mutex_op())
+ process_mutex(act);
+
if (updated)
work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
break;
/**
 * Get the last action recorded for a particular thread.
 * @param tid The id of the thread in question
 * @return The thread's most recent action, or NULL if the thread has not
 * yet recorded one (its id is beyond the tracking vector)
 */
ModelAction * ModelChecker::get_last_action(thread_id_t tid)
{
- int nthreads = get_num_threads();
- if ((int)thrd_last_action->size() < nthreads)
- thrd_last_action->resize(nthreads);
- return (*thrd_last_action)[id_to_int(tid)];
+ int threadid=id_to_int(tid);
+ //out-of-range id: thread has no action recorded yet, so return NULL
+ if (threadid<(int)thrd_last_action->size())
+ return (*thrd_last_action)[id_to_int(tid)];
+ else
+ return NULL;
}
/**
return NULL;
}
+/**
+ * Find the most recent unlock operation on the same mutex as @a curr.
+ * @param curr A lock action whose location identifies the mutex
+ * @return The latest unlock on that mutex, or NULL if none exists
+ */
+ModelAction * ModelChecker::get_last_unlock(ModelAction *curr)
+{
+ void *location = curr->get_location();
+ action_list_t *list = obj_map->get_safe_ptr(location);
+ /* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
+ action_list_t::reverse_iterator rit;
+ //scan backwards from the newest action; first unlock found is the latest
+ for (rit = list->rbegin(); rit != list->rend(); rit++)
+ if ((*rit)->is_unlock())
+ return *rit;
+ return NULL;
+}
+
ModelAction * ModelChecker::get_parent_action(thread_id_t tid)
{
ModelAction *parent = get_last_action(tid);
* @return Returns true (success) if a step was taken and false otherwise.
*/
bool ModelChecker::take_step() {
- Thread *curr, *next;
-
 if (has_asserted())
 return false;
 Thread * curr = thread_current();
 if (curr) {
 if (curr->get_state() == THREAD_READY) {
 ASSERT(priv->current_action);
 priv->nextThread = check_current_action(priv->current_action);
 priv->current_action = NULL;
- if (!curr->is_blocked() && !curr->is_complete())
- scheduler->add_thread(curr);
+ //drop the thread from the scheduler once it can no longer run
+ if (curr->is_blocked() || curr->is_complete())
+ scheduler->remove_thread(curr);
 } else {
 ASSERT(false);
 }
 }
 Thread * next = scheduler->next_thread(priv->nextThread);
 /* Infeasible -> don't take any more steps */
 if (!isfeasible())
 /* next == NULL -> don't take any more steps */
 if (!next)
 return false;
+
+ //a thread that blocked on a lock stashed its action as "pending";
+ //re-issue that action now instead of context-switching into the thread
+ if ( next->get_pending() != NULL ) {
+ //restart a pending action
+ set_current_action(next->get_pending());
+ next->set_pending(NULL);
+ next->set_state(THREAD_READY);
+ return true;
+ }
+
 /* Return false only if swap fails with an error */
 return (Thread::swap(&system_context, next) == 0);
}
/** Constructor: start with no enabled-thread array allocated and the
 *  round-robin index at slot 0. */
Scheduler::Scheduler() :
+ is_enabled(NULL),
+ enabled_len(0),
+ curr_thread_index(0),
 current(NULL)
{
}
+/**
+ * Set the enabled/disabled status of a thread, growing the is_enabled
+ * array (zero-filling newly exposed slots) when the thread id does not
+ * fit in the current array.
+ * @param t The thread whose status is being set
+ * @param enabled_status true if the thread may be scheduled
+ */
+void Scheduler::set_enabled(Thread *t, bool enabled_status) {
+ int threadid=id_to_int(t->get_id());
+ if (threadid>=enabled_len) {
+ bool * new_enabled=(bool *)malloc(sizeof(bool)*(threadid+1));
+ //slots beyond the old length default to disabled
+ memset(&new_enabled[enabled_len], 0, (threadid+1-enabled_len)*sizeof(bool));
+ if (is_enabled != NULL) {
+ memcpy(new_enabled, is_enabled, enabled_len*sizeof(bool));
+ free(is_enabled);
+ }
+ is_enabled=new_enabled;
+ enabled_len=threadid+1;
+ }
+ is_enabled[threadid]=enabled_status;
+}
+
/**
* Add a Thread to the scheduler's ready list.
* @param t The Thread to add
void Scheduler::add_thread(Thread *t)
{
 DEBUG("thread %d\n", t->get_id());
- readyList.push_back(t);
+ //the enabled-flag array replaces the old ready list
+ set_enabled(t, true);
}
/**
{
if (current == t)
current = NULL;
- else
- readyList.remove(t);
+ set_enabled(t, false);
}
/**
*/
void Scheduler::sleep(Thread *t)
{
- remove_thread(t);
+ //mark disabled (rather than removing) so the thread can be woken later
+ set_enabled(t, false);
 t->set_state(THREAD_BLOCKED);
}
*/
void Scheduler::wake(Thread *t)
{
- add_thread(t);
+ //re-enable for scheduling and mark runnable
+ set_enabled(t, true);
 t->set_state(THREAD_READY);
}
*/
Thread * Scheduler::next_thread(Thread *t)
{
- if (t != NULL) {
- current = t;
- readyList.remove(t);
- } else if (readyList.empty()) {
- t = NULL;
+ if ( t == NULL ) {
+ //round-robin scan of the enabled array for the next runnable thread
+ int old_curr_thread = curr_thread_index;
+ while(true) {
+ curr_thread_index = (curr_thread_index+1) % enabled_len;
+ if (is_enabled[curr_thread_index]) {
+ t = model->get_thread(int_to_id(curr_thread_index));
+ break;
+ }
+ //wrapped all the way around: no thread is enabled
+ if (curr_thread_index == old_curr_thread) {
+ print();
+ return NULL;
+ }
+ }
 } else {
- t = readyList.front();
- current = t;
- readyList.pop_front();
+ //caller forced a specific thread; resume round-robin from its slot
+ curr_thread_index = id_to_int(t->get_id());
 }
+ current = t;
 print();
-
 return t;
}
DEBUG("Current thread: %d\n", current->get_id());
else
DEBUG("No current thread\n");
- DEBUG("Num. threads in ready list: %zu\n", readyList.size());
-
- std::list<Thread *, MyAlloc< Thread * > >::const_iterator it;
- for (it = readyList.begin(); it != readyList.end(); it++)
- DEBUG("In ready list: thread %d\n", (*it)->get_id());
}