return hash;
}
-/**
- * @brief Checks the NodeStack to see if a ModelAction is in our may-read-from set
- * @param write The ModelAction to check for
- * @return True if the ModelAction is found; false otherwise
- */
-bool ModelAction::may_read_from(const ModelAction *write) const
-{
- for (int i = 0; i < node->get_read_from_past_size(); i++)
- if (node->get_read_from_past(i) == write)
- return true;
- return false;
-}
-
/**
* Only valid for LOCK, TRY_LOCK, UNLOCK, and WAIT operations.
* @return The mutex operated on by this action, if any; otherwise NULL
bool equals(const ModelAction *x) const { return this == x; }
- bool may_read_from(const ModelAction *write) const;
MEMALLOC
void set_value(uint64_t val) { value = val; }
const struct model_params *params,
Scheduler *scheduler,
NodeStack *node_stack) :
- pthread_counter(0),
model(m),
params(params),
scheduler(scheduler),
action_trace(),
thread_map(2), /* We'll always need at least 2 threads */
pthread_map(0),
+ pthread_counter(0),
obj_map(),
condvar_waiters_map(),
obj_thrd_map(),
mutex_map(),
- cond_map(),
- pending_rel_seqs(),
thrd_last_action(1),
thrd_last_fence_release(),
node_stack(node_stack),
return blocking_threads;
}
-/**
- * @brief Check if we are yield-blocked
- *
- * A program can be "yield-blocked" if all threads are ready to execute a
- * yield.
- *
- * @return True if the program is yield-blocked; false otherwise
- */
-bool ModelExecution::is_yieldblocked() const
-{
- if (!params->yieldblock)
- return false;
-
- for (unsigned int i = 0; i < get_num_threads(); i++) {
- thread_id_t tid = int_to_id(i);
- Thread *t = get_thread(tid);
- if (t->get_pending() && t->get_pending()->is_yield())
- return true;
- }
- return false;
-}
-
/**
- * Check if this is a complete execution. That is, have all thread completed
+ * Check if this is a complete execution. That is, have all threads completed
* execution (rather than exiting because sleep sets have forced a redundant
*/
bool ModelExecution::is_complete_execution() const
{
- if (is_yieldblocked())
- return false;
for (unsigned int i = 0; i < get_num_threads(); i++)
if (is_enabled(int_to_id(i)))
return false;
return true;
}
-/**
- * @brief Find the last fence-related backtracking conflict for a ModelAction
- *
- * This function performs the search for the most recent conflicting action
- * against which we should perform backtracking, as affected by fence
- * operations. This includes pairs of potentially-synchronizing actions which
- * occur due to fence-acquire or fence-release, and hence should be explored in
- * the opposite execution order.
- *
- * @param act The current action
- * @return The most recent action which conflicts with act due to fences
- */
-ModelAction * ModelExecution::get_last_fence_conflict(ModelAction *act) const
-{
- /* Only perform release/acquire fence backtracking for stores */
- if (!act->is_write())
- return NULL;
-
- /* Find a fence-release (or, act is a release) */
- ModelAction *last_release;
- if (act->is_release())
- last_release = act;
- else
- last_release = get_last_fence_release(act->get_tid());
- if (!last_release)
- return NULL;
-
- /* Skip past the release */
- const action_list_t *list = &action_trace;
- action_list_t::const_reverse_iterator rit;
- for (rit = list->rbegin(); rit != list->rend(); rit++)
- if (*rit == last_release)
- break;
- ASSERT(rit != list->rend());
-
- /* Find a prior:
- * load-acquire
- * or
- * load --sb-> fence-acquire */
- ModelVector<ModelAction *> acquire_fences(get_num_threads(), NULL);
- ModelVector<ModelAction *> prior_loads(get_num_threads(), NULL);
- bool found_acquire_fences = false;
- for ( ; rit != list->rend(); rit++) {
- ModelAction *prev = *rit;
- if (act->same_thread(prev))
- continue;
-
- int tid = id_to_int(prev->get_tid());
-
- if (prev->is_read() && act->same_var(prev)) {
- if (prev->is_acquire()) {
- /* Found most recent load-acquire, don't need
- * to search for more fences */
- if (!found_acquire_fences)
- return NULL;
- } else {
- prior_loads[tid] = prev;
- }
- }
- if (prev->is_acquire() && prev->is_fence() && !acquire_fences[tid]) {
- found_acquire_fences = true;
- acquire_fences[tid] = prev;
- }
- }
-
- ModelAction *latest_backtrack = NULL;
- for (unsigned int i = 0; i < acquire_fences.size(); i++)
- if (acquire_fences[i] && prior_loads[i])
- if (!latest_backtrack || *latest_backtrack < *acquire_fences[i])
- latest_backtrack = acquire_fences[i];
- return latest_backtrack;
-}
-
-/**
- * @brief Find the last backtracking conflict for a ModelAction
- *
- * This function performs the search for the most recent conflicting action
- * against which we should perform backtracking. This primary includes pairs of
- * synchronizing actions which should be explored in the opposite execution
- * order.
- *
- * @param act The current action
- * @return The most recent action which conflicts with act
- */
-ModelAction * ModelExecution::get_last_conflict(ModelAction *act) const
-{
- switch (act->get_type()) {
- case ATOMIC_FENCE:
- /* Only seq-cst fences can (directly) cause backtracking */
- if (!act->is_seqcst())
- break;
- case ATOMIC_READ:
- case ATOMIC_WRITE:
- case ATOMIC_RMW: {
- ModelAction *ret = NULL;
-
- /* linear search: from most recent to oldest */
- action_list_t *list = obj_map.get(act->get_location());
- action_list_t::reverse_iterator rit;
- for (rit = list->rbegin(); rit != list->rend(); rit++) {
- ModelAction *prev = *rit;
- if (prev == act)
- continue;
- if (prev->could_synchronize_with(act)) {
- ret = prev;
- break;
- }
- }
-
- ModelAction *ret2 = get_last_fence_conflict(act);
- if (!ret2)
- return ret;
- if (!ret)
- return ret2;
- if (*ret < *ret2)
- return ret2;
- return ret;
- }
- case ATOMIC_LOCK:
- case ATOMIC_TRYLOCK: {
- /* linear search: from most recent to oldest */
- action_list_t *list = obj_map.get(act->get_location());
- action_list_t::reverse_iterator rit;
- for (rit = list->rbegin(); rit != list->rend(); rit++) {
- ModelAction *prev = *rit;
- if (act->is_conflicting_lock(prev))
- return prev;
- }
- break;
- }
- case ATOMIC_UNLOCK: {
- /* linear search: from most recent to oldest */
- action_list_t *list = obj_map.get(act->get_location());
- action_list_t::reverse_iterator rit;
- for (rit = list->rbegin(); rit != list->rend(); rit++) {
- ModelAction *prev = *rit;
- if (!act->same_thread(prev) && prev->is_failed_trylock())
- return prev;
- }
- break;
- }
- case ATOMIC_WAIT: {
- /* linear search: from most recent to oldest */
- action_list_t *list = obj_map.get(act->get_location());
- action_list_t::reverse_iterator rit;
- for (rit = list->rbegin(); rit != list->rend(); rit++) {
- ModelAction *prev = *rit;
- if (!act->same_thread(prev) && prev->is_failed_trylock())
- return prev;
- if (!act->same_thread(prev) && prev->is_notify())
- return prev;
- }
- break;
- }
-
- case ATOMIC_NOTIFY_ALL:
- case ATOMIC_NOTIFY_ONE: {
- /* linear search: from most recent to oldest */
- action_list_t *list = obj_map.get(act->get_location());
- action_list_t::reverse_iterator rit;
- for (rit = list->rbegin(); rit != list->rend(); rit++) {
- ModelAction *prev = *rit;
- if (!act->same_thread(prev) && prev->is_wait())
- return prev;
- }
- break;
- }
- default:
- break;
- }
- return NULL;
-}
-
-/** This method finds backtracking points where we should try to
- * reorder the parameter ModelAction against.
- *
- * @param the ModelAction to find backtracking points for.
- */
-void ModelExecution::set_backtracking(ModelAction *act)
-{
- Thread *t = get_thread(act);
- ModelAction *prev = get_last_conflict(act);
- if (prev == NULL)
- return;
-
- Node *node = prev->get_node()->get_parent();
-
- /* See Dynamic Partial Order Reduction (addendum), POPL '05 */
- int low_tid, high_tid;
- if (node->enabled_status(t->get_id()) == THREAD_ENABLED) {
- low_tid = id_to_int(act->get_tid());
- high_tid = low_tid + 1;
- } else {
- low_tid = 0;
- high_tid = get_num_threads();
- }
-
- for (int i = low_tid; i < high_tid; i++) {
- thread_id_t tid = int_to_id(i);
-
- /* Make sure this thread can be enabled here. */
- if (i >= node->get_num_threads())
- break;
-
- /* See Dynamic Partial Order Reduction (addendum), POPL '05 */
- /* Don't backtrack into a point where the thread is disabled or sleeping. */
- if (node->enabled_status(tid) != THREAD_ENABLED)
- continue;
-
- /* Check if this has been explored already */
- if (node->has_been_explored(tid))
- continue;
-
- /* See if fairness allows */
- if (params->fairwindow != 0 && !node->has_priority(tid)) {
- bool unfair = false;
- for (int t = 0; t < node->get_num_threads(); t++) {
- thread_id_t tother = int_to_id(t);
- if (node->is_enabled(tother) && node->has_priority(tother)) {
- unfair = true;
- break;
- }
- }
- if (unfair)
- continue;
- }
-
- /* See if CHESS-like yield fairness allows */
- if (params->yieldon) {
- bool unfair = false;
- for (int t = 0; t < node->get_num_threads(); t++) {
- thread_id_t tother = int_to_id(t);
- if (node->is_enabled(tother) && node->has_priority_over(tid, tother)) {
- unfair = true;
- break;
- }
- }
- if (unfair)
- continue;
- }
-
- /* Cache the latest backtracking point */
- set_latest_backtrack(prev);
-
- /* If this is a new backtracking point, mark the tree */
- if (!node->set_backtrack(tid))
- continue;
- DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
- id_to_int(prev->get_tid()),
- id_to_int(t->get_id()));
- if (DBG_ENABLED()) {
- prev->print();
- act->print();
- }
- }
-}
-
-/**
- * @brief Cache the a backtracking point as the "most recent", if eligible
- *
- * Note that this does not prepare the NodeStack for this backtracking
- * operation, it only caches the action on a per-execution basis
- *
- * @param act The operation at which we should explore a different next action
- * (i.e., backtracking point)
- * @return True, if this action is now the most recent backtracking point;
- * false otherwise
- */
-bool ModelExecution::set_latest_backtrack(ModelAction *act)
-{
- if (!priv->next_backtrack || *act > *priv->next_backtrack) {
- priv->next_backtrack = act;
- return true;
- }
- return false;
-}
-
-/**
- * Returns last backtracking point. The model checker will explore a different
- * path for this point in the next execution.
- * @return The ModelAction at which the next execution should diverge.
- */
-ModelAction * ModelExecution::get_next_backtrack()
-{
- ModelAction *next = priv->next_backtrack;
- priv->next_backtrack = NULL;
- return next;
-}
/**
* Processes a read model action.
* @param curr is the read model action to process.
+ * @param rf_set is the set of model actions we can possibly read from
* @return True if processing this read updates the mo_graph.
*/
-bool ModelExecution::process_read(ModelAction *curr)
+bool ModelExecution::process_read(ModelAction *curr, ModelVector<ModelAction *> * rf_set)
{
- Node *node = curr->get_node();
- while (true) {
- bool updated = false;
- switch (node->get_read_from_status()) {
- case READ_FROM_PAST: {
- const ModelAction *rf = node->get_read_from_past();
- ASSERT(rf);
-
- mo_graph->startChanges();
-
- ASSERT(!is_infeasible());
- if (!check_recency(curr, rf)) {
- if (node->increment_read_from()) {
- mo_graph->rollbackChanges();
- continue;
- } else {
- priv->too_many_reads = true;
- }
- }
-
- updated = r_modification_order(curr, rf);
- read_from(curr, rf);
- mo_graph->commitChanges();
- break;
- }
- default:
- ASSERT(false);
- }
- get_thread(curr)->set_return_value(curr->get_return_value());
- return updated;
- }
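+ // Fuzzer: pick the reads-from store uniformly at random from the
+ // may-read-from set (assumes rf_set is non-empty).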
+ int random_index = random() % rf_set->size();
+ bool updated = false;
+
+ const ModelAction *rf = (*rf_set)[random_index];
+ ASSERT(rf);
+
+ mo_graph->startChanges();
+
+ updated = r_modification_order(curr, rf);
+ read_from(curr, rf);
+ mo_graph->commitChanges();
+ get_thread(curr)->set_return_value(curr->get_return_value());
+ return updated;
}
/**
if (!curr->is_wait())
break; /* The rest is only for ATOMIC_WAIT */
- /* Should we go to sleep? (simulate spurious failures) */
- if (curr->get_node()->get_misc() == 0) {
- get_safe_ptr_action(&condvar_waiters_map, curr->get_location())->push_back(curr);
- /* disable us */
- scheduler->sleep(get_thread(curr));
- }
break;
}
case ATOMIC_NOTIFY_ALL: {
}
case ATOMIC_NOTIFY_ONE: {
action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
- int wakeupthread = curr->get_node()->get_misc();
+
+ //BCD -- TOFIX (fuzzer): the woken thread should be chosen at random rather
+ // than hard-coded to 0; the old Node-based choice is kept below for reference.
+ // int wakeupthread = curr->get_node()->get_misc();
+ int wakeupthread = 0;
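+ // One possible (hypothetical) randomized choice, sketched as a comment only:
+ //   int wakeupthread = waiters->empty() ? 0 : random() % waiters->size();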
action_list_t::iterator it = waiters->begin();
// WL
/**
* Process a write ModelAction
* @param curr The ModelAction to process
- * @param work The work queue, for adding fixup work
* @return True if the mo_graph was updated or promises were resolved
*/
-bool ModelExecution::process_write(ModelAction *curr, work_queue_t *work)
+bool ModelExecution::process_write(ModelAction *curr)
{
bool updated_mod_order = w_modification_order(curr);
(*curr)->set_seq_number(get_next_seq_num());
- newcurr = node_stack->explore_action(*curr, scheduler->get_enabled_array());
+ newcurr = node_stack->explore_action(*curr);
if (newcurr) {
/* First restore type and order in case of RMW operation */
if ((*curr)->is_rmwr())
/* Assign most recent release fence */
newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
- /*
- * Perform one-time actions when pushing new ModelAction onto
- * NodeStack
- */
- if (newcurr->is_wait())
- newcurr->get_node()->set_misc_max(2);
- else if (newcurr->is_notify_one()) {
- newcurr->get_node()->set_misc_max(get_safe_ptr_action(&condvar_waiters_map, newcurr->get_location())->size());
- }
return true; /* This was a new ModelAction */
}
}
if (!blocking->is_complete()) {
return false;
}
- } else if (params->yieldblock && curr->is_yield()) {
- return false;
}
return true;
wake_up_sleeping_actions(curr);
- /* Compute fairness information for CHESS yield algorithm */
- if (params->yieldon) {
- curr->get_node()->update_yield(scheduler);
- }
-
/* Add the action to lists before any other model-checking tasks */
if (!second_part_of_rmw)
add_action_to_lists(curr);
+ ModelVector<ModelAction *> * rf_set = NULL;
/* Build may_read_from set for newly-created actions */
if (newly_explored && curr->is_read())
- build_may_read_from(curr);
-
- /* Initialize work_queue with the "current action" work */
- work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
- while (!work_queue.empty() && !has_asserted()) {
- WorkQueueEntry work = work_queue.front();
- work_queue.pop_front();
-
- switch (work.type) {
- case WORK_CHECK_CURR_ACTION: {
- ModelAction *act = work.action;
+ rf_set = build_may_read_from(curr);
+
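+ // The DPOR work queue is gone; each action is now processed exactly once,
+ // dispatched directly on its type below.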
+ process_thread_action(curr);
+
+ if (curr->is_read() && !second_part_of_rmw) {
+ process_read(curr, rf_set);
+ delete rf_set;
+ }
+
+ if (curr->is_write())
+ process_write(curr);
+
+ if (curr->is_fence())
+ process_fence(curr);
+
+ if (curr->is_mutex_op())
+ process_mutex(curr);
- process_thread_action(curr);
-
- if (act->is_read() && !second_part_of_rmw)
- process_read(act);
-
- if (act->is_write())
- process_write(act, &work_queue);
-
- if (act->is_fence())
- process_fence(act);
-
- if (act->is_mutex_op())
- process_mutex(act);
-
- break;
- }
- case WORK_CHECK_MO_EDGES: {
- /** @todo Complete verification of work_queue */
- ModelAction *act = work.action;
-
- if (act->is_read()) {
- const ModelAction *rf = act->get_reads_from();
- r_modification_order(act, rf);
- if (act->is_seqcst()) {
- ModelAction *last_sc_write = get_last_seq_cst_write(act);
- if (last_sc_write != NULL && rf->happens_before(last_sc_write)) {
- set_bad_sc_read();
- }
- }
- }
- if (act->is_write()) {
- w_modification_order(act);
- }
- mo_graph->commitChanges();
- break;
- }
- default:
- ASSERT(false);
- break;
- }
- }
-
- check_curr_backtracking(curr);
- set_backtracking(curr);
return curr;
}
-void ModelExecution::check_curr_backtracking(ModelAction *curr)
-{
- Node *currnode = curr->get_node();
- Node *parnode = currnode->get_parent();
-
- if ((parnode && !parnode->backtrack_empty()) ||
- !currnode->misc_empty() ||
- !currnode->read_from_empty()) {
- set_latest_backtrack(curr);
- }
-}
-
/**
* This is the strongest feasibility check available.
* @return whether the current trace (partial or complete) must be a prefix of
return lastread;
}
-/**
- * A helper function for ModelExecution::check_recency, to check if the current
- * thread is able to read from a different write/promise for 'params.maxreads'
- * number of steps and if that write/promise should become visible (i.e., is
- * ordered later in the modification order). This helps model memory liveness.
- *
- * @param curr The current action. Must be a read.
- * @param rf The write/promise from which we plan to read
- * @param other_rf The write/promise from which we may read
- * @return True if we were able to read from other_rf for params.maxreads steps
- */
-template <typename T, typename U>
-bool ModelExecution::should_read_instead(const ModelAction *curr, const T *rf, const U *other_rf) const
-{
- /* Need a different write/promise */
- if (other_rf->equals(rf))
- return false;
-
- /* Only look for "newer" writes/promises */
- if (!mo_graph->checkReachable(rf, other_rf))
- return false;
-
- SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
- action_list_t *list = &(*thrd_lists)[id_to_int(curr->get_tid())];
- action_list_t::reverse_iterator rit = list->rbegin();
- ASSERT((*rit) == curr);
- /* Skip past curr */
- rit++;
-
- /* Does this write/promise work for everyone? */
- for (int i = 0; i < params->maxreads; i++, rit++) {
- ModelAction *act = *rit;
- if (!act->may_read_from(other_rf))
- return false;
- }
- return true;
-}
-
-/**
- * Checks whether a thread has read from the same write or Promise for too many
- * times without seeing the effects of a later write/Promise.
- *
- * Basic idea:
- * 1) there must a different write/promise that we could read from,
- * 2) we must have read from the same write/promise in excess of maxreads times,
- * 3) that other write/promise must have been in the reads_from set for maxreads times, and
- * 4) that other write/promise must be mod-ordered after the write/promise we are reading.
- *
- * If so, we decide that the execution is no longer feasible.
- *
- * @param curr The current action. Must be a read.
- * @param rf The ModelAction/Promise from which we might read.
- * @return True if the read should succeed; false otherwise
- */
-template <typename T>
-bool ModelExecution::check_recency(ModelAction *curr, const T *rf) const
-{
- if (!params->maxreads)
- return true;
-
- //NOTE: Next check is just optimization, not really necessary....
- if (curr->get_node()->get_read_from_past_size() <= 1)
- return true;
-
- SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
- int tid = id_to_int(curr->get_tid());
- ASSERT(tid < (int)thrd_lists->size());
- action_list_t *list = &(*thrd_lists)[tid];
- action_list_t::reverse_iterator rit = list->rbegin();
- ASSERT((*rit) == curr);
- /* Skip past curr */
- rit++;
-
- action_list_t::reverse_iterator ritcopy = rit;
- /* See if we have enough reads from the same value */
- for (int count = 0; count < params->maxreads; ritcopy++, count++) {
- if (ritcopy == list->rend())
- return true;
- ModelAction *act = *ritcopy;
- if (!act->is_read())
- return true;
- if (act->get_reads_from() && !act->get_reads_from()->equals(rf))
- return true;
- if (act->get_node()->get_read_from_past_size() <= 1)
- return true;
- }
- for (int i = 0; i < curr->get_node()->get_read_from_past_size(); i++) {
- const ModelAction *write = curr->get_node()->get_read_from_past(i);
- if (should_read_instead(curr, rf, write))
- return false; /* liveness failure */
- }
- return true;
-}
-
/**
* @brief Updates the mo_graph with the constraints imposed from the current
* read.
}
}
- /*
- * All compatible, thread-exclusive promises must be ordered after any
- * concrete loads from the same thread
- */
-
return added;
}
* @param release_heads A pass-by-reference style return parameter. After
* execution of this function, release_heads will contain the heads of all the
* relevant release sequences, if any exists with certainty
- * @param pending A pass-by-reference style return parameter which is only used
- * when returning false (i.e., uncertain). Returns most information regarding
- * an uncertain release sequence, including any write operations that might
- * break the sequence.
* @return true, if the ModelExecution is certain that release_heads is complete;
* false otherwise
*/
bool ModelExecution::release_seq_heads(const ModelAction *rf,
- rel_heads_list_t *release_heads,
- struct release_seq *pending) const
+ rel_heads_list_t *release_heads) const
{
/* Only check for release sequences if there are no cycles */
if (mo_graph->checkForCycles())
if (rf->is_acquire() && rf->is_release())
return true; /* complete */
};
- if (!rf) {
- /* read from future: need to settle this later */
- pending->rf = NULL;
- return false; /* incomplete */
- }
+ ASSERT(rf); // rf must be a real write
if (rf->is_release())
return true; /* complete */
if (fence_release)
release_heads->push_back(fence_release);
- int tid = id_to_int(rf->get_tid());
- SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(rf->get_location());
- action_list_t *list = &(*thrd_lists)[tid];
- action_list_t::const_reverse_iterator rit;
-
- /* Find rf in the thread list */
- rit = std::find(list->rbegin(), list->rend(), rf);
- ASSERT(rit != list->rend());
-
- /* Find the last {write,fence}-release */
- for (; rit != list->rend(); rit++) {
- if (fence_release && *(*rit) < *fence_release)
- break;
- if ((*rit)->is_release())
- break;
- }
- if (rit == list->rend()) {
- /* No write-release in this thread */
- return true; /* complete */
- } else if (fence_release && *(*rit) < *fence_release) {
- /* The fence-release is more recent (and so, "stronger") than
- * the most recent write-release */
- return true; /* complete */
- } /* else, need to establish contiguous release sequence */
- ModelAction *release = *rit;
-
- ASSERT(rf->same_thread(release));
-
- pending->writes.clear();
-
- bool certain = true;
- for (unsigned int i = 0; i < thrd_lists->size(); i++) {
- if (id_to_int(rf->get_tid()) == (int)i)
- continue;
- list = &(*thrd_lists)[i];
-
- /* Can we ensure no future writes from this thread may break
- * the release seq? */
- bool future_ordered = false;
-
- ModelAction *last = get_last_action(int_to_id(i));
- Thread *th = get_thread(int_to_id(i));
- if ((last && rf->happens_before(last)) ||
- !is_enabled(th) ||
- th->is_complete())
- future_ordered = true;
-
- ASSERT(!th->is_model_thread() || future_ordered);
-
- for (rit = list->rbegin(); rit != list->rend(); rit++) {
- const ModelAction *act = *rit;
- /* Reach synchronization -> this thread is complete */
- if (act->happens_before(release))
- break;
- if (rf->happens_before(act)) {
- future_ordered = true;
- continue;
- }
-
- /* Only non-RMW writes can break release sequences */
- if (!act->is_write() || act->is_rmw())
- continue;
-
- /* Check modification order */
- if (mo_graph->checkReachable(rf, act)) {
- /* rf --mo--> act */
- future_ordered = true;
- continue;
- }
- if (mo_graph->checkReachable(act, release))
- /* act --mo--> release */
- break;
- if (mo_graph->checkReachable(release, act) &&
- mo_graph->checkReachable(act, rf)) {
- /* release --mo-> act --mo--> rf */
- return true; /* complete */
- }
- /* act may break release sequence */
- pending->writes.push_back(act);
- certain = false;
- }
- if (!future_ordered)
- certain = false; /* This thread is uncertain */
- }
-
- if (certain) {
- release_heads->push_back(release);
- pending->writes.clear();
- } else {
- pending->release = release;
- pending->rf = rf;
- }
- return certain;
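+ /* The fuzzer no longer tracks pending/uncertain release sequences; any
+  * sequence that reaches this point is treated as complete. */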
+ return true; /* complete */
}
/**
ModelAction *read, rel_heads_list_t *release_heads)
{
const ModelAction *rf = read->get_reads_from();
- struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
- sequence->acquire = acquire;
- sequence->read = read;
-
- if (!release_seq_heads(rf, release_heads, sequence)) {
- /* add act to 'lazy checking' list */
- pending_rel_seqs.push_back(sequence);
- } else {
- snapshot_free(sequence);
- }
-}
-
-/**
- * @brief Propagate a modified clock vector to actions later in the execution
- * order
- *
- * After an acquire operation lazily completes a release-sequence
- * synchronization, we must update all clock vectors for operations later than
- * the acquire in the execution order.
- *
- * @param acquire The ModelAction whose clock vector must be propagated
- * @param work The work queue to which we can add work items, if this
- * propagation triggers more updates (e.g., to the modification order)
- */
-void ModelExecution::propagate_clockvector(ModelAction *acquire, work_queue_t *work)
-{
- /* Re-check read-acquire for mo_graph edges */
- work->push_back(MOEdgeWorkEntry(acquire));
-
- /* propagate synchronization to later actions */
- action_list_t::reverse_iterator rit = action_trace.rbegin();
- for (; (*rit) != acquire; rit++) {
- ModelAction *propagate = *rit;
- if (acquire->happens_before(propagate)) {
- synchronize(acquire, propagate);
- /* Re-check 'propagate' for mo_graph edges */
- work->push_back(MOEdgeWorkEntry(propagate));
- }
- }
-}
-
-/**
- * Attempt to resolve all stashed operations that might synchronize with a
- * release sequence for a given location. This implements the "lazy" portion of
- * determining whether or not a release sequence was contiguous, since not all
- * modification order information is present at the time an action occurs.
- *
- * @param location The location/object that should be checked for release
- * sequence resolutions. A NULL value means to check all locations.
- * @param work_queue The work queue to which to add work items as they are
- * generated
- * @return True if any updates occurred (new synchronization, new mo_graph
- * edges)
- */
-bool ModelExecution::resolve_release_sequences(void *location, work_queue_t *work_queue)
-{
- bool updated = false;
- SnapVector<struct release_seq *>::iterator it = pending_rel_seqs.begin();
- while (it != pending_rel_seqs.end()) {
- struct release_seq *pending = *it;
- ModelAction *acquire = pending->acquire;
- const ModelAction *read = pending->read;
-
- /* Only resolve sequences on the given location, if provided */
- if (location && read->get_location() != location) {
- it++;
- continue;
- }
- const ModelAction *rf = read->get_reads_from();
- rel_heads_list_t release_heads;
- bool complete;
- complete = release_seq_heads(rf, &release_heads, pending);
- for (unsigned int i = 0; i < release_heads.size(); i++)
- if (!acquire->has_synchronized_with(release_heads[i]))
- if (synchronize(release_heads[i], acquire))
- updated = true;
-
- if (updated) {
- /* Propagate the changed clock vector */
- propagate_clockvector(acquire, work_queue);
- }
- if (complete) {
- it = pending_rel_seqs.erase(it);
- snapshot_free(pending);
- } else {
- it++;
- }
- }
-
- // If we resolved promises or data races, see if we have realized a data race.
- checkDataRaces();
-
- return updated;
+ release_seq_heads(rf, release_heads);
}
/**
* @param curr is the current ModelAction that we are exploring; it must be a
* 'read' operation.
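+ * @return A newly allocated vector of stores that curr may read from; the
+ * caller is responsible for deleting it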
*/
-void ModelExecution::build_may_read_from(ModelAction *curr)
+ModelVector<ModelAction *> * ModelExecution::build_may_read_from(ModelAction *curr)
{
SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
unsigned int i;
if (curr->is_seqcst())
last_sc_write = get_last_seq_cst_write(curr);
+ ModelVector<ModelAction *> * rf_set = new ModelVector<ModelAction *>();
+
/* Iterate over all threads */
for (i = 0; i < thrd_lists->size(); i++) {
/* Iterate over actions in thread, starting from most recent */
if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
allow_read = false;
- else if (curr->get_sleep_flag() && !curr->is_seqcst() && !sleep_can_read_from(curr, act))
- allow_read = false;
if (allow_read) {
/* Only add feasible reads */
mo_graph->startChanges();
r_modification_order(curr, act);
if (!is_infeasible())
- curr->get_node()->add_read_from_past(act);
+ rf_set->push_back(act);
mo_graph->rollbackChanges();
}
}
}
- /* We may find no valid may-read-from only if the execution is doomed */
- if (!curr->get_node()->read_from_size()) {
- priv->no_valid_reads = true;
- set_assert();
- }
-
if (DBG_ENABLED()) {
model_print("Reached read action:\n");
curr->print();
- model_print("Printing read_from_past\n");
- curr->get_node()->print_read_from_past();
model_print("End printing read_from_past\n");
}
-}
-
-bool ModelExecution::sleep_can_read_from(ModelAction *curr, const ModelAction *write)
-{
- for ( ; write != NULL; write = write->get_reads_from()) {
- /* UNINIT actions don't have a Node, and they never sleep */
- if (write->is_uninitialized())
- return true;
- Node *prevnode = write->get_node()->get_parent();
-
- bool thread_sleep = prevnode->enabled_status(curr->get_tid()) == THREAD_SLEEP_SET;
- if (write->is_release() && thread_sleep)
- return true;
- if (!write->is_rmw())
- return false;
- }
- return true;
+ return rf_set;
}
/**
model_print("Execution trace %d:", get_execution_number());
if (isfeasibleprefix()) {
- if (is_yieldblocked())
- model_print(" YIELD BLOCKED");
if (scheduler->all_threads_sleeping())
model_print(" SLEEP-SET REDUNDANT");
if (have_bug_reports())
if (curr->is_rmwr())
return get_thread(curr);
if (curr->is_write()) {
-// std::memory_order order = curr->get_mo();
-// switch(order) {
-// case std::memory_order_relaxed:
-// return get_thread(curr);
-// case std::memory_order_release:
-// return get_thread(curr);
-// defalut:
-// return NULL;
-// }
- return NULL;
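+ // Keep running the current thread after relaxed and release stores; any
+ // other ordering falls through to the default case and returns NULL.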
+ std::memory_order order = curr->get_mo();
+ switch(order) {
+ case std::memory_order_relaxed:
+ return get_thread(curr);
+ case std::memory_order_release:
+ return get_thread(curr);
+ default:
+ return NULL;
+ }
}
/* Follow CREATE with the created thread */
#include "mymemory.h"
#include "hashtable.h"
-#include "workqueue.h"
#include "config.h"
#include "modeltypes.h"
#include "stl-model.h"
void print_infeasibility(const char *prefix) const;
bool is_infeasible() const;
bool is_deadlocked() const;
- bool is_yieldblocked() const;
bool too_many_steps() const;
- ModelAction * get_next_backtrack();
-
action_list_t * get_action_trace() { return &action_trace; }
CycleGraph * const get_mo_graph() { return mo_graph; }
SNAPSHOTALLOC
private:
int get_execution_number() const;
- pthread_t pthread_counter;
ModelChecker *model;
/** The scheduler to use: tracks the running/ready Threads */
Scheduler * const scheduler;
- bool sleep_can_read_from(ModelAction *curr, const ModelAction *write);
- bool thin_air_constraint_may_allow(const ModelAction *writer, const ModelAction *reader) const;
bool mo_may_allow(const ModelAction *writer, const ModelAction *reader);
void set_bad_synchronization();
void set_bad_sc_read();
bool next_execution();
ModelAction * check_current_action(ModelAction *curr);
bool initialize_curr_action(ModelAction **curr);
- bool process_read(ModelAction *curr);
- bool process_write(ModelAction *curr, work_queue_t *work);
+ bool process_read(ModelAction *curr, ModelVector<ModelAction *> * rf_set);
+ bool process_write(ModelAction *curr);
bool process_fence(ModelAction *curr);
bool process_mutex(ModelAction *curr);
bool read_from(ModelAction *act, const ModelAction *rf);
bool synchronize(const ModelAction *first, ModelAction *second);
- template <typename T>
- bool check_recency(ModelAction *curr, const T *rf) const;
-
- template <typename T, typename U>
- bool should_read_instead(const ModelAction *curr, const T *rf, const U *other_rf) const;
-
- ModelAction * get_last_fence_conflict(ModelAction *act) const;
- ModelAction * get_last_conflict(ModelAction *act) const;
- void set_backtracking(ModelAction *act);
- bool set_latest_backtrack(ModelAction *act);
-
- void check_curr_backtracking(ModelAction *curr);
void add_action_to_lists(ModelAction *act);
ModelAction * get_last_fence_release(thread_id_t tid) const;
ModelAction * get_last_seq_cst_write(ModelAction *curr) const;
ModelAction * get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const;
ModelAction * get_last_unlock(ModelAction *curr) const;
- void build_may_read_from(ModelAction *curr);
+ ModelVector<ModelAction *> * build_may_read_from(ModelAction *curr);
ModelAction * process_rmw(ModelAction *curr);
template <typename rf_type>
bool w_modification_order(ModelAction *curr);
void get_release_seq_heads(ModelAction *acquire, ModelAction *read, rel_heads_list_t *release_heads);
- bool release_seq_heads(const ModelAction *rf, rel_heads_list_t *release_heads, struct release_seq *pending) const;
- void propagate_clockvector(ModelAction *acquire, work_queue_t *work);
- bool resolve_release_sequences(void *location, work_queue_t *work_queue);
+ bool release_seq_heads(const ModelAction *rf, rel_heads_list_t *release_heads) const;
ModelAction * get_uninitialized_action(const ModelAction *curr) const;
action_list_t action_trace;
SnapVector<Thread *> thread_map;
SnapVector<Thread *> pthread_map;
+ pthread_t pthread_counter;
/** Per-object list of actions. Maps an object (i.e., memory location)
* to a trace of all actions performed on the object. */
* are established. Each entry in the list may only be partially
* filled, depending on its pending status.
*/
- SnapVector<struct release_seq *> pending_rel_seqs;
SnapVector<ModelAction *> thrd_last_action;
SnapVector<ModelAction *> thrd_last_fence_release;
static void param_defaults(struct model_params *params)
{
- params->maxreads = 0;
- params->fairwindow = 0;
- params->yieldon = false;
- params->yieldblock = false;
params->enabledcount = 1;
params->bound = 0;
params->verbose = !!DBG_ENABLED();
"\n"
"Model-checker options:\n"
"-h, --help Display this help message and exit\n"
-"-m, --liveness=NUM Maximum times a thread can read from the same write\n"
-" while other writes exist.\n"
-" Default: %d\n"
-"-y, --yield Enable CHESS-like yield-based fairness support\n"
-" (requires thrd_yield() in test program).\n"
-" Default: %s\n"
-"-Y, --yieldblock Prohibit an execution from running a yield.\n"
-" Default: %s\n"
-"-f, --fairness=WINDOW Specify a fairness window in which actions that are\n"
-" enabled sufficiently many times should receive\n"
-" priority for execution (not recommended).\n"
-" Default: %d\n"
"-e, --enabled=COUNT Enabled count.\n"
" Default: %d\n"
"-b, --bound=MAX Upper length bound.\n"
" -o help for a list of options\n"
" -- Program arguments follow.\n\n",
program_name,
- params->maxreads,
- params->yieldon ? "enabled" : "disabled",
- params->yieldblock ? "enabled" : "disabled",
- params->fairwindow,
params->enabledcount,
params->bound,
params->verbose,
static void parse_options(struct model_params *params, int argc, char **argv)
{
- const char *shortopts = "hyYt:o:m:f:e:b:u:x:v::";
+ const char *shortopts = "ht:o:e:b:u:x:v::";
const struct option longopts[] = {
{"help", no_argument, NULL, 'h'},
- {"liveness", required_argument, NULL, 'm'},
- {"fairness", required_argument, NULL, 'f'},
- {"yield", no_argument, NULL, 'y'},
- {"yieldblock", no_argument, NULL, 'Y'},
{"enabled", required_argument, NULL, 'e'},
{"bound", required_argument, NULL, 'b'},
{"verbose", optional_argument, NULL, 'v'},
case 'x':
params->maxexecutions = atoi(optarg);
break;
- case 'f':
- params->fairwindow = atoi(optarg);
- break;
case 'e':
params->enabledcount = atoi(optarg);
break;
case 'b':
params->bound = atoi(optarg);
break;
- case 'm':
- params->maxreads = atoi(optarg);
- break;
case 'v':
params->verbose = optarg ? atoi(optarg) : 1;
break;
case 'u':
params->uninitvalue = atoi(optarg);
break;
- case 'y':
- params->yieldon = true;
- break;
/** case 't':
if (install_plugin(optarg))
error = true;
}
break;
*/
- case 'Y':
- params->yieldblock = true;
- break;
default: /* '?' */
error = true;
break;
*/
Thread * ModelChecker::get_next_thread()
{
- thread_id_t tid;
- /*
- * Have we completed exploring the preselected path? Then let the
- * scheduler decide
- */
+ /* There is no preselected path to replay; defer to the scheduler */
- if (diverge == NULL) // W: diverge is set to NULL permanently
- return scheduler->select_next_thread(node_stack->get_head());
-
- /* Else, we are trying to replay an execution */
- ModelAction *next = node_stack->get_next()->get_action();
-
- if (next == diverge) {
- if (earliest_diverge == NULL || *diverge < *earliest_diverge)
- earliest_diverge = diverge;
-
- Node *nextnode = next->get_node();
- Node *prevnode = nextnode->get_parent();
- scheduler->update_sleep_set(prevnode);
-
- /* Reached divergence point */
- if (nextnode->increment_behaviors()) {
- /* Execute the same thread with a new behavior */
- tid = next->get_tid();
- node_stack->pop_restofstack(2);
- } else {
- ASSERT(prevnode);
- /* Make a different thread execute for next step */
- scheduler->add_sleep(get_thread(next->get_tid()));
- tid = prevnode->get_next_backtrack();
- /* Make sure the backtracked thread isn't sleeping. */
- node_stack->pop_restofstack(1);
- if (diverge == earliest_diverge) {
- earliest_diverge = prevnode->get_action();
- }
- }
- /* Start the round robin scheduler from this thread id */
- scheduler->set_scheduler_thread(tid);
- /* The correct sleep set is in the parent node. */
- execute_sleep_set();
-
- DEBUG("*** Divergence point ***\n");
-
- diverge = NULL;
- } else {
- tid = next->get_tid();
- }
- DEBUG("*** ModelChecker chose next thread = %d ***\n", id_to_int(tid));
- ASSERT(tid != THREAD_ID_T_NONE);
- return get_thread(id_to_int(tid));
+ return scheduler->select_next_thread(node_stack->get_head());
}
/**
// test code
execution_number++;
reset_to_initial_state();
- node_stack->pop_restofstack(2);
-// node_stack->full_reset();
+ node_stack->full_reset();
diverge = NULL;
return false;
/* test
void ModelChecker::run()
{
int i = 0;
+ //Need to initialize the random number generator state here so that snapshot rollbacks do not reset it
+ char random_state[256];
+ initstate(423121, random_state, sizeof(random_state));
do {
thrd_t user_thread;
Thread *t = new Thread(execution->get_next_id(), &user_thread, &user_main_wrapper, NULL, NULL); // L: user_main_wrapper passes the user program
execution->add_thread(t);
+ //Keep the random number generator's state across executions; otherwise each rollback would reset it
do {
/*
* Stash next pending action(s) for thread(s). There
t->set_pending(NULL);
t = execution->take_step(curr);
} while (!should_terminate_execution());
-
next_execution();
i++;
- } while (i<2); // while (has_next);
-
+ //restore random number generator state after rollback
+ setstate(random_state);
+ } while (i<5); // while (has_next);
model_print("******* Model-checking complete: *******\n");
print_stats();
* as an empty stub, to represent the first thread "choice") and up to one
* parent.
*
- * @param params The model-checker parameters
* @param act The ModelAction to associate with this Node. May be NULL.
* @param par The parent Node in the NodeStack. May be NULL if there is no
* parent.
* @param nthreads The number of threads which exist at this point in the
* execution trace.
*/
-Node::Node(const struct model_params *params, ModelAction *act, Node *par,
- int nthreads, Node *prevfairness) :
- read_from_status(READ_FROM_PAST),
+Node::Node(ModelAction *act, Node *par, int nthreads) :
action(act),
- params(params),
uninit_action(NULL),
parent(par),
- num_threads(nthreads),
- explored_children(num_threads),
- backtrack(num_threads),
- fairness(num_threads),
- numBacktracks(0),
- enabled_array(NULL),
- read_from_past(),
- read_from_past_idx(0),
- misc_index(0),
- misc_max(0),
- yield_data(NULL)
+ num_threads(nthreads)
{
ASSERT(act);
act->set_node(this);
- int currtid = id_to_int(act->get_tid());
- int prevtid = prevfairness ? id_to_int(prevfairness->action->get_tid()) : 0;
-
- if (get_params()->fairwindow != 0) {
- for (int i = 0; i < num_threads; i++) {
- ASSERT(i < ((int)fairness.size()));
- struct fairness_info *fi = &fairness[i];
- struct fairness_info *prevfi = (parent && i < parent->get_num_threads()) ? &parent->fairness[i] : NULL;
- if (prevfi) {
- *fi = *prevfi;
- }
- if (parent && parent->is_enabled(int_to_id(i))) {
- fi->enabled_count++;
- }
- if (i == currtid) {
- fi->turns++;
- fi->priority = false;
- }
- /* Do window processing */
- if (prevfairness != NULL) {
- if (prevfairness->parent->is_enabled(int_to_id(i)))
- fi->enabled_count--;
- if (i == prevtid) {
- fi->turns--;
- }
- /* Need full window to start evaluating
- * conditions
- * If we meet the enabled count and have no
- * turns, give us priority */
- if ((fi->enabled_count >= get_params()->enabledcount) &&
- (fi->turns == 0))
- fi->priority = true;
- }
- }
- }
-}
-
-int Node::get_yield_data(int tid1, int tid2) const {
- if (tid1<num_threads && tid2 < num_threads)
- return yield_data[YIELD_INDEX(tid1,tid2,num_threads)];
- else
- return YIELD_S | YIELD_D;
-}
-
-void Node::update_yield(Scheduler * scheduler) {
- if (yield_data==NULL)
- yield_data=(int *) model_calloc(1, sizeof(int)*num_threads*num_threads);
- //handle base case
- if (parent == NULL) {
- for(int i = 0; i < num_threads*num_threads; i++) {
- yield_data[i] = YIELD_S | YIELD_D;
- }
- return;
- }
- int curr_tid=id_to_int(action->get_tid());
-
- for(int u = 0; u < num_threads; u++) {
- for(int v = 0; v < num_threads; v++) {
- int yield_state=parent->get_yield_data(u, v);
- bool next_enabled=scheduler->is_enabled(int_to_id(v));
- bool curr_enabled=parent->is_enabled(int_to_id(v));
- if (!next_enabled) {
- //Compute intersection of ES and E
- yield_state&=~YIELD_E;
- //Check to see if we disabled the thread
- if (u==curr_tid && curr_enabled)
- yield_state|=YIELD_D;
- }
- yield_data[YIELD_INDEX(u, v, num_threads)]=yield_state;
- }
- yield_data[YIELD_INDEX(u, curr_tid, num_threads)]=(yield_data[YIELD_INDEX(u, curr_tid, num_threads)]&~YIELD_P)|YIELD_S;
- }
- //handle curr.yield(t) part of computation
- if (action->is_yield()) {
- for(int v = 0; v < num_threads; v++) {
- int yield_state=yield_data[YIELD_INDEX(curr_tid, v, num_threads)];
- if ((yield_state & (YIELD_E | YIELD_D)) && (!(yield_state & YIELD_S)))
- yield_state |= YIELD_P;
- yield_state &= YIELD_P;
- if (scheduler->is_enabled(int_to_id(v))) {
- yield_state|=YIELD_E;
- }
- yield_data[YIELD_INDEX(curr_tid, v, num_threads)]=yield_state;
- }
- }
}
-/** @brief Node desctructor */
+/** @brief Node destructor */
delete action;
if (uninit_action)
delete uninit_action;
- if (enabled_array)
- model_free(enabled_array);
- if (yield_data)
- model_free(yield_data);
}
/** Prints debugging info for the ModelAction associated with this Node */
void Node::print() const
{
action->print();
- model_print(" thread status: ");
- if (enabled_array) {
- for (int i = 0; i < num_threads; i++) {
- char str[20];
- enabled_type_to_string(enabled_array[i], str);
- model_print("[%d: %s]", i, str);
- }
- model_print("\n");
- } else
- model_print("(info not available)\n");
- model_print(" backtrack: %s", backtrack_empty() ? "empty" : "non-empty ");
- for (int i = 0; i < (int)backtrack.size(); i++)
- if (backtrack[i] == true)
- model_print("[%d]", i);
- model_print("\n");
-
- model_print(" read from past: %s", read_from_past_empty() ? "empty" : "non-empty ");
- for (int i = read_from_past_idx + 1; i < (int)read_from_past.size(); i++)
- model_print("[%d]", read_from_past[i]->get_seq_number());
- model_print("\n");
-
- model_print(" misc: %s\n", misc_empty() ? "empty" : "non-empty");
-}
-
-/****************************** threads backtracking **************************/
-
-/**
- * Checks if the Thread associated with this thread ID has been explored from
- * this Node already.
- * @param tid is the thread ID to check
- * @return true if this thread choice has been explored already, false
- * otherwise
- */
-bool Node::has_been_explored(thread_id_t tid) const
-{
- int id = id_to_int(tid);
- return explored_children[id];
-}
-
-/**
- * Checks if the backtracking set is empty.
- * @return true if the backtracking set is empty
- */
-bool Node::backtrack_empty() const
-{
- return (numBacktracks == 0);
-}
-
-void Node::explore(thread_id_t tid)
-{
- int i = id_to_int(tid);
- ASSERT(i < ((int)backtrack.size()));
- if (backtrack[i]) {
- backtrack[i] = false;
- numBacktracks--;
- }
- explored_children[i] = true;
-}
-
-/**
- * Mark the appropriate backtracking information for exploring a thread choice.
- * @param act The ModelAction to explore
- */
-void Node::explore_child(ModelAction *act, enabled_type_t *is_enabled)
-{
- if (!enabled_array)
- enabled_array = (enabled_type_t *)model_malloc(sizeof(enabled_type_t) * num_threads);
- if (is_enabled != NULL)
- memcpy(enabled_array, is_enabled, sizeof(enabled_type_t) * num_threads);
- else {
- for (int i = 0; i < num_threads; i++)
- enabled_array[i] = THREAD_DISABLED;
- }
-
- explore(act->get_tid());
-}
-
-/**
- * Records a backtracking reference for a thread choice within this Node.
- * Provides feedback as to whether this thread choice is already set for
- * backtracking.
- * @return false if the thread was already set to be backtracked, true
- * otherwise
- */
-bool Node::set_backtrack(thread_id_t id)
-{
- int i = id_to_int(id);
- ASSERT(i < ((int)backtrack.size()));
- if (backtrack[i])
- return false;
- backtrack[i] = true;
- numBacktracks++;
- return true;
-}
-
-thread_id_t Node::get_next_backtrack()
-{
- /** @todo Find next backtrack */
- unsigned int i;
- for (i = 0; i < backtrack.size(); i++)
- if (backtrack[i] == true)
- break;
- /* Backtrack set was empty? */
- ASSERT(i != backtrack.size());
-
- backtrack[i] = false;
- numBacktracks--;
- return int_to_id(i);
-}
-
-void Node::clear_backtracking()
-{
- for (unsigned int i = 0; i < backtrack.size(); i++)
- backtrack[i] = false;
- for (unsigned int i = 0; i < explored_children.size(); i++)
- explored_children[i] = false;
- numBacktracks = 0;
-}
-
-/************************** end threads backtracking **************************/
-
-void Node::set_misc_max(int i)
-{
- misc_max = i;
-}
-
-int Node::get_misc() const
-{
- return misc_index;
-}
-
-bool Node::increment_misc()
-{
- return (misc_index < misc_max) && ((++misc_index) < misc_max);
-}
-
-bool Node::misc_empty() const
-{
- return (misc_index + 1) >= misc_max;
-}
-
-bool Node::is_enabled(Thread *t) const
-{
- int thread_id = id_to_int(t->get_id());
- return thread_id < num_threads && (enabled_array[thread_id] != THREAD_DISABLED);
-}
-
-enabled_type_t Node::enabled_status(thread_id_t tid) const
-{
- int thread_id = id_to_int(tid);
- if (thread_id < num_threads)
- return enabled_array[thread_id];
- else
- return THREAD_DISABLED;
-}
-
-bool Node::is_enabled(thread_id_t tid) const
-{
- int thread_id = id_to_int(tid);
- return thread_id < num_threads && (enabled_array[thread_id] != THREAD_DISABLED);
-}
-
-bool Node::has_priority(thread_id_t tid) const
-{
- return fairness[id_to_int(tid)].priority;
-}
-
-bool Node::has_priority_over(thread_id_t tid1, thread_id_t tid2) const
-{
- return get_yield_data(id_to_int(tid1), id_to_int(tid2)) & YIELD_P;
-}
-
-/*********************************** read from ********************************/
-
-/**
- * Get the current state of the may-read-from set iteration
- * @return The read-from type we should currently be checking (past)
- */
-read_from_type_t Node::get_read_from_status()
-{
- if (read_from_status == READ_FROM_PAST && read_from_past.empty())
- increment_read_from();
- return read_from_status;
-}
-
-/**
- * Iterate one step in the may-read-from iteration. This includes a step in
- * reading from the either the past or the future.
- * @return True if there is a new read-from to explore; false otherwise
- */
-bool Node::increment_read_from()
-{
- if (increment_read_from_past()) {
- read_from_status = READ_FROM_PAST;
- return true;
- }
- read_from_status = READ_FROM_NONE;
- return false;
-}
-
-/**
- * @return True if there are any new read-froms to explore
- */
-bool Node::read_from_empty() const
-{
- return read_from_past_empty();
-}
-
-/**
- * Get the total size of the may-read-from set, including both past
- * @return The size of may-read-from
- */
-unsigned int Node::read_from_size() const
-{
- return read_from_past.size();
-}
-
-/******************************* end read from ********************************/
-
-/****************************** read from past ********************************/
-
-/** @brief Prints info about read_from_past set */
-void Node::print_read_from_past()
-{
- for (unsigned int i = 0; i < read_from_past.size(); i++)
- read_from_past[i]->print();
-}
-
-/**
- * Add an action to the read_from_past set.
- * @param act is the action to add
- */
-void Node::add_read_from_past(const ModelAction *act)
-{
- read_from_past.push_back(act);
-}
-
-/**
- * Gets the next 'read_from_past' action from this Node. Only valid for a node
- * where this->action is a 'read'.
- * @return The first element in read_from_past
- */
-const ModelAction * Node::get_read_from_past() const
-{
- if (read_from_past_idx < read_from_past.size()) {
- int random_index = rand() % read_from_past.size();
- return read_from_past[random_index];
- }
-// return read_from_past[read_from_past_idx];
- else
- return NULL;
-}
-
-const ModelAction * Node::get_read_from_past(int i) const
-{
- return read_from_past[i];
-}
-
-int Node::get_read_from_past_size() const
-{
- return read_from_past.size();
-}
-
-/**
- * Checks whether the readsfrom set for this node is empty.
- * @return true if the readsfrom set is empty.
- */
-bool Node::read_from_past_empty() const
-{
- return ((read_from_past_idx + 1) >= read_from_past.size());
-}
-
-/**
- * Increments the index into the readsfrom set to explore the next item.
- * @return Returns false if we have explored all items.
- */
-bool Node::increment_read_from_past()
-{
- DBG();
- if (read_from_past_idx < read_from_past.size()) {
- read_from_past_idx++;
- return read_from_past_idx < read_from_past.size();
- }
- return false;
-}
-
-/************************** end read from past ********************************/
-
-
-/**
- * Increments some behavior's index, if a new behavior is available
- * @return True if there is a new behavior available; otherwise false
- */
-bool Node::increment_behaviors()
-{
- /* satisfy a different misc_index values */
- if (increment_misc())
- return true;
- /* read from a different value */
- if (increment_read_from())
- return true;
- return false;
}
NodeStack::NodeStack() :
-/** Note: The is_enabled set contains what actions were enabled when
- * act was chosen. */
+/** Record the explored action as a new Node on the stack. */
-ModelAction * NodeStack::explore_action(ModelAction *act, enabled_type_t *is_enabled)
+ModelAction * NodeStack::explore_action(ModelAction *act)
{
DBG();
- if ((head_idx + 1) < (int)node_list.size()) {
- head_idx++;
- return node_list[head_idx]->get_action();
- }
-
/* Record action */
Node *head = get_head();
- Node *prevfairness = NULL;
- if (head) {
- head->explore_child(act, is_enabled);
- if (get_params()->fairwindow != 0 && head_idx > (int)get_params()->fairwindow)
- prevfairness = node_list[head_idx - get_params()->fairwindow];
- }
int next_threads = execution->get_num_threads();
if (act->get_type() == THREAD_CREATE || act->get_type() == PTHREAD_CREATE ) // may need to be changed
next_threads++;
- node_list.push_back(new Node(get_params(), act, head, next_threads, prevfairness));
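+ /* Every action gets a fresh Node; previously recorded paths are never replayed */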
+ node_list.push_back(new Node(act, head, next_threads));
total_nodes++;
head_idx++;
return NULL;
}
-/**
- * Empties the stack of all trailing nodes after a given position and calls the
- * destructor for each. This function is provided an offset which determines
- * how many nodes (relative to the current replay state) to save before popping
- * the stack.
- * @param numAhead gives the number of Nodes (including this Node) to skip over
- * before removing nodes.
- */
-void NodeStack::pop_restofstack(int numAhead)
-{
- /* Diverging from previous execution; clear out remainder of list */
- unsigned int it = head_idx + numAhead;
- for (unsigned int i = it; i < node_list.size(); i++)
- delete node_list[i];
- node_list.resize(it);
- node_list.back()->clear_backtracking();
-}
/** Reset the node stack. */
void NodeStack::full_reset()
bool priority;
};
-/**
- * @brief Types of read-from relations
- *
- * Our "may-read-from" set is composed of multiple types of reads, and we have
- * to iterate through all of them in the backtracking search. This enumeration
- * helps to identify which type of read-from we are currently observing.
- */
-typedef enum {
- READ_FROM_PAST, /**< @brief Read from a prior, existing store */
- READ_FROM_NONE, /**< @brief A NULL state, which should not be reached */
-} read_from_type_t;
-
-#define YIELD_E 1
-#define YIELD_D 2
-#define YIELD_S 4
-#define YIELD_P 8
-#define YIELD_INDEX(tid1, tid2, num_threads) (tid1*num_threads+tid2)
-
/**
* @brief A single node in a NodeStack
*/
class Node {
public:
- Node(const struct model_params *params, ModelAction *act, Node *par,
- int nthreads, Node *prevfairness);
+ Node(ModelAction *act, Node *par, int nthreads);
~Node();
- /* return true = thread choice has already been explored */
- bool has_been_explored(thread_id_t tid) const;
- /* return true = backtrack set is empty */
- bool backtrack_empty() const;
-
- void clear_backtracking();
- void explore_child(ModelAction *act, enabled_type_t *is_enabled);
- /* return false = thread was already in backtrack */
- bool set_backtrack(thread_id_t id);
- thread_id_t get_next_backtrack();
+
bool is_enabled(Thread *t) const;
bool is_enabled(thread_id_t tid) const;
- enabled_type_t enabled_status(thread_id_t tid) const;
ModelAction * get_action() const { return action; }
void set_uninit_action(ModelAction *act) { uninit_action = act; }
ModelAction * get_uninit_action() const { return uninit_action; }
- bool has_priority(thread_id_t tid) const;
- void update_yield(Scheduler *);
- bool has_priority_over(thread_id_t tid, thread_id_t tid2) const;
int get_num_threads() const { return num_threads; }
/** @return the parent Node to this Node; that is, the action that
* occurred previously in the stack. */
Node * get_parent() const { return parent; }
- read_from_type_t get_read_from_status();
- bool increment_read_from();
- bool read_from_empty() const;
- unsigned int read_from_size() const;
-
- void print_read_from_past();
- void add_read_from_past(const ModelAction *act);
- const ModelAction * get_read_from_past() const;
- const ModelAction * get_read_from_past(int i) const;
- int get_read_from_past_size() const;
-
- enabled_type_t *get_enabled_array() {return enabled_array;}
- void set_misc_max(int i);
- int get_misc() const;
- bool increment_misc();
- bool misc_empty() const;
-
- bool increment_behaviors();
void print() const;
MEMALLOC
private:
- void explore(thread_id_t tid);
- int get_yield_data(int tid1, int tid2) const;
- bool read_from_past_empty() const;
- bool increment_read_from_past();
- read_from_type_t read_from_status;
- const struct model_params * get_params() const { return params; }
-
ModelAction * const action;
- const struct model_params * const params;
-
/** @brief ATOMIC_UNINIT action which was created at this Node */
ModelAction *uninit_action;
-
Node * const parent;
const int num_threads;
- ModelVector<bool> explored_children;
- ModelVector<bool> backtrack;
- ModelVector<struct fairness_info> fairness;
- int numBacktracks;
- enabled_type_t *enabled_array;
-
- /**
- * The set of past ModelActions that this the action at this Node may
- * read from. Only meaningful if this Node represents a 'read' action.
- */
- ModelVector<const ModelAction *> read_from_past;
- unsigned int read_from_past_idx;
-
- int misc_index;
- int misc_max;
- int * yield_data;
};
typedef ModelVector<Node *> node_list_t;
void register_engine(const ModelExecution *exec);
- ModelAction * explore_action(ModelAction *act, enabled_type_t * is_enabled);
+ ModelAction * explore_action(ModelAction *act);
Node * get_head() const;
Node * get_next() const;
void reset_execution();
- void pop_restofstack(int numAhead);
void full_reset();
int get_total_nodes() { return total_nodes; }
* the model checker.
*/
struct model_params {
- int maxreads;
- bool yieldon;
- bool yieldblock;
- unsigned int fairwindow;
unsigned int enabledcount;
unsigned int bound;
unsigned int uninitvalue;
cdsc::condition_variable *v = execution->getCondMap()->get(p_cond);
cdsc::mutex *m = execution->getMutexMap()->get(p_mutex);
- model->switch_to_master(new ModelAction(NOOP, std::memory_order_seq_cst, v, NULL));
+ model->switch_to_master(new ModelAction(NOOP, std::memory_order_seq_cst, v));
// v->wait(*m);
// printf("timed_wait called\n");
return 0;
return enabled[id];
}
-void Scheduler::update_sleep_set(Node *n) {
- enabled_type_t *enabled_array = n->get_enabled_array();
- for (int i = 0; i < enabled_len; i++) {
- if (enabled_array[i] == THREAD_SLEEP_SET) {
- enabled[i] = THREAD_SLEEP_SET;
- }
- }
-}
-
/**
* Add a Thread to the sleep set.
* @param t The Thread to add
*/
Thread * Scheduler::select_next_thread(Node *n)
{
- bool have_enabled_thread_with_priority = false;
- if (model->params.fairwindow != 0) {
- for (int i = 0; i < enabled_len; i++) {
- thread_id_t tid = int_to_id(i);
- if (n->has_priority(tid)) {
- DEBUG("Node (tid %d) has priority\n", i);
- if (enabled[i] != THREAD_DISABLED)
- have_enabled_thread_with_priority = true;
- }
- }
- }
-
int avail_threads = enabled_len; // number of available threads
int thread_list[enabled_len]; // keep a list of threads to select from
for (int i = 0; i< enabled_len; i++){
}
while (avail_threads > 0) {
- int random_index = rand() % avail_threads;
+ int random_index = random() % avail_threads;
curr_thread_index = thread_list[random_index]; // randomly select a thread from available threads
// curr_thread_index = (curr_thread_index + i + 1) % enabled_len;
thread_id_t curr_tid = int_to_id(curr_thread_index);
- if (model->params.yieldon) {
- bool bad_thread = false;
- for (int j = 0; j < enabled_len; j++) {
- thread_id_t tother = int_to_id(j);
- if ((enabled[j] != THREAD_DISABLED) && n->has_priority_over(curr_tid, tother)) {
- bad_thread=true;
- break;
- }
- }
- if (bad_thread) {
- thread_list[random_index] = thread_list[avail_threads - 1]; // remove this threads from available threads
- avail_threads--;
-
- continue;
- }
- }
- if (enabled[curr_thread_index] == THREAD_ENABLED &&
- (!have_enabled_thread_with_priority || n->has_priority(curr_tid))) {
+ if (enabled[curr_thread_index] == THREAD_ENABLED) {
return model->get_thread(curr_tid);
} else { // remove this threads from available threads
thread_list[random_index] = thread_list[avail_threads - 1];
void remove_sleep(Thread *t);
void add_sleep(Thread *t);
enabled_type_t get_enabled(const Thread *t) const;
- void update_sleep_set(Node *n);
bool is_enabled(const Thread *t) const;
bool is_enabled(thread_id_t tid) const;
bool is_sleep_set(const Thread *t) const;
printf("r2=%d\n",r2);
}
-int user_main(int argc, char **argv)
+int main(int argc, char **argv)
{
thrd_t t1, t2;
+++ /dev/null
-/**
- * @file workqueue.h
- * @brief Provides structures for queueing ModelChecker actions to be taken
- */
-
-#ifndef __WORKQUEUE_H__
-#define __WORKQUEUE_H__
-
-#include "mymemory.h"
-#include "stl-model.h"
-
-class ModelAction;
-
-typedef enum {
- WORK_NONE = 0, /**< No work to be done */
- WORK_CHECK_CURR_ACTION, /**< Check the current action; used for the
- first action of the work loop */
- WORK_CHECK_RELEASE_SEQ, /**< Check if any pending release sequences
- are resolved */
- WORK_CHECK_MO_EDGES, /**< Check if new mo_graph edges can be added */
-} model_work_t;
-
-/**
- */
-class WorkQueueEntry {
- public:
- /** @brief Type of work queue entry */
- model_work_t type;
-
- /**
- * @brief The ModelAction to work on
- * @see MOEdgeWorkEntry
- */
- ModelAction *action;
-};
-
-/**
- * @brief Work: perform initial promise, mo_graph checks on the current action
- *
- * This WorkQueueEntry performs the normal, first-pass checks for a ModelAction
- * that is currently being explored. The current ModelAction (@a action) is the
- * only relevant parameter to this entry.
- */
-class CheckCurrWorkEntry : public WorkQueueEntry {
- public:
- /**
- * @brief Constructor for a "check current action" work entry
- * @param curr The current action
- */
- CheckCurrWorkEntry(ModelAction *curr) {
- type = WORK_CHECK_CURR_ACTION;
- action = curr;
- }
-};
-
-/**
- * @brief Work: check a ModelAction for new mo_graph edges
- *
- * This WorkQueueEntry checks for new mo_graph edges for a particular
- * ModelAction (e.g., that was just generated or that updated its
- * synchronization). The ModelAction @a action is the only relevant parameter
- * to this entry.
- */
-class MOEdgeWorkEntry : public WorkQueueEntry {
- public:
- /**
- * @brief Constructor for a mo_edge work entry
- * @param updated The ModelAction which was updated, triggering this work
- */
- MOEdgeWorkEntry(ModelAction *updated) {
- type = WORK_CHECK_MO_EDGES;
- action = updated;
- }
-};
-
-/** @brief typedef for the work queue type */
-typedef ModelList<WorkQueueEntry> work_queue_t;
-
-#endif /* __WORKQUEUE_H__ */