X-Git-Url: http://demsky.eecs.uci.edu/git/?a=blobdiff_plain;f=model.cc;h=d9ff365e6808494c4c667272f7dd9b7c4f9d5a5f;hb=bee1e27429b9a66e414eb83cf14e2089dd40a79e;hp=d523fc29ad786a861d0eca41eb0a3bd93dfa4991;hpb=c4c6c997cb5d2d651d4dfa76390a42592432d266;p=model-checker.git diff --git a/model.cc b/model.cc index d523fc2..d9ff365 100644 --- a/model.cc +++ b/model.cc @@ -61,7 +61,7 @@ struct model_snapshot_members { unsigned int next_thread_id; modelclock_t used_sequence_numbers; ModelAction *next_backtrack; - std::vector< bug_message *, SnapshotAlloc > bugs; + SnapVector bugs; struct execution_stats stats; bool failed_promise; bool too_many_reads; @@ -85,12 +85,12 @@ ModelChecker::ModelChecker(struct model_params params) : obj_map(new HashTable()), lock_waiters_map(new HashTable()), condvar_waiters_map(new HashTable()), - obj_thrd_map(new HashTable *, uintptr_t, 4 >()), - promises(new std::vector< Promise *, SnapshotAlloc >()), - futurevalues(new std::vector< struct PendingFutureValue, SnapshotAlloc >()), - pending_rel_seqs(new std::vector< struct release_seq *, SnapshotAlloc >()), - thrd_last_action(new std::vector< ModelAction *, SnapshotAlloc >(1)), - thrd_last_fence_release(new std::vector< ModelAction *, SnapshotAlloc >()), + obj_thrd_map(new HashTable *, uintptr_t, 4 >()), + promises(new SnapVector()), + futurevalues(new SnapVector()), + pending_rel_seqs(new SnapVector()), + thrd_last_action(new SnapVector(1)), + thrd_last_fence_release(new SnapVector()), node_stack(new NodeStack()), priv(new struct model_snapshot_members()), mo_graph(new CycleGraph()) @@ -137,11 +137,11 @@ static action_list_t * get_safe_ptr_action(HashTable * get_safe_ptr_vect_action(HashTable *, uintptr_t, 4> * hash, void * ptr) +static SnapVector * get_safe_ptr_vect_action(HashTable *, uintptr_t, 4> * hash, void * ptr) { - std::vector *tmp = hash->get(ptr); + SnapVector *tmp = hash->get(ptr); if (tmp == NULL) { - tmp = new std::vector(); + tmp = new SnapVector(); hash->put(ptr, tmp); } return tmp; @@ -204,34 +204,43 @@ Node * ModelChecker::get_curr_node() const return node_stack->get_head(); } +/** + * @brief Select the next thread to execute based on the curren action + * + * RMW actions occur in two parts, and we cannot split them. And THREAD_CREATE + * actions should be followed by the execution of their child thread. In either + * case, the current action should determine the next thread schedule. + * + * @param curr The current action + * @return The next thread to run, if the current action will determine this + * selection; otherwise NULL + */ +Thread * ModelChecker::action_select_next_thread(const ModelAction *curr) const +{ + /* Do not split atomic RMW */ + if (curr->is_rmwr()) + return get_thread(curr); + /* Follow CREATE with the created thread */ + if (curr->get_type() == THREAD_CREATE) + return curr->get_thread_operand(); + return NULL; +} + /** * @brief Choose the next thread to execute. * - * This function chooses the next thread that should execute. It can force the - * adjacency of read/write portions of a RMW action, force THREAD_CREATE to be - * followed by a THREAD_START, or it can enforce execution replay/backtracking. - * The model-checker may have no preference regarding the next thread (i.e., - * when exploring a new execution ordering), in which case we defer to the - * scheduler. + * This function chooses the next thread that should execute. 
It can enforce + * execution replay/backtracking or, if the model-checker has no preference + * regarding the next thread (i.e., when exploring a new execution ordering), + * we defer to the scheduler. * - * @param curr Optional: The current ModelAction. Only used if non-NULL and it - * might guide the choice of next thread (i.e., THREAD_CREATE should be - * followed by THREAD_START, or ATOMIC_RMWR followed by ATOMIC_{RMW,RMWC}) - * @return The next chosen thread to run, if any exist. Or else if no threads - * remain to be executed, return NULL. + * @return The next chosen thread to run, if any exist. Or else if the current + * execution should terminate, return NULL. */ -Thread * ModelChecker::get_next_thread(ModelAction *curr) +Thread * ModelChecker::get_next_thread() { thread_id_t tid; - if (curr != NULL) { - /* Do not split atomic actions. */ - if (curr->is_rmwr()) - return get_thread(curr); - else if (curr->get_type() == THREAD_CREATE) - return curr->get_thread_operand(); - } - /* * Have we completed exploring the preselected path? Then let the * scheduler decide @@ -400,6 +409,22 @@ bool ModelChecker::is_deadlocked() const return blocking_threads; } +/** + * Check if a Thread has entered a circular wait deadlock situation. This will + * not check other threads for potential deadlock situations, and may miss + * deadlocks involving WAIT. + * + * @param t The thread which may have entered a deadlock + * @return True if this Thread entered a deadlock; false otherwise + */ +bool ModelChecker::is_circular_wait(const Thread *t) const +{ + for (Thread *waiting = t->waiting_on() ; waiting != NULL; waiting = waiting->waiting_on()) + if (waiting == t) + return true; + return false; +} + /** * Check if this is a complete execution. That is, have all thread completed * execution (rather than exiting because sleep sets have forced a redundant @@ -514,7 +539,7 @@ void ModelChecker::print_execution(bool printbugs) const { print_program_output(); - if (DBG_ENABLED() || params.verbose) { + if (params.verbose) { model_print("Earliest divergence point since last feasible execution:\n"); if (earliest_diverge) earliest_diverge->print(); @@ -558,7 +583,7 @@ bool ModelChecker::next_execution() record_stats(); /* Output */ - if (DBG_ENABLED() || params.verbose || (complete && have_bug_reports())) + if (params.verbose || (complete && have_bug_reports())) print_execution(complete); else clear_program_output(); @@ -617,8 +642,8 @@ ModelAction * ModelChecker::get_last_fence_conflict(ModelAction *act) const * load-acquire * or * load --sb-> fence-acquire */ - std::vector< ModelAction *, ModelAlloc > acquire_fences(get_num_threads(), NULL); - std::vector< ModelAction *, ModelAlloc > prior_loads(get_num_threads(), NULL); + ModelVector acquire_fences(get_num_threads(), NULL); + ModelVector prior_loads(get_num_threads(), NULL); bool found_acquire_fences = false; for ( ; rit != list->rend(); rit++) { ModelAction *prev = *rit; @@ -797,6 +822,21 @@ void ModelChecker::set_backtracking(ModelAction *act) if (unfair) continue; } + + /* See if CHESS-like yield fairness allows */ + if (model->params.yieldon) { + bool unfair = false; + for (int t = 0; t < node->get_num_threads(); t++) { + thread_id_t tother = int_to_id(t); + if (node->is_enabled(tother) && node->has_priority_over(tid, tother)) { + unfair = true; + break; + } + } + if (unfair) + continue; + } + /* Cache the latest backtracking point */ set_latest_backtrack(prev); @@ -853,38 +893,39 @@ ModelAction * ModelChecker::get_next_backtrack() bool 
ModelChecker::process_read(ModelAction *curr) { Node *node = curr->get_node(); - uint64_t value = VALUE_NONE; - bool updated = false; while (true) { + bool updated = false; switch (node->get_read_from_status()) { case READ_FROM_PAST: { const ModelAction *rf = node->get_read_from_past(); ASSERT(rf); mo_graph->startChanges(); - value = rf->get_value(); - check_recency(curr, rf); - bool r_status = r_modification_order(curr, rf); - if (is_infeasible() && node->increment_read_from()) { - mo_graph->rollbackChanges(); - priv->too_many_reads = false; - continue; + ASSERT(!is_infeasible()); + if (!check_recency(curr, rf)) { + if (node->increment_read_from()) { + mo_graph->rollbackChanges(); + continue; + } else { + priv->too_many_reads = true; + } } + updated = r_modification_order(curr, rf); read_from(curr, rf); mo_graph->commitChanges(); mo_check_promises(curr, true); - - updated |= r_status; break; } case READ_FROM_PROMISE: { Promise *promise = curr->get_node()->get_read_from_promise(); - promise->add_reader(curr); - value = promise->get_value(); + if (promise->add_reader(curr)) + priv->failed_promise = true; curr->set_read_from_promise(promise); mo_graph->startChanges(); + if (!check_recency(curr, promise)) + priv->too_many_reads = true; updated = r_modification_order(curr, promise); mo_graph->commitChanges(); break; @@ -893,7 +934,6 @@ bool ModelChecker::process_read(ModelAction *curr) /* Read from future value */ struct future_value fv = node->get_future_value(); Promise *promise = new Promise(curr, fv); - value = fv.value; curr->set_read_from_promise(promise); promises->push_back(promise); mo_graph->startChanges(); @@ -904,7 +944,7 @@ bool ModelChecker::process_read(ModelAction *curr) default: ASSERT(false); } - get_thread(curr)->set_return_value(value); + get_thread(curr)->set_return_value(curr->get_return_value()); return updated; } } @@ -927,20 +967,15 @@ bool ModelChecker::process_read(ModelAction *curr) */ bool ModelChecker::process_mutex(ModelAction *curr) { - std::mutex *mutex = NULL; + std::mutex *mutex = curr->get_mutex(); struct std::mutex_state *state = NULL; - if (curr->is_trylock() || curr->is_lock() || curr->is_unlock()) { - mutex = (std::mutex *)curr->get_location(); - state = mutex->get_state(); - } else if (curr->is_wait()) { - mutex = (std::mutex *)curr->get_value(); + if (mutex) state = mutex->get_state(); - } switch (curr->get_type()) { case ATOMIC_TRYLOCK: { - bool success = !state->islocked; + bool success = !state->locked; curr->set_try_lock(success); if (!success) { get_thread(curr)->set_return_value(0); @@ -952,7 +987,7 @@ bool ModelChecker::process_mutex(ModelAction *curr) case ATOMIC_LOCK: { if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock) assert_bug("Lock access before initialization"); - state->islocked = true; + state->locked = get_thread(curr); ModelAction *unlock = get_last_unlock(curr); //synchronize with the previous unlock statement if (unlock != NULL) { @@ -963,7 +998,7 @@ bool ModelChecker::process_mutex(ModelAction *curr) } case ATOMIC_UNLOCK: { //unlock the lock - state->islocked = false; + state->locked = NULL; //wake up the other threads action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, curr->get_location()); //activate all the waiting threads @@ -975,7 +1010,7 @@ bool ModelChecker::process_mutex(ModelAction *curr) } case ATOMIC_WAIT: { //unlock the lock - state->islocked = false; + state->locked = NULL; //wake up the other threads action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, (void *) 
curr->get_value()); //activate all the waiting threads @@ -1045,7 +1080,7 @@ void ModelChecker::add_future_value(const ModelAction *writer, ModelAction *read bool ModelChecker::process_write(ModelAction *curr) { /* Readers to which we may send our future value */ - std::vector< ModelAction *, ModelAlloc > send_fv; + ModelVector send_fv; bool updated_mod_order = w_modification_order(curr, &send_fv); int promise_idx = get_promise_to_resolve(curr); @@ -1349,6 +1384,8 @@ bool ModelChecker::initialize_curr_action(ModelAction **curr) bool ModelChecker::read_from(ModelAction *act, const ModelAction *rf) { ASSERT(rf); + ASSERT(rf->is_write()); + act->set_read_from(rf); if (act->is_acquire()) { rel_heads_list_t release_heads; @@ -1407,7 +1444,7 @@ bool ModelChecker::check_action_enabled(ModelAction *curr) { if (curr->is_lock()) { std::mutex *lock = (std::mutex *)curr->get_location(); struct std::mutex_state *state = lock->get_state(); - if (state->islocked) { + if (state->locked) { //Stick the action in the appropriate waiting queue get_safe_ptr_action(lock_waiters_map, curr->get_location())->push_back(curr); return false; @@ -1456,6 +1493,11 @@ ModelAction * ModelChecker::check_current_action(ModelAction *curr) wake_up_sleeping_actions(curr); + /* Compute fairness information for CHESS yield algorithm */ + if (model->params.yieldon) { + curr->get_node()->update_yield(scheduler); + } + /* Add the action to lists before any other model-checking tasks */ if (!second_part_of_rmw) add_action_to_lists(curr); @@ -1641,30 +1683,71 @@ ModelAction * ModelChecker::process_rmw(ModelAction *act) { } /** - * Checks whether a thread has read from the same write for too many times - * without seeing the effects of a later write. + * A helper function for ModelChecker::check_recency, to check if the current + * thread is able to read from a different write/promise for 'params.maxreads' + * number of steps and if that write/promise should become visible (i.e., is + * ordered later in the modification order). This helps model memory liveness. + * + * @param curr The current action. Must be a read. + * @param rf The write/promise from which we plan to read + * @param other_rf The write/promise from which we may read + * @return True if we were able to read from other_rf for params.maxreads steps + */ +template +bool ModelChecker::should_read_instead(const ModelAction *curr, const T *rf, const U *other_rf) const +{ + /* Need a different write/promise */ + if (other_rf->equals(rf)) + return false; + + /* Only look for "newer" writes/promises */ + if (!mo_graph->checkReachable(rf, other_rf)) + return false; + + SnapVector *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location()); + action_list_t *list = &(*thrd_lists)[id_to_int(curr->get_tid())]; + action_list_t::reverse_iterator rit = list->rbegin(); + ASSERT((*rit) == curr); + /* Skip past curr */ + rit++; + + /* Does this write/promise work for everyone? */ + for (int i = 0; i < params.maxreads; i++, rit++) { + ModelAction *act = *rit; + if (!act->may_read_from(other_rf)) + return false; + } + return true; +} + +/** + * Checks whether a thread has read from the same write or Promise for too many + * times without seeing the effects of a later write/Promise. * * Basic idea: - * 1) there must a different write that we could read from that would satisfy the modification order, - * 2) we must have read from the same value in excess of maxreads times, and - * 3) that other write must have been in the reads_from set for maxreads times. 
+ * 1) there must a different write/promise that we could read from, + * 2) we must have read from the same write/promise in excess of maxreads times, + * 3) that other write/promise must have been in the reads_from set for maxreads times, and + * 4) that other write/promise must be mod-ordered after the write/promise we are reading. * * If so, we decide that the execution is no longer feasible. + * + * @param curr The current action. Must be a read. + * @param rf The ModelAction/Promise from which we might read. + * @return True if the read should succeed; false otherwise */ -void ModelChecker::check_recency(ModelAction *curr, const ModelAction *rf) +template +bool ModelChecker::check_recency(ModelAction *curr, const T *rf) const { if (!params.maxreads) - return; + return true; //NOTE: Next check is just optimization, not really necessary.... - if (curr->get_node()->get_read_from_past_size() <= 1) - return; - /* Must make sure that execution is currently feasible... We could - * accidentally clear by rolling back */ - if (is_infeasible()) - return; + if (curr->get_node()->get_read_from_past_size() + + curr->get_node()->get_read_from_promise_size() <= 1) + return true; - std::vector *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location()); + SnapVector *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location()); int tid = id_to_int(curr->get_tid()); ASSERT(tid < (int)thrd_lists->size()); action_list_t *list = &(*thrd_lists)[tid]; @@ -1677,53 +1760,29 @@ void ModelChecker::check_recency(ModelAction *curr, const ModelAction *rf) /* See if we have enough reads from the same value */ for (int count = 0; count < params.maxreads; ritcopy++, count++) { if (ritcopy == list->rend()) - return; + return true; ModelAction *act = *ritcopy; if (!act->is_read()) - return; - if (act->get_reads_from() != rf) - return; - if (act->get_node()->get_read_from_past_size() <= 1) - return; + return true; + if (act->get_reads_from_promise() && !act->get_reads_from_promise()->equals(rf)) + return true; + if (act->get_reads_from() && !act->get_reads_from()->equals(rf)) + return true; + if (act->get_node()->get_read_from_past_size() + + act->get_node()->get_read_from_promise_size() <= 1) + return true; } for (int i = 0; i < curr->get_node()->get_read_from_past_size(); i++) { - /* Get write */ const ModelAction *write = curr->get_node()->get_read_from_past(i); - - /* Need a different write */ - if (write == rf) - continue; - - //NOTE: SHOULD MAKE SURE write is MOd after rf - - /* Test to see whether this is a feasible write to read from */ - /** NOTE: all members of read-from set should be - * feasible, so we no longer check it here **/ - - ritcopy = rit; - - bool feasiblewrite = true; - /* now we need to see if this write works for everyone */ - - for (int loop = params.maxreads; loop > 0; loop--, ritcopy++) { - ModelAction *act = *ritcopy; - bool foundvalue = false; - for (int j = 0; j < act->get_node()->get_read_from_past_size(); j++) { - if (act->get_node()->get_read_from_past(j) == write) { - foundvalue = true; - break; - } - } - if (!foundvalue) { - feasiblewrite = false; - break; - } - } - if (feasiblewrite) { - priv->too_many_reads = true; - return; - } + if (should_read_instead(curr, rf, write)) + return false; /* liveness failure */ } + for (int i = 0; i < curr->get_node()->get_read_from_promise_size(); i++) { + const Promise *promise = curr->get_node()->get_read_from_promise(i); + if (should_read_instead(curr, rf, promise)) + return false; /* liveness failure */ + } + return 
true; } /** @@ -1746,13 +1805,16 @@ void ModelChecker::check_recency(ModelAction *curr, const ModelAction *rf) template bool ModelChecker::r_modification_order(ModelAction *curr, const rf_type *rf) { - std::vector *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location()); + SnapVector *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location()); unsigned int i; bool added = false; ASSERT(curr->is_read()); /* Last SC fence in the current thread */ ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL); + ModelAction *last_sc_write = NULL; + if (curr->is_seqcst()) + last_sc_write = get_last_seq_cst_write(curr); /* Iterate over all threads */ for (i = 0; i < thrd_lists->size(); i++) { @@ -1772,7 +1834,18 @@ bool ModelChecker::r_modification_order(ModelAction *curr, const rf_type *rf) for (rit = list->rbegin(); rit != list->rend(); rit++) { ModelAction *act = *rit; - if (act->is_write() && !act->equals(rf) && act != curr) { + /* Skip curr */ + if (act == curr) + continue; + /* Don't want to add reflexive edges on 'rf' */ + if (act->equals(rf)) { + if (act->happens_before(curr)) + break; + else + continue; + } + + if (act->is_write()) { /* C++, Section 29.3 statement 5 */ if (curr->is_seqcst() && last_sc_fence_thread_local && *act < *last_sc_fence_thread_local) { @@ -1793,15 +1866,19 @@ bool ModelChecker::r_modification_order(ModelAction *curr, const rf_type *rf) } } + /* C++, Section 29.3 statement 3 (second subpoint) */ + if (curr->is_seqcst() && last_sc_write && act == last_sc_write) { + added = mo_graph->addEdge(act, rf) || added; + break; + } + /* * Include at most one act per-thread that "happens - * before" curr. Don't consider reflexively. + * before" curr */ - if (act->happens_before(curr) && act != curr) { + if (act->happens_before(curr)) { if (act->is_write()) { - if (!act->equals(rf)) { - added = mo_graph->addEdge(act, rf) || added; - } + added = mo_graph->addEdge(act, rf) || added; } else { const ModelAction *prevrf = act->get_reads_from(); const Promise *prevrf_promise = act->get_reads_from_promise(); @@ -1852,9 +1929,9 @@ bool ModelChecker::r_modification_order(ModelAction *curr, const rf_type *rf) * value. If NULL, then don't record any future values. 
* @return True if modification order edges were added; false otherwise */ -bool ModelChecker::w_modification_order(ModelAction *curr, std::vector< ModelAction *, ModelAlloc > *send_fv) +bool ModelChecker::w_modification_order(ModelAction *curr, ModelVector *send_fv) { - std::vector *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location()); + SnapVector *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location()); unsigned int i; bool added = false; ASSERT(curr->is_write()); @@ -1998,7 +2075,7 @@ bool ModelChecker::thin_air_constraint_may_allow(const ModelAction *writer, cons */ bool ModelChecker::mo_may_allow(const ModelAction *writer, const ModelAction *reader) { - std::vector *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, reader->get_location()); + SnapVector *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, reader->get_location()); unsigned int i; /* Iterate over all threads */ for (i = 0; i < thrd_lists->size(); i++) { @@ -2099,7 +2176,7 @@ bool ModelChecker::release_seq_heads(const ModelAction *rf, release_heads->push_back(fence_release); int tid = id_to_int(rf->get_tid()); - std::vector *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, rf->get_location()); + SnapVector *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, rf->get_location()); action_list_t *list = &(*thrd_lists)[tid]; action_list_t::const_reverse_iterator rit; @@ -2242,7 +2319,7 @@ void ModelChecker::get_release_seq_heads(ModelAction *acquire, bool ModelChecker::resolve_release_sequences(void *location, work_queue_t *work_queue) { bool updated = false; - std::vector< struct release_seq *, SnapshotAlloc >::iterator it = pending_rel_seqs->begin(); + SnapVector::iterator it = pending_rel_seqs->begin(); while (it != pending_rel_seqs->end()) { struct release_seq *pending = *it; ModelAction *acquire = pending->acquire; @@ -2313,9 +2390,9 @@ void ModelChecker::add_action_to_lists(ModelAction *act) int uninit_id = -1; action_list_t *list = get_safe_ptr_action(obj_map, act->get_location()); if (list->empty() && act->is_atomic_var()) { - uninit = new_uninitialized_action(act->get_location()); + uninit = get_uninitialized_action(act); uninit_id = id_to_int(uninit->get_tid()); - list->push_back(uninit); + list->push_front(uninit); } list->push_back(act); @@ -2323,7 +2400,7 @@ void ModelChecker::add_action_to_lists(ModelAction *act) if (uninit) action_trace->push_front(uninit); - std::vector *vec = get_safe_ptr_vect_action(obj_thrd_map, act->get_location()); + SnapVector *vec = get_safe_ptr_vect_action(obj_thrd_map, act->get_location()); if (tid >= (int)vec->size()) vec->resize(priv->next_thread_id); (*vec)[tid].push_back(act); @@ -2346,7 +2423,7 @@ void ModelChecker::add_action_to_lists(ModelAction *act) void *mutex_loc = (void *) act->get_value(); get_safe_ptr_action(obj_map, mutex_loc)->push_back(act); - std::vector *vec = get_safe_ptr_vect_action(obj_thrd_map, mutex_loc); + SnapVector *vec = get_safe_ptr_vect_action(obj_thrd_map, mutex_loc); if (tid >= (int)vec->size()) vec->resize(priv->next_thread_id); (*vec)[tid].push_back(act); @@ -2395,8 +2472,11 @@ ModelAction * ModelChecker::get_last_seq_cst_write(ModelAction *curr) const action_list_t *list = get_safe_ptr_action(obj_map, location); /* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */ action_list_t::reverse_iterator rit; - for (rit = list->rbegin(); rit != list->rend(); rit++) - if ((*rit)->is_write() && (*rit)->is_seqcst() && (*rit) != curr) + for (rit = list->rbegin(); (*rit) != curr; rit++) + ; + 
rit++; /* Skip past curr */ + for ( ; rit != list->rend(); rit++) + if ((*rit)->is_write() && (*rit)->is_seqcst()) return *rit; return NULL; } @@ -2490,8 +2570,7 @@ int ModelChecker::get_promise_to_resolve(const ModelAction *curr) const */ bool ModelChecker::resolve_promise(ModelAction *write, unsigned int promise_idx) { - std::vector< ModelAction *, ModelAlloc > actions_to_check; - promise_list_t mustResolve; + ModelVector actions_to_check; Promise *promise = (*promises)[promise_idx]; for (unsigned int i = 0; i < promise->get_num_readers(); i++) { @@ -2501,20 +2580,23 @@ bool ModelChecker::resolve_promise(ModelAction *write, unsigned int promise_idx) } /* Make sure the promise's value matches the write's value */ ASSERT(promise->is_compatible(write) && promise->same_value(write)); - mo_graph->resolvePromise(promise, write, &mustResolve); + if (!mo_graph->resolvePromise(promise, write)) + priv->failed_promise = true; promises->erase(promises->begin() + promise_idx); - - /** @todo simplify the 'mustResolve' stuff */ - ASSERT(mustResolve.size() <= 1); - - if (!mustResolve.empty() && mustResolve[0] != promise) - priv->failed_promise = true; - delete promise; + /** + * @todo It is possible to end up in an inconsistent state, where a + * "resolved" promise may still be referenced if + * CycleGraph::resolvePromise() failed, so don't delete 'promise'. + * + * Note that the inconsistency only matters when dumping mo_graph to + * file. + * + * delete promise; + */ //Check whether reading these writes has made threads unable to //resolve promises - for (unsigned int i = 0; i < actions_to_check.size(); i++) { ModelAction *read = actions_to_check[i]; mo_check_promises(read, true); @@ -2663,7 +2745,7 @@ void ModelChecker::compute_relseq_breakwrites(ModelAction *curr) */ void ModelChecker::build_may_read_from(ModelAction *curr) { - std::vector *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location()); + SnapVector *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location()); unsigned int i; ASSERT(curr->is_read()); @@ -2754,14 +2836,21 @@ bool ModelChecker::sleep_can_read_from(ModelAction *curr, const ModelAction *wri } /** - * @brief Create a new action representing an uninitialized atomic - * @param location The memory location of the atomic object - * @return A pointer to a new ModelAction + * @brief Get an action representing an uninitialized atomic + * + * This function may create a new one or try to retrieve one from the NodeStack + * + * @param curr The current action, which prompts the creation of an UNINIT action + * @return A pointer to the UNINIT ModelAction */ -ModelAction * ModelChecker::new_uninitialized_action(void *location) const +ModelAction * ModelChecker::get_uninitialized_action(const ModelAction *curr) const { - ModelAction *act = (ModelAction *)snapshot_malloc(sizeof(class ModelAction)); - act = new (act) ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, location, 0, model_thread); + Node *node = curr->get_node(); + ModelAction *act = node->get_uninit_action(); + if (!act) { + act = new ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, curr->get_location(), model->params.uninitvalue, model_thread); + node->set_uninit_action(act); + } act->create_cv(NULL); return act; } @@ -2775,7 +2864,9 @@ static void print_list(action_list_t *list) unsigned int hash = 0; for (it = list->begin(); it != list->end(); it++) { - (*it)->print(); + const ModelAction *act = *it; + if (act->get_seq_number() > 0) + act->print(); hash = hash^(hash<<3)^((*it)->hash()); 
} model_print("HASH %u\n", hash); @@ -2842,6 +2933,14 @@ void ModelChecker::print_summary() const print_infeasibility(" INFEASIBLE"); print_list(action_trace); model_print("\n"); + if (!promises->empty()) { + model_print("Pending promises:\n"); + for (unsigned int i = 0; i < promises->size(); i++) { + model_print(" [P%u] ", i); + (*promises)[i]->print(); + } + model_print("\n"); + } } /** @@ -2855,15 +2954,6 @@ void ModelChecker::add_thread(Thread *t) scheduler->add_thread(t); } -/** - * Removes a thread from the scheduler. - * @param the thread to remove. - */ -void ModelChecker::remove_thread(Thread *t) -{ - scheduler->remove_thread(t); -} - /** * @brief Get a Thread reference by its ID * @param tid The Thread's ID @@ -2952,6 +3042,7 @@ uint64_t ModelChecker::switch_to_master(ModelAction *act) { DBG(); Thread *old = thread_current(); + scheduler->set_current_thread(NULL); ASSERT(!old->get_pending()); old->set_pending(act); if (Thread::swap(old, &system_context) < 0) { @@ -2988,7 +3079,11 @@ Thread * ModelChecker::take_step(ModelAction *curr) if (curr_thrd->is_blocked() || curr_thrd->is_complete()) scheduler->remove_thread(curr_thrd); - Thread *next_thrd = get_next_thread(curr); + Thread *next_thrd = NULL; + if (curr) + next_thrd = action_select_next_thread(curr); + if (!next_thrd) + next_thrd = get_next_thread(); DEBUG("(%d, %d)\n", curr_thrd ? id_to_int(curr_thrd->get_id()) : -1, next_thrd ? id_to_int(next_thrd->get_id()) : -1); @@ -3022,6 +3117,8 @@ void ModelChecker::run() Thread *thr = get_thread(tid); if (!thr->is_model_thread() && !thr->is_complete() && !thr->get_pending()) { switch_from_master(thr); + if (is_circular_wait(thr)) + assert_bug("Deadlock detected"); } }
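
The is_circular_wait() check introduced in this patch walks a thread's waiting_on() chain and reports a deadlock when the chain leads back to the starting thread. The following is a minimal standalone sketch of that cycle walk, offered only as an illustration: the Thr struct and the names used here are hypothetical stand-ins, not the model checker's real Thread API, and like the original loop it only detects cycles that pass through the starting thread itself.

// Standalone sketch of the circular-wait idea used by is_circular_wait().
// 'Thr' is an illustrative stand-in for the checker's Thread class; only
// the waiting_on pointer matters for the algorithm.
#include <cstdio>

struct Thr {
	const char *name;
	Thr *waiting_on;   /* thread this one is blocked on, or NULL */
};

/* Walk the waiting_on chain starting from t; if the chain ever leads
 * back to t itself, the threads form a wait cycle (deadlock). */
static bool is_circular_wait(const Thr *t)
{
	for (Thr *w = t->waiting_on; w != NULL; w = w->waiting_on)
		if (w == t)
			return true;
	return false;
}

int main()
{
	Thr a = { "A", NULL }, b = { "B", NULL };
	a.waiting_on = &b;                      /* A blocks on B: no cycle yet */
	printf("%d\n", is_circular_wait(&a));   /* prints 0 */
	b.waiting_on = &a;                      /* B blocks on A: cycle A -> B -> A */
	printf("%d\n", is_circular_wait(&a));   /* prints 1 */
	return 0;
}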