From: Brian Demsky Date: Mon, 8 Oct 2012 08:21:35 +0000 (-0700) Subject: merge massive speedup with release sequence support... X-Git-Tag: pldi2013~97 X-Git-Url: http://demsky.eecs.uci.edu/git/?a=commitdiff_plain;h=b8b39c87557325a384faa45d0cae56a6f71f52b1;hp=-c;p=model-checker.git merge massive speedup with release sequence support... Merge branch 'master' of ssh://demsky.eecs.uci.edu/home/git/model-checker Conflicts: schedule.cc schedule.h --- b8b39c87557325a384faa45d0cae56a6f71f52b1 diff --combined action.cc index 8dba82f,09352da..c1adc2e --- a/action.cc +++ b/action.cc @@@ -8,21 -8,32 +8,34 @@@ #include "clockvector.h" #include "common.h" #include "threads.h" +#include "nodestack.h" #define ACTION_INITIAL_CLOCK 0 - ModelAction::ModelAction(action_type_t type, memory_order order, void *loc, uint64_t value) : + /** + * @brief Construct a new ModelAction + * + * @param type The type of action + * @param order The memory order of this action. A "don't care" for non-ATOMIC + * actions (e.g., THREAD_* or MODEL_* actions). + * @param loc The location that this action acts upon + * @param value (optional) A value associated with the action (e.g., the value + * read or written). Defaults to a given macro constant, for debugging purposes. + * @param thread (optional) The Thread in which this action occurred. If NULL + * (default), then a Thread is assigned according to the scheduler. + */ + ModelAction::ModelAction(action_type_t type, memory_order order, void *loc, + uint64_t value, Thread *thread) : type(type), order(order), location(loc), value(value), reads_from(NULL), + node(NULL), seq_number(ACTION_INITIAL_CLOCK), cv(NULL) { - Thread *t = thread_current(); + Thread *t = thread ? thread : thread_current(); this->tid = t->get_id(); } @@@ -52,6 -63,11 +65,11 @@@ void ModelAction::set_seq_number(modelc seq_number = num; } + bool ModelAction::is_relseq_fixup() const + { + return type == MODEL_FIXUP_RELSEQ; + } + bool ModelAction::is_mutex_op() const { return type == ATOMIC_LOCK || type == ATOMIC_TRYLOCK || type == ATOMIC_UNLOCK; @@@ -307,6 -323,9 +325,9 @@@ void ModelAction::print() cons { const char *type_str, *mo_str; switch (this->type) { + case MODEL_FIXUP_RELSEQ: + type_str = "relseq fixup"; + break; case THREAD_CREATE: type_str = "thread create"; break; diff --combined model.cc index 2c27567,8267377..4f2719f --- a/model.cc +++ b/model.cc @@@ -47,6 -47,10 +47,10 @@@ ModelChecker::ModelChecker(struct model priv = (struct model_snapshot_members *)calloc(1, sizeof(*priv)); /* First thread created will have id INITIAL_THREAD_ID */ priv->next_thread_id = INITIAL_THREAD_ID; + + /* Initialize a model-checker thread, for special ModelActions */ + model_thread = new Thread(get_next_id()); + thread_map->put(id_to_int(model_thread->get_id()), model_thread); } /** @brief Destructor */ @@@ -150,9 -154,6 +154,9 @@@ Thread * ModelChecker::get_next_thread( earliest_diverge=diverge; Node *nextnode = next->get_node(); + Node *prevnode = nextnode->get_parent(); + scheduler->update_sleep_set(prevnode); + /* Reached divergence point */ if (nextnode->increment_promise()) { /* The next node will try to satisfy a different set of promises. */ @@@ -166,20 -167,19 +170,24 @@@ /* The next node will try to read from a different future value. 
*/ tid = next->get_tid(); node_stack->pop_restofstack(2); + } else if (nextnode->increment_relseq_break()) { + /* The next node will try to resolve a release sequence differently */ + tid = next->get_tid(); + node_stack->pop_restofstack(2); } else { /* Make a different thread execute for next step */ - Node *node = nextnode->get_parent(); - tid = node->get_next_backtrack(); + scheduler->add_sleep(thread_map->get(id_to_int(next->get_tid()))); + tid = prevnode->get_next_backtrack(); + /* Make sure the backtracked thread isn't sleeping. */ + scheduler->remove_sleep(thread_map->get(id_to_int(tid))); node_stack->pop_restofstack(1); if (diverge==earliest_diverge) { - earliest_diverge=node->get_action(); + earliest_diverge=prevnode->get_action(); } } + /* The correct sleep set is in the parent node. */ + execute_sleep_set(); + DEBUG("*** Divergence point ***\n"); diverge = NULL; @@@ -191,40 -191,6 +199,40 @@@ return thread_map->get(id_to_int(tid)); } +/** + * We need to know what the next actions of all threads in the sleep + * set will be. This method computes them and stores the actions at + * the corresponding thread object's pending action. + */ + +void ModelChecker::execute_sleep_set() { + for(unsigned int i=0;iget_enabled(thr) == THREAD_SLEEP_SET ) { + thr->set_state(THREAD_RUNNING); + scheduler->next_thread(thr); + Thread::swap(&system_context, thr); + thr->set_pending(priv->current_action); + } + } + priv->current_action = NULL; +} + +void ModelChecker::wake_up_sleeping_actions(ModelAction * curr) { + for(unsigned int i=0;iget_enabled(thr) == THREAD_SLEEP_SET ) { + ModelAction *pending_act=thr->get_pending(); + if (pending_act->could_synchronize_with(curr)) { + //Remove this thread from sleep set + scheduler->remove_sleep(thr); + } + } + } +} + /** * Queries the model-checker for more executions to explore and, if one * exists, resets the model-checker state to execute a new execution. @@@ -249,7 -215,7 +257,7 @@@ bool ModelChecker::next_execution( num_feasible_executions++; } - DEBUG("Number of acquires waiting on pending release sequences: %lu\n", + DEBUG("Number of acquires waiting on pending release sequences: %zu\n", pending_rel_seqs->size()); if (isfinalfeasible() || DBG_ENABLED()) @@@ -312,7 -278,7 +320,7 @@@ ModelAction * ModelChecker::get_last_co return NULL; } -/** This method find backtracking points where we should try to +/** This method finds backtracking points where we should try to * reorder the parameter ModelAction against. * * @param the ModelAction to find backtracking points for. @@@ -337,10 -303,9 +345,10 @@@ void ModelChecker::set_backtracking(Mod for(int i = low_tid; i < high_tid; i++) { thread_id_t tid = int_to_id(i); + if (!node->is_enabled(tid)) continue; - + /* Check if this has been explored already */ if (node->has_been_explored(tid)) continue; @@@ -358,6 -323,7 +366,6 @@@ if (unfair) continue; } - /* Cache the latest backtracking point */ if (!priv->next_backtrack || *prev > *priv->next_backtrack) priv->next_backtrack = prev; @@@ -582,6 -548,76 +590,76 @@@ bool ModelChecker::process_thread_actio return updated; } + /** + * @brief Process the current action for release sequence fixup activity + * + * Performs model-checker release sequence fixups for the current action, + * forcing a single pending release sequence to break (with a given, potential + * "loose" write) or to complete (i.e., synchronize). If a pending release + * sequence forms a complete release sequence, then we must perform the fixup + * synchronization, mo_graph additions, etc. 
+ * + * @param curr The current action; must be a release sequence fixup action + * @param work_queue The work queue to which to add work items as they are + * generated + */ + void ModelChecker::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue) + { + const ModelAction *write = curr->get_node()->get_relseq_break(); + struct release_seq *sequence = pending_rel_seqs->back(); + pending_rel_seqs->pop_back(); + ASSERT(sequence); + ModelAction *acquire = sequence->acquire; + const ModelAction *rf = sequence->rf; + const ModelAction *release = sequence->release; + ASSERT(acquire); + ASSERT(release); + ASSERT(rf); + ASSERT(release->same_thread(rf)); + + if (write == NULL) { + /** + * @todo Forcing a synchronization requires that we set + * modification order constraints. For instance, we can't allow + * a fixup sequence in which two separate read-acquire + * operations read from the same sequence, where the first one + * synchronizes and the other doesn't. Essentially, we can't + * allow any writes to insert themselves between 'release' and + * 'rf' + */ + + /* Must synchronize */ + if (!acquire->synchronize_with(release)) { + set_bad_synchronization(); + return; + } + /* Re-check all pending release sequences */ + work_queue->push_back(CheckRelSeqWorkEntry(NULL)); + /* Re-check act for mo_graph edges */ + work_queue->push_back(MOEdgeWorkEntry(acquire)); + + /* propagate synchronization to later actions */ + action_list_t::reverse_iterator rit = action_trace->rbegin(); + for (; (*rit) != acquire; rit++) { + ModelAction *propagate = *rit; + if (acquire->happens_before(propagate)) { + propagate->synchronize_with(acquire); + /* Re-check 'propagate' for mo_graph edges */ + work_queue->push_back(MOEdgeWorkEntry(propagate)); + } + } + } else { + /* Break release sequence with new edges: + * release --mo--> write --mo--> rf */ + mo_graph->addEdge(release, write); + mo_graph->addEdge(write, rf); + } + + /* See if we have realized a data race */ + if (checkDataRaces()) + set_assert(); + } + /** * Initialize the current action by performing one or more of the following * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward @@@ -633,6 -669,8 +711,8 @@@ ModelAction * ModelChecker::initialize_ */ if (newcurr->is_write()) compute_promises(newcurr); + else if (newcurr->is_relseq_fixup()) + compute_relseq_breakwrites(newcurr); } return newcurr; } @@@ -673,6 -711,7 +753,6 @@@ bool ModelChecker::check_action_enabled Thread * ModelChecker::check_current_action(ModelAction *curr) { ASSERT(curr); - bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw(); if (!check_action_enabled(curr)) { @@@ -683,11 -722,8 +763,11 @@@ return get_next_thread(NULL); } + wake_up_sleeping_actions(curr); + ModelAction *newcurr = initialize_curr_action(curr); + /* Add the action to lists before any other model-checking tasks */ if (!second_part_of_rmw) add_action_to_lists(newcurr); @@@ -699,6 -735,7 +779,6 @@@ /* Initialize work_queue with the "current action" work */ work_queue_t work_queue(1, CheckCurrWorkEntry(curr)); - while (!work_queue.empty()) { WorkQueueEntry work = work_queue.front(); work_queue.pop_front(); @@@ -721,6 -758,9 +801,9 @@@ if (act->is_mutex_op() && process_mutex(act)) update_all = true; + if (act->is_relseq_fixup()) + process_relseq_fixup(curr, &work_queue); + if (update_all) work_queue.push_back(CheckRelSeqWorkEntry(NULL)); else if (update) @@@ -757,7 -797,9 +840,7 @@@ } check_curr_backtracking(curr); - set_backtracking(curr); - return get_next_thread(curr); } @@@ -781,7 
-823,8 +864,8 @@@ void ModelChecker::check_curr_backtrack if ((!parnode->backtrack_empty() || !currnode->read_from_empty() || !currnode->future_value_empty() || - !currnode->promise_empty()) + !currnode->promise_empty() || + !currnode->relseq_break_empty()) && (!priv->next_backtrack || *curr > *priv->next_backtrack)) { priv->next_backtrack = curr; @@@ -1299,10 -1342,14 +1383,14 @@@ bool ModelChecker::release_seq_heads(co bool future_ordered = false; ModelAction *last = get_last_action(int_to_id(i)); - if (last && (rf->happens_before(last) || - get_thread(int_to_id(i))->is_complete())) + Thread *th = get_thread(int_to_id(i)); + if ((last && rf->happens_before(last)) || + !scheduler->is_enabled(th) || + th->is_complete()) future_ordered = true; + ASSERT(!th->is_model_thread() || future_ordered); + for (rit = list->rbegin(); rit != list->rend(); rit++) { const ModelAction *act = *rit; /* Reach synchronization -> this thread is complete */ @@@ -1691,6 -1738,29 +1779,29 @@@ void ModelChecker::mo_check_promises(th } } + /** + * Compute the set of writes that may break the current pending release + * sequence. This information is extracted from previou release sequence + * calculations. + * + * @param curr The current ModelAction. Must be a release sequence fixup + * action. + */ + void ModelChecker::compute_relseq_breakwrites(ModelAction *curr) + { + if (pending_rel_seqs->empty()) + return; + + struct release_seq *pending = pending_rel_seqs->back(); + for (unsigned int i = 0; i < pending->writes.size(); i++) { + const ModelAction *write = pending->writes[i]; + curr->get_node()->add_relseq_break(write); + } + + /* NULL means don't break the sequence; just synchronize */ + curr->get_node()->add_relseq_break(NULL); + } + /** * Build up an initial set of all past writes that this 'read' action may read * from. This set is determined by the clock vector's "happens before" @@@ -1894,7 -1964,7 +2005,7 @@@ bool ModelChecker::take_step() if (has_asserted()) return false; - Thread *curr = thread_current(); + Thread *curr = priv->current_action ? get_thread(priv->current_action) : NULL; if (curr) { if (curr->get_state() == THREAD_READY) { ASSERT(priv->current_action); @@@ -1917,6 -1987,26 +2028,26 @@@ DEBUG("(%d, %d)\n", curr ? id_to_int(curr->get_id()) : -1, next ? 
id_to_int(next->get_id()) : -1); + /* + * Launch end-of-execution release sequence fixups only when there are: + * + * (1) no more user threads to run (or when execution replay chooses + * the 'model_thread') + * (2) pending release sequences + * (3) pending assertions (i.e., data races) + * (4) no pending promises + */ + if (!pending_rel_seqs->empty() && (!next || next->is_model_thread()) && + isfinalfeasible() && !unrealizedraces.empty()) { + printf("*** WARNING: release sequence fixup action (%zu pending release seuqences) ***\n", + pending_rel_seqs->size()); + ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ, + std::memory_order_seq_cst, NULL, VALUE_NONE, + model_thread); + set_current_action(fixup); + return true; + } + /* next == NULL -> don't take any more steps */ if (!next) return false; diff --combined model.h index d2baaf4,68831b8..7bc3585 --- a/model.h +++ b/model.h @@@ -124,8 -124,7 +124,8 @@@ private int num_executions; int num_feasible_executions; bool promises_expired(); - + void execute_sleep_set(); + void wake_up_sleeping_actions(ModelAction * curr); modelclock_t get_next_seq_num(); /** @@@ -141,6 -140,7 +141,7 @@@ bool process_write(ModelAction *curr); bool process_mutex(ModelAction *curr); bool process_thread_action(ModelAction *curr); + void process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue); bool check_action_enabled(ModelAction *curr); bool take_step(); @@@ -153,6 -153,7 +154,7 @@@ void reset_to_initial_state(); bool resolve_promises(ModelAction *curr); void compute_promises(ModelAction *curr); + void compute_relseq_breakwrites(ModelAction *curr); void check_curr_backtracking(ModelAction * curr); void add_action_to_lists(ModelAction *act); @@@ -202,6 -203,10 +204,10 @@@ * together for efficiency and maintainability. */ struct model_snapshot_members *priv; + /** A special model-checker Thread; used for associating with + * model-checker-related ModelAcitons */ + Thread *model_thread; + /** * @brief The modification order graph * diff --combined nodestack.cc index 76d167d,72c8d5c..3db80e8 --- a/nodestack.cc +++ b/nodestack.cc @@@ -32,7 -32,9 +32,9 @@@ Node::Node(ModelAction *act, Node *par may_read_from(), read_from_index(0), future_values(), - future_index(-1) + future_index(-1), + relseq_break_writes(), + relseq_break_index(0) { if (act) { act->set_node(this); @@@ -265,13 -267,13 +267,13 @@@ thread_id_t Node::get_next_backtrack( bool Node::is_enabled(Thread *t) { int thread_id=id_to_int(t->get_id()); - return thread_id < num_threads && (enabled_array[thread_id] == THREAD_ENABLED); + return thread_id < num_threads && (enabled_array[thread_id] != THREAD_DISABLED); } bool Node::is_enabled(thread_id_t tid) { int thread_id=id_to_int(tid); - return thread_id < num_threads && (enabled_array[thread_id] == THREAD_ENABLED); + return thread_id < num_threads && (enabled_array[thread_id] != THREAD_DISABLED); } bool Node::has_priority(thread_id_t tid) @@@ -352,6 -354,58 +354,58 @@@ bool Node::increment_future_value() return false; } + /** + * Add a write ModelAction to the set of writes that may break the release + * sequence. This is used during replay exploration of pending release + * sequences. This Node must correspond to a release sequence fixup action. + * + * @param write The write that may break the release sequence. NULL means we + * allow the release sequence to synchronize. 
+ */ + void Node::add_relseq_break(const ModelAction *write) + { + relseq_break_writes.push_back(write); + } + + /** + * Get the write that may break the current pending release sequence, + * according to the replay / divergence pattern. + * + * @return A write that may break the release sequence. If NULL, that means + * the release sequence should not be broken. + */ + const ModelAction * Node::get_relseq_break() + { + if (relseq_break_index < (int)relseq_break_writes.size()) + return relseq_break_writes[relseq_break_index]; + else + return NULL; + } + + /** + * Increments the index into the relseq_break_writes set to explore the next + * item. + * @return Returns false if we have explored all values. + */ + bool Node::increment_relseq_break() + { + DBG(); + promises.clear(); + if (relseq_break_index < ((int)relseq_break_writes.size())) { + relseq_break_index++; + return (relseq_break_index < ((int)relseq_break_writes.size())); + } + return false; + } + + /** + * @return True if all writes that may break the release sequence have been + * explored + */ + bool Node::relseq_break_empty() { + return ((relseq_break_index + 1) >= ((int)relseq_break_writes.size())); + } + void Node::explore(thread_id_t tid) { int i = id_to_int(tid); diff --combined nodestack.h index 147b004,cb281ca..803d2b8 --- a/nodestack.h +++ b/nodestack.h @@@ -90,8 -90,12 +90,13 @@@ public bool get_promise(unsigned int i); bool increment_promise(); bool promise_empty(); + enabled_type_t *get_enabled_array() {return enabled_array;} + void add_relseq_break(const ModelAction *write); + const ModelAction * get_relseq_break(); + bool increment_relseq_break(); + bool relseq_break_empty(); + void print(); void print_may_read_from(); @@@ -117,6 -121,9 +122,9 @@@ private std::vector< struct future_value, ModelAlloc > future_values; std::vector< promise_t, ModelAlloc > promises; int future_index; + + std::vector< const ModelAction *, ModelAlloc > relseq_break_writes; + int relseq_break_index; }; typedef std::vector< Node *, ModelAlloc< Node * > > node_list_t; diff --combined schedule.cc index c5e58fe,dd35237..ea1d582 --- a/schedule.cc +++ b/schedule.cc @@@ -5,11 -5,10 +5,11 @@@ #include "schedule.h" #include "common.h" #include "model.h" +#include "nodestack.h" /** Constructor */ Scheduler::Scheduler() : - is_enabled(NULL), + enabled(NULL), enabled_len(0), curr_thread_index(0), current(NULL) @@@ -21,47 -20,25 +21,58 @@@ void Scheduler::set_enabled(Thread *t, if (threadid>=enabled_len) { enabled_type_t *new_enabled = (enabled_type_t *)snapshot_malloc(sizeof(enabled_type_t) * (threadid + 1)); memset(&new_enabled[enabled_len], 0, (threadid+1-enabled_len)*sizeof(enabled_type_t)); - if (is_enabled != NULL) { - memcpy(new_enabled, is_enabled, enabled_len*sizeof(enabled_type_t)); - snapshot_free(is_enabled); + if (enabled != NULL) { + memcpy(new_enabled, enabled, enabled_len*sizeof(enabled_type_t)); + snapshot_free(enabled); } - is_enabled=new_enabled; + enabled=new_enabled; enabled_len=threadid+1; } - is_enabled[threadid]=enabled_status; + enabled[threadid]=enabled_status; + } + + /** + * @brief Check if a Thread is currently enabled + * @param t The Thread to check + * @return True if the Thread is currently enabled + */ + bool Scheduler::is_enabled(Thread *t) const + { + int id = id_to_int(t->get_id()); - return (id >= enabled_len) ? false : (enabled[id] == THREAD_ENABLED); ++ return (id >= enabled_len) ? 
false : (enabled[id] != THREAD_DISABLED); +} + +enabled_type_t Scheduler::get_enabled(Thread *t) { - return is_enabled[id_to_int(t->get_id())]; ++ return enabled[id_to_int(t->get_id())]; +} + +void Scheduler::update_sleep_set(Node *n) { + enabled_type_t *enabled_array=n->get_enabled_array(); + for(int i=0;iget_id())); + set_enabled(t, THREAD_SLEEP_SET); +} + +/** + * Remove a Thread from the sleep set. + * @param t The Thread to remove + */ +void Scheduler::remove_sleep(Thread *t) +{ + DEBUG("thread %d\n", id_to_int(t->get_id())); + set_enabled(t, THREAD_ENABLED); } /** @@@ -71,6 -48,7 +82,7 @@@ void Scheduler::add_thread(Thread *t) { DEBUG("thread %d\n", id_to_int(t->get_id())); + ASSERT(!t->is_model_thread()); set_enabled(t, THREAD_ENABLED); } @@@ -102,6 -80,7 +114,7 @@@ void Scheduler::sleep(Thread *t */ void Scheduler::wake(Thread *t) { + ASSERT(!t->is_model_thread()); set_enabled(t, THREAD_DISABLED); t->set_state(THREAD_READY); } @@@ -120,7 -99,7 +133,7 @@@ Thread * Scheduler::next_thread(Thread int old_curr_thread = curr_thread_index; while(true) { curr_thread_index = (curr_thread_index+1) % enabled_len; - if (is_enabled[curr_thread_index]==THREAD_ENABLED) { - if (enabled[curr_thread_index]) { ++ if (enabled[curr_thread_index]==THREAD_ENABLED) { t = model->get_thread(int_to_id(curr_thread_index)); break; } @@@ -129,6 -108,9 +142,9 @@@ return NULL; } } + } else if (t->is_model_thread()) { + /* model-checker threads never run */ + t = NULL; } else { curr_thread_index = id_to_int(t->get_id()); } @@@ -143,6 -125,7 +159,7 @@@ */ Thread * Scheduler::get_current_thread() const { + ASSERT(!current || !current->is_model_thread()); return current; } diff --combined schedule.h index 3a54e8c,3feddd9..7267059 --- a/schedule.h +++ b/schedule.h @@@ -10,7 -10,6 +10,7 @@@ /* Forward declaration */ class Thread; +class Node; typedef enum enabled_type { THREAD_DISABLED, @@@ -30,15 -29,13 +30,17 @@@ public Thread * next_thread(Thread *t); Thread * get_current_thread() const; void print() const; - enabled_type_t * get_enabled() { return is_enabled; }; + enabled_type_t * get_enabled() { return enabled; }; + void remove_sleep(Thread *t); + void add_sleep(Thread *t); + enabled_type_t get_enabled(Thread *t); + void update_sleep_set(Node *n); + bool is_enabled(Thread *t) const; + SNAPSHOTALLOC private: /** The list of available Threads that are not currently running */ - enabled_type_t * is_enabled; + enabled_type_t *enabled; int enabled_len; int curr_thread_index; void set_enabled(Thread *t, enabled_type_t enabled_status);
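
The following is an illustrative, self-contained sketch of the sleep-set scheduling policy this merge introduces: threads parked in the sleep set are skipped by the scheduler until an action that could synchronize with their pending action wakes them. This is not the model-checker's code — SimpleScheduler, conflicts(), pending_loc and the plain ints standing in for Thread/ModelAction objects are hypothetical stand-ins; only the enabled_type_t states and the skip/wake policy are taken from the diff above.

/* Minimal sleep-set sketch, assuming a toy scheduler rather than the real
 * Scheduler/ModelChecker classes. */
#include <cstdio>
#include <map>

enum enabled_type_t { THREAD_DISABLED, THREAD_ENABLED, THREAD_SLEEP_SET };

/* Stand-in for could_synchronize_with(): two actions conflict if they touch
 * the same (toy) memory location. */
static bool conflicts(int loc_a, int loc_b) { return loc_a == loc_b; }

struct SimpleScheduler {
	std::map<int, enabled_type_t> enabled;   /* tid -> status */
	std::map<int, int> pending_loc;          /* tid -> location of pending action */

	void add_sleep(int tid)    { enabled[tid] = THREAD_SLEEP_SET; }
	void remove_sleep(int tid) { enabled[tid] = THREAD_ENABLED; }

	/* After the merge, is_enabled() treats sleeping threads as "not
	 * disabled"; only next_thread() insists on THREAD_ENABLED. */
	bool is_enabled(int tid) const {
		std::map<int, enabled_type_t>::const_iterator it = enabled.find(tid);
		return it != enabled.end() && it->second != THREAD_DISABLED;
	}
	int next_thread() const {
		for (std::map<int, enabled_type_t>::const_iterator it = enabled.begin();
				it != enabled.end(); it++)
			if (it->second == THREAD_ENABLED)
				return it->first;
		return -1;
	}

	/* Analogue of wake_up_sleeping_actions(): wake any sleeping thread whose
	 * pending action could synchronize with the action just executed. */
	void wake_on_conflict(int curr_loc) {
		for (std::map<int, enabled_type_t>::iterator it = enabled.begin();
				it != enabled.end(); it++)
			if (it->second == THREAD_SLEEP_SET &&
					conflicts(pending_loc[it->first], curr_loc))
				it->second = THREAD_ENABLED;
	}
};

int main() {
	SimpleScheduler sched;
	sched.enabled[1] = THREAD_ENABLED;
	sched.enabled[2] = THREAD_ENABLED;
	sched.pending_loc[2] = 42;          /* thread 2's next action touches loc 42 */
	sched.add_sleep(2);                 /* backtracking put thread 2 to sleep */
	printf("next: %d\n", sched.next_thread());            /* 1: sleepers are skipped */
	sched.wake_on_conflict(42);         /* thread 1 just touched loc 42 */
	printf("enabled(2): %d\n", (int)sched.is_enabled(2)); /* 1: thread 2 is awake again */
	return 0;
}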
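
Below is a similar toy sketch of the release-sequence "break" bookkeeping added to Node (add_relseq_break / get_relseq_break / increment_relseq_break). BreakChoices, FakeWrite and the driver loop in main() are hypothetical; the contract they imitate — enumerate, on replay, each write that might break a pending release sequence, with a trailing NULL entry meaning "do not break, force synchronization" — is taken from the diff.

#include <cstdio>
#include <cstddef>
#include <vector>

struct FakeWrite { int id; };   /* stand-in for a write ModelAction */

struct BreakChoices {
	std::vector<const FakeWrite *> writes;
	int index;

	BreakChoices() : index(0) { }
	void add(const FakeWrite *w) { writes.push_back(w); }

	/* Analogue of Node::get_relseq_break(): the current candidate, or NULL
	 * once the candidates are exhausted / the NULL entry is reached. */
	const FakeWrite * current() const {
		return index < (int)writes.size() ? writes[index] : NULL;
	}

	/* Same contract as Node::increment_relseq_break(): advance the index and
	 * report whether an unexplored choice remains. */
	bool increment() {
		if (index < (int)writes.size()) {
			index++;
			return index < (int)writes.size();
		}
		return false;
	}
};

int main() {
	FakeWrite w1 = { 1 }, w2 = { 2 };
	BreakChoices node;
	node.add(&w1);
	node.add(&w2);
	node.add(NULL);   /* NULL = leave the sequence intact and synchronize */
	do {
		const FakeWrite *w = node.current();
		if (w)
			printf("try breaking the release sequence with write %d\n", w->id);
		else
			printf("do not break; force synchronization\n");
	} while (node.increment());
	return 0;
}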