From: Brian Norris
Date: Wed, 12 Sep 2012 00:52:02 +0000 (-0700)
Subject: Merge branch 'demsky'
X-Git-Tag: pldi2013~223
X-Git-Url: http://demsky.eecs.uci.edu/git/?a=commitdiff_plain;h=50b95c3747f4fe79b2ab1a1a6fc303224e16e418;hp=f2a014110185c734180545ffc1dc2f0dcd30fd0c;p=model-checker.git

Merge branch 'demsky'

Branch cleaned up by Brian Norris
---

diff --git a/cyclegraph.cc b/cyclegraph.cc
index 3168923..967b076 100644
--- a/cyclegraph.cc
+++ b/cyclegraph.cc
@@ -3,7 +3,8 @@
 
 /** Initializes a CycleGraph object. */
 CycleGraph::CycleGraph() :
-	hasCycles(false)
+	hasCycles(false),
+	oldCycles(false)
 {
 }
@@ -39,6 +40,8 @@ void CycleGraph::addEdge(const ModelAction *from, const ModelAction *to) {
 		// Check for Cycles
 		hasCycles=checkReachable(tonode, fromnode);
 	}
+
+	rollbackvector.push_back(fromnode);
 	fromnode->addEdge(tonode);
 
 	CycleNode * rmwnode=fromnode->getRMW();
@@ -51,6 +54,7 @@ void CycleGraph::addEdge(const ModelAction *from, const ModelAction *to) {
 			// Check for Cycles
 			hasCycles=checkReachable(tonode, rmwnode);
 		}
+		rollbackvector.push_back(rmwnode);
 		rmwnode->addEdge(tonode);
 	}
 }
@@ -67,6 +71,8 @@ void CycleGraph::addRMWEdge(const ModelAction *from, const ModelAction *rmw) {
 	/* Two RMW actions cannot read from the same write. */
 	if (fromnode->setRMW(rmwnode)) {
 		hasCycles=true;
+	} else {
+		rmwrollbackvector.push_back(fromnode);
 	}
 
 	/* Transfer all outgoing edges from the from node to the rmw node */
@@ -75,9 +81,10 @@ void CycleGraph::addRMWEdge(const ModelAction *from, const ModelAction *rmw) {
 	std::vector<CycleNode *> * edges=fromnode->getEdges();
 	for(unsigned int i=0;i<edges->size();i++) {
 		CycleNode * tonode=(*edges)[i];
+		rollbackvector.push_back(rmwnode);
 		rmwnode->addEdge(tonode);
 	}
-
+	rollbackvector.push_back(fromnode);
 	fromnode->addEdge(rmwnode);
 }
@@ -126,6 +133,28 @@ bool CycleGraph::checkReachable(CycleNode *from, CycleNode *to) {
 	return false;
 }
 
+/** Commit changes to the cyclegraph. */
+void CycleGraph::commitChanges() {
+	rollbackvector.resize(0);
+	rmwrollbackvector.resize(0);
+	oldCycles=hasCycles;
+}
+
+/** Rollback changes to the previous commit. */
+void CycleGraph::rollbackChanges() {
+	for (unsigned int i = 0; i < rollbackvector.size(); i++) {
+		rollbackvector[i]->popEdge();
+	}
+
+	for (unsigned int i = 0; i < rmwrollbackvector.size(); i++) {
+		rmwrollbackvector[i]->clearRMW();
+	}
+
+	hasCycles = oldCycles;
+	rollbackvector.resize(0);
+	rmwrollbackvector.resize(0);
+}
+
 /** @returns whether a CycleGraph contains cycles.
  */
 bool CycleGraph::checkForCycles() {
 	return hasCycles;
@@ -166,7 +195,8 @@ CycleNode * CycleNode::getRMW() {
  * @see CycleGraph::addRMWEdge
  */
 bool CycleNode::setRMW(CycleNode *node) {
-	CycleNode * oldhasRMW=hasRMW;
+	if (hasRMW!=NULL)
+		return true;
 	hasRMW=node;
-	return (oldhasRMW!=NULL);
+	return false;
 }
diff --git a/cyclegraph.h b/cyclegraph.h
index 2e523d7..0bb01c4 100644
--- a/cyclegraph.h
+++ b/cyclegraph.h
@@ -22,6 +22,8 @@ class CycleGraph {
 	void addRMWEdge(const ModelAction *from, const ModelAction *rmw);
 
 	bool checkReachable(const ModelAction *from, const ModelAction *to);
+	void commitChanges();
+	void rollbackChanges();
 
  private:
 	CycleNode * getNode(const ModelAction *);
@@ -33,6 +35,11 @@ class CycleGraph {
 
 	/** @brief A flag: true if this graph contains cycles */
 	bool hasCycles;
+
+	bool oldCycles;
+
+	std::vector<CycleNode *> rollbackvector;
+	std::vector<CycleNode *> rmwrollbackvector;
 };
 
 /** @brief A node within a CycleGraph; corresponds to one ModelAction */
@@ -43,6 +50,12 @@ class CycleNode {
 	std::vector<CycleNode *> * getEdges();
 	bool setRMW(CycleNode *);
 	CycleNode* getRMW();
+	void popEdge() {
+		edges.pop_back();
+	};
+	void clearRMW() {
+		hasRMW=NULL;
+	}
 
  private:
 	/** @brief The ModelAction that this node represents */
diff --git a/main.cc b/main.cc
index f58f304..215a219 100644
--- a/main.cc
+++ b/main.cc
@@ -14,30 +14,41 @@
 #include "model.h"
 #include "snapshot-interface.h"
 
+static void param_defaults(struct model_params * params) {
+	params->maxreads = 0;
+}
+
 static void print_usage() {
 	printf(
-"Usage: [OPTIONS]\n"
+"Usage: [MC_OPTIONS] -- [PROGRAM ARGUMENTS]\n"
 "\n"
 "Options:\n"
 "-h Display this help message and exit\n"
-);
+"-m Maximum times a thread can read from the same write while other writes exist\n"
+"-- Program arguments follow.\n\n");
 	exit(EXIT_SUCCESS);
 }
 
-static void parse_options(struct model_params *params, int argc, char **argv) {
-	const char *shortopts = "h";
+static void parse_options(struct model_params *params, int *argc, char ***argv) {
+	const char *shortopts = "hm:";
 	int opt;
 	bool error = false;
-	while (!error && (opt = getopt(argc, argv, shortopts)) != -1) {
+	while (!error && (opt = getopt(*argc, *argv, shortopts)) != -1) {
 		switch (opt) {
 		case 'h':
 			print_usage();
 			break;
+		case 'm':
+			params->maxreads = atoi(optarg);
+			break;
 		default: /* '?' */
 			error = true;
 			break;
 		}
 	}
+	(*argc) -= optind;
+	(*argv) += optind;
+
 	if (error)
 		print_usage();
 }
@@ -50,7 +61,9 @@ static void real_main() {
 	thrd_t user_thread;
 	struct model_params params;
 
-	parse_options(&params, main_argc, main_argv);
+	param_defaults(&params);
+
+	parse_options(&params, &main_argc, &main_argv);
 
 	//Initialize race detector
 	initRaceDetector();
diff --git a/model.cc b/model.cc
index ba239b9..3e54a00 100644
--- a/model.cc
+++ b/model.cc
@@ -33,6 +33,7 @@ ModelChecker::ModelChecker(struct model_params params) :
 	node_stack(new NodeStack()),
 	mo_graph(new CycleGraph()),
 	failed_promise(false),
+	too_many_reads(false),
 	asserted(false)
 {
 	/* Allocate this "size" on the snapshotting heap */
@@ -75,6 +76,7 @@ void ModelChecker::reset_to_initial_state()
 	DEBUG("+++ Resetting to initial state +++\n");
 	node_stack->reset_execution();
 	failed_promise = false;
+	too_many_reads = false;
 	reset_asserted();
 	snapshotObject->backTrackBeforeStep(0);
 }
@@ -256,6 +258,49 @@ ModelAction * ModelChecker::get_next_backtrack()
 	return next;
 }
 
+/**
+ * Processes a read or rmw model action.
+ * @param curr is the read model action to process.
+ * @param th is the thread
+ * @param second_part_of_rmw is a boolean that is true if this is the second action of a rmw.
+ * @return True if processing this read updates the mo_graph.
+ */
+
+bool ModelChecker::process_read(ModelAction *curr, Thread * th, bool second_part_of_rmw) {
+	uint64_t value;
+	bool updated=false;
+	while(true) {
+		const ModelAction *reads_from = curr->get_node()->get_read_from();
+		if (reads_from != NULL) {
+			value = reads_from->get_value();
+			/* Assign reads_from, perform release/acquire synchronization */
+			curr->read_from(reads_from);
+			if (!second_part_of_rmw) {
+				check_recency(curr,false);
+			}
+
+			bool r_status=r_modification_order(curr,reads_from);
+
+			if (!second_part_of_rmw&&!isfeasible()&&(curr->get_node()->increment_read_from()||!curr->get_node()->future_value_empty())) {
+				mo_graph->rollbackChanges();
+				too_many_reads=false;
+				continue;
+			}
+
+			mo_graph->commitChanges();
+			updated |= r_status;
+		} else {
+			/* Read from future value */
+			value = curr->get_node()->get_future_value();
+			curr->read_from(NULL);
+			Promise *valuepromise = new Promise(curr, value);
+			promises->push_back(valuepromise);
+		}
+		th->set_return_value(value);
+		return updated;
+	}
+}
+
 /**
  * This is the heart of the model checker routine. It performs model-checking
  * actions corresponding to a given "current action." Among other processes, it
@@ -270,13 +315,13 @@ ModelAction * ModelChecker::get_next_backtrack()
  */
 Thread * ModelChecker::check_current_action(ModelAction *curr)
 {
-	bool already_added = false;
+	bool second_part_of_rmw = false;
 
 	ASSERT(curr);
 
 	if (curr->is_rmwc() || curr->is_rmw()) {
 		ModelAction *tmp = process_rmw(curr);
-		already_added = true;
+		second_part_of_rmw = true;
 		delete curr;
 		curr = tmp;
 	} else {
@@ -339,48 +384,40 @@ Thread * ModelChecker::check_current_action(ModelAction *curr)
 		break;
 	}
 
-	/* Assign reads_from values */
 	Thread *th = get_thread(curr->get_tid());
 
-	uint64_t value = VALUE_NONE;
 	bool updated = false;
 	if (curr->is_read()) {
-		const ModelAction *reads_from = curr->get_node()->get_read_from();
-		if (reads_from != NULL) {
-			value = reads_from->get_value();
-			/* Assign reads_from, perform release/acquire synchronization */
-			curr->read_from(reads_from);
-			if (r_modification_order(curr,reads_from))
-				updated = true;
-		} else {
-			/* Read from future value */
-			value = curr->get_node()->get_future_value();
-			curr->read_from(NULL);
-			Promise *valuepromise = new Promise(curr, value);
-			promises->push_back(valuepromise);
-		}
-	} else if (curr->is_write()) {
-		if (w_modification_order(curr))
-			updated = true;
-		if (resolve_promises(curr))
-			updated = true;
+		updated=process_read(curr, th, second_part_of_rmw);
+	}
+
+	if (curr->is_write()) {
+		bool updated_mod_order=w_modification_order(curr);
+		bool updated_promises=resolve_promises(curr);
+		updated=updated_mod_order|updated_promises;
+
+		mo_graph->commitChanges();
+		th->set_return_value(VALUE_NONE);
 	}
 
 	if (updated)
 		resolve_release_sequences(curr->get_location());
 
-	th->set_return_value(value);
-
 	/* Add action to list.  */
-	if (!already_added)
+	if (!second_part_of_rmw)
 		add_action_to_lists(curr);
 
 	Node *currnode = curr->get_node();
 	Node *parnode = currnode->get_parent();
 
-	if (!parnode->backtrack_empty() || !currnode->read_from_empty() ||
-	    !currnode->future_value_empty() || !currnode->promise_empty())
-		if (!priv->next_backtrack || *curr > *priv->next_backtrack)
-			priv->next_backtrack = curr;
+	if ((!parnode->backtrack_empty() ||
+	     !currnode->read_from_empty() ||
+	     !currnode->future_value_empty() ||
+	     !currnode->promise_empty())
+	    && (!priv->next_backtrack ||
+	        *curr > *priv->next_backtrack)) {
+		priv->next_backtrack = curr;
+	}
 
 	set_backtracking(curr);
@@ -395,7 +432,7 @@ bool ModelChecker::isfeasibleprefix() {
 
 /** @returns whether the current partial trace is feasible. */
 bool ModelChecker::isfeasible() {
-	return !mo_graph->checkForCycles() && !failed_promise;
+	return !mo_graph->checkForCycles() && !failed_promise && !too_many_reads;
 }
 
 /** Returns whether the current completed trace is feasible. */
@@ -413,6 +450,101 @@ ModelAction * ModelChecker::process_rmw(ModelAction *act) {
 	return lastread;
 }
 
+/**
+ * Checks whether a thread has read from the same write too many times
+ * without seeing the effects of a later write.
+ *
+ * Basic idea:
+ * 1) there must be a different write that we could read from that would satisfy the modification order,
+ * 2) we must have read from the same value in excess of maxreads times, and
+ * 3) that other write must have been in the reads_from set for maxreads times.
+ *
+ * If so, we decide that the execution is no longer feasible.
+ */
+void ModelChecker::check_recency(ModelAction *curr, bool already_added) {
+	if (params.maxreads != 0) {
+		if (curr->get_node()->get_read_from_size() <= 1)
+			return;
+
+		//Must make sure that execution is currently feasible... We could
+		//accidentally clear by rolling back
+		if (!isfeasible())
+			return;
+
+		std::vector<action_list_t> *thrd_lists = obj_thrd_map->get_safe_ptr(curr->get_location());
+		int tid = id_to_int(curr->get_tid());
+
+		/* Skip checks */
+		if ((int)thrd_lists->size() <= tid)
+			return;
+
+		action_list_t *list = &(*thrd_lists)[tid];
+
+		action_list_t::reverse_iterator rit = list->rbegin();
+		/* Skip past curr */
+		if (!already_added) {
+			for (; (*rit) != curr; rit++)
+				;
+			/* go past curr now */
+			rit++;
+		}
+
+		action_list_t::reverse_iterator ritcopy=rit;
+		//See if we have enough reads from the same value
+		int count=0;
+		for (; count < params.maxreads; rit++,count++) {
+			if (rit==list->rend())
+				return;
+			ModelAction *act = *rit;
+			if (!act->is_read())
+				return;
+			if (act->get_reads_from() != curr->get_reads_from())
+				return;
+			if (act->get_node()->get_read_from_size() <= 1)
+				return;
+		}
+
+		for (int i=0;i<curr->get_node()->get_read_from_size();i++) {
+			//Get write
+			const ModelAction * write=curr->get_node()->get_read_from_at(i);
+			//Need a different write
+			if (write==curr->get_reads_from())
+				continue;
+
+			/* Test to see whether this is a feasible write to read from */
+			r_modification_order(curr, write);
+			bool feasiblereadfrom=isfeasible();
+			mo_graph->rollbackChanges();
+
+			if (!feasiblereadfrom)
+				continue;
+			rit=ritcopy;
+
+			bool feasiblewrite=true;
+			//now we need to see if this write works for everyone
+
+			for (int loop=count;loop>0;loop--,rit++) {
+				ModelAction *act=*rit;
+				bool foundvalue=false;
+				for(int j=0;j<act->get_node()->get_read_from_size();j++) {
+					if (act->get_node()->get_read_from_at(j)==write) {
+						foundvalue=true;
+						break;
+					}
+				}
+				if (!foundvalue) {
+					feasiblewrite=false;
+					break;
+				}
+			}
+			if (feasiblewrite) {
+				too_many_reads = true;
+				return;
+			}
+		}
+	}
+}
+
 /**
  * Updates the mo_graph with the constraints imposed from the current read.
  * @param curr The current action. Must be a read.
@@ -523,10 +655,16 @@ bool ModelChecker::w_modification_order(ModelAction *curr)
 
 			/* Include at most one act per-thread that "happens before" curr */
 			if (act->happens_before(curr)) {
-				if (act->is_read())
-					mo_graph->addEdge(act->get_reads_from(), curr);
-				else
+				/*
+				 * Note: if act is RMW, just add edge:
+				 *   act --mo--> curr
+				 * The following edge should be handled elsewhere:
+				 *   readfrom(act) --mo--> act
+				 */
+				if (act->is_write())
 					mo_graph->addEdge(act, curr);
+				else if (act->is_read() && act->get_reads_from() != NULL)
+					mo_graph->addEdge(act->get_reads_from(), curr);
 				added = true;
 				break;
 			} else if (act->is_read() && !act->is_synchronizing(curr) &&
diff --git a/model.h b/model.h
index 1b6bb10..c3e5830 100644
--- a/model.h
+++ b/model.h
@@ -28,6 +28,7 @@ class Promise;
  * the model checker.
  */
 struct model_params {
+	int maxreads;
 };
 
 /**
@@ -100,9 +101,11 @@ private:
 	 */
 	void set_current_action(ModelAction *act) { priv->current_action = act; }
 	Thread * check_current_action(ModelAction *curr);
+	bool process_read(ModelAction *curr, Thread * th, bool second_part_of_rmw);
 
 	bool take_step();
 
+	void check_recency(ModelAction *curr, bool already_added);
 	ModelAction * get_last_conflict(ModelAction *act);
 	void set_backtracking(ModelAction *act);
 	Thread * get_next_thread(ModelAction *curr);
@@ -176,6 +179,7 @@ private:
 	 */
 	CycleGraph *mo_graph;
 	bool failed_promise;
+	bool too_many_reads;
 
 	bool asserted;
 };
diff --git a/nodestack.cc b/nodestack.cc
index 2ebfc71..a9399ed 100644
--- a/nodestack.cc
+++ b/nodestack.cc
@@ -221,6 +221,14 @@ uint64_t Node::get_future_value() {
 	return future_values[future_index];
 }
 
+int Node::get_read_from_size() {
+	return may_read_from.size();
+}
+
+const ModelAction * Node::get_read_from_at(int i) {
+	return may_read_from[i];
+}
+
 /**
  * Gets the next 'may_read_from' action from this Node. Only valid for a node
 * where this->action is a 'read'.
diff --git a/nodestack.h b/nodestack.h
index b440b11..b391645 100644
--- a/nodestack.h
+++ b/nodestack.h
@@ -64,6 +64,8 @@ public:
 	const ModelAction * get_read_from();
 	bool increment_read_from();
 	bool read_from_empty();
+	int get_read_from_size();
+	const ModelAction * get_read_from_at(int i);
 
 	void set_promise(unsigned int i);
 	bool get_promise(unsigned int i);
diff --git a/test/linuxrwlocks.c b/test/linuxrwlocks.c
index a616b67..dcf323c 100644
--- a/test/linuxrwlocks.c
+++ b/test/linuxrwlocks.c
@@ -28,32 +28,32 @@ static inline int write_can_lock(rwlock_t *lock)
 
 static inline void read_lock(rwlock_t *rw)
 {
-	int currentvalue=atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
-	while (currentvalue<0) {
+	int priorvalue=atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
+	while (priorvalue<=0) {
 		atomic_fetch_add_explicit(&rw->lock, 1, memory_order_relaxed);
 		do {
-			currentvalue=atomic_load_explicit(&rw->lock, memory_order_relaxed);
-		} while(currentvalue<=0);
-		currentvalue=atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
+			priorvalue=atomic_load_explicit(&rw->lock, memory_order_relaxed);
+		} while(priorvalue<=0);
+		priorvalue=atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
 	}
 }
 
 static inline void write_lock(rwlock_t *rw)
 {
-	int currentvalue=atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
-	while (currentvalue!=0) {
+	int priorvalue=atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
+	while (priorvalue!=RW_LOCK_BIAS) {
 		atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_relaxed);
 		do {
-			currentvalue=atomic_load_explicit(&rw->lock, memory_order_relaxed);
-		} while(currentvalue!=RW_LOCK_BIAS);
-		currentvalue=atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
+			priorvalue=atomic_load_explicit(&rw->lock, memory_order_relaxed);
+		} while(priorvalue!=RW_LOCK_BIAS);
+		priorvalue=atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
 	}
 }
 
 static inline int read_trylock(rwlock_t *rw)
 {
-	int currentvalue=atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
-	if (currentvalue>=0)
+	int priorvalue=atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
+	if (priorvalue>0)
 		return 1;
 
 	atomic_fetch_add_explicit(&rw->lock, 1, memory_order_relaxed);
@@ -62,8 +62,8 @@ static inline int read_trylock(rwlock_t *rw)
 
 static inline int write_trylock(rwlock_t *rw)
 {
-	int currentvalue=atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
-	if (currentvalue>=0)
+	int priorvalue=atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
+	if (priorvalue==RW_LOCK_BIAS)
 		return 1;
 
 	atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_relaxed);
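
The commitChanges()/rollbackChanges() support added to CycleGraph above is an undo log: each speculative modification-order edge (and RMW link) records which node must be undone, and the log is either discarded on commit or replayed on rollback when a trial read-from choice turns out to be infeasible. In the patch, process_read() and check_recency() use exactly this pattern: try an edge, test isfeasible(), then commit or roll back. The following standalone sketch (hypothetical Node/SpeculativeGraph types, not part of the model-checker sources) illustrates the same idea in isolation:

#include <cstdio>
#include <vector>

// Minimal stand-in for a graph node: just a list of outgoing edges.
struct Node {
	std::vector<Node *> edges;
	void addEdge(Node *to) { edges.push_back(to); }
	void popEdge() { edges.pop_back(); }   // undo the most recent addEdge()
};

// Graph with an undo log, mirroring the rollbackvector idea above.
struct SpeculativeGraph {
	std::vector<Node *> rollbacklog;       // nodes whose last edge is speculative

	void addEdge(Node *from, Node *to) {
		rollbacklog.push_back(from);   // remember which node to undo
		from->addEdge(to);
	}
	void commitChanges() { rollbacklog.clear(); }
	void rollbackChanges() {
		// Pop speculative edges in reverse order of insertion.
		for (unsigned int i = rollbacklog.size(); i > 0; i--)
			rollbacklog[i - 1]->popEdge();
		rollbacklog.clear();
	}
};

int main() {
	Node a, b, c;
	SpeculativeGraph g;

	g.addEdge(&a, &b);
	g.commitChanges();                     // a->b is now permanent

	g.addEdge(&a, &c);                     // speculative edge
	g.rollbackChanges();                   // a->c is removed again

	printf("a has %zu edge(s)\n", a.edges.size());   // prints: a has 1 edge(s)
	return 0;
}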