8 #include "snapshot-interface.h"
10 #include "clockvector.h"
11 #include "cyclegraph.h"
17 #define INITIAL_THREAD_ID 0
21 /** @brief Constructor */
// Builds all per-execution bookkeeping. Several containers use SnapshotAlloc
// or live on the snapshotting heap so their contents are rolled back when the
// checker backtracks to an earlier program snapshot.
22 ModelChecker::ModelChecker(struct model_params params) :
23 /* Initialize default scheduler */
25 scheduler(new Scheduler()),
27 num_feasible_executions(0),
29 earliest_diverge(NULL),
30 action_trace(new action_list_t()),
31 thread_map(new HashTable<int, Thread *, int>()),
// Per-location action lists (used for conflict detection and reads-from sets).
32 obj_map(new HashTable<const void *, action_list_t, uintptr_t, 4>()),
// Actions blocked waiting on a mutex, keyed by the mutex's address.
33 lock_waiters_map(new HashTable<const void *, action_list_t, uintptr_t, 4>()),
// Per-location, per-thread action lists (indexed by thread id).
34 obj_thrd_map(new HashTable<void *, std::vector<action_list_t>, uintptr_t, 4 >()),
35 promises(new std::vector< Promise *, SnapshotAlloc<Promise *> >()),
36 futurevalues(new std::vector< struct PendingFutureValue, SnapshotAlloc<struct PendingFutureValue> >()),
37 pending_rel_seqs(new std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >()),
38 thrd_last_action(new std::vector< ModelAction *, SnapshotAlloc<ModelAction *> >(1)),
39 node_stack(new NodeStack()),
40 mo_graph(new CycleGraph()),
// Per-execution infeasibility flags; cleared in reset_to_initial_state().
41 failed_promise(false),
42 too_many_reads(false),
44 bad_synchronization(false)
46 /* Allocate this "size" on the snapshotting heap */
47 priv = (struct model_snapshot_members *)calloc(1, sizeof(*priv));
48 /* First thread created will have id INITIAL_THREAD_ID */
49 priv->next_thread_id = INITIAL_THREAD_ID;
51 /* Initialize a model-checker thread, for special ModelActions */
52 model_thread = new Thread(get_next_id());
53 thread_map->put(id_to_int(model_thread->get_id()), model_thread);
56 /** @brief Destructor */
// Frees the Thread objects owned via thread_map, the Promise objects, and the
// visible containers. NOTE(review): not every container allocated in the
// constructor has a matching delete in the lines visible here (e.g. obj_map,
// action_trace) -- confirm against the full file that nothing leaks.
57 ModelChecker::~ModelChecker()
// Thread ids are dense ints starting at 0, so index lookup covers all threads.
59 for (unsigned int i = 0; i < get_num_threads(); i++)
60 delete thread_map->get(i);
65 delete lock_waiters_map;
// promises holds owning raw pointers; delete elements before the vector.
68 for (unsigned int i = 0; i < promises->size(); i++)
69 delete (*promises)[i];
72 delete pending_rel_seqs;
74 delete thrd_last_action;
81 * Restores user program to initial state and resets all model-checker data
84 void ModelChecker::reset_to_initial_state()
86 DEBUG("+++ Resetting to initial state +++\n");
// Rewind the exploration-tree iterator so the next execution replays from the
// root, and clear the per-execution infeasibility flags.
87 node_stack->reset_execution();
88 failed_promise = false;
89 too_many_reads = false;
90 bad_synchronization = false;
// Roll the user program's snapshotted memory back to step 0 (initial state).
92 snapshotObject->backTrackBeforeStep(0);
95 /** @return a thread ID for a new Thread */
96 thread_id_t ModelChecker::get_next_id()
// Sequential id assignment; next_thread_id lives in the snapshotted struct
// (priv), so it is restored along with the rest of the execution state.
98 return priv->next_thread_id++;
101 /** @return the number of user threads created during this execution */
102 unsigned int ModelChecker::get_num_threads()
// next_thread_id doubles as a count of the ids handed out so far.
104 return priv->next_thread_id;
107 /** @return The currently executing Thread. */
108 Thread * ModelChecker::get_current_thread()
// The scheduler is the single source of truth for which thread is running.
110 return scheduler->get_current_thread();
113 /** @return a sequence number for a new ModelAction */
114 modelclock_t ModelChecker::get_next_seq_num()
// Pre-increment: sequence numbers start at 1; presumably 0 is reserved as a
// "no clock" sentinel -- confirm against modelclock_t consumers.
116 return ++priv->used_sequence_numbers;
120 * @brief Choose the next thread to execute.
122 * This function chooses the next thread that should execute. It can force the
123 * adjacency of read/write portions of a RMW action, force THREAD_CREATE to be
124 * followed by a THREAD_START, or it can enforce execution replay/backtracking.
125 * The model-checker may have no preference regarding the next thread (i.e.,
126 * when exploring a new execution ordering), in which case this will return
128 * @param curr The current ModelAction. This action might guide the choice of
130 * @return The next thread to run. If the model-checker has no preference, NULL.
132 Thread * ModelChecker::get_next_thread(ModelAction *curr)
137 /* Do not split atomic actions. */
139 return thread_current();
140 /* The THREAD_CREATE action points to the created Thread */
141 else if (curr->get_type() == THREAD_CREATE)
142 return (Thread *)curr->get_location();
145 /* Have we completed exploring the preselected path? */
149 /* Else, we are trying to replay an execution */
150 ModelAction *next = node_stack->get_next()->get_action();
// 'diverge' is the action at which this replay should deviate from the
// previous execution; track the earliest such point since the last feasible
// execution for diagnostics.
152 if (next == diverge) {
153 if (earliest_diverge == NULL || *diverge < *earliest_diverge)
154 earliest_diverge=diverge;
156 Node *nextnode = next->get_node();
157 Node *prevnode = nextnode->get_parent();
158 scheduler->update_sleep_set(prevnode);
160 /* Reached divergence point */
// At the divergence point, try the alternatives in priority order: a
// different promise assignment, a different reads-from, a different future
// value, then a different release-sequence resolution. Each choice discards
// the stale subtree below the divergence.
161 if (nextnode->increment_promise()) {
162 /* The next node will try to satisfy a different set of promises. */
163 tid = next->get_tid();
164 node_stack->pop_restofstack(2);
165 } else if (nextnode->increment_read_from()) {
166 /* The next node will read from a different value. */
167 tid = next->get_tid();
168 node_stack->pop_restofstack(2);
169 } else if (nextnode->increment_future_value()) {
170 /* The next node will try to read from a different future value. */
171 tid = next->get_tid();
172 node_stack->pop_restofstack(2);
173 } else if (nextnode->increment_relseq_break()) {
174 /* The next node will try to resolve a release sequence differently */
175 tid = next->get_tid();
176 node_stack->pop_restofstack(2);
178 /* Make a different thread execute for next step */
// No alternative left at this node: put the previously-scheduled thread to
// sleep and backtrack to a different thread chosen at the parent node.
179 scheduler->add_sleep(thread_map->get(id_to_int(next->get_tid())));
180 tid = prevnode->get_next_backtrack();
181 /* Make sure the backtracked thread isn't sleeping. */
182 node_stack->pop_restofstack(1);
183 if (diverge==earliest_diverge) {
184 earliest_diverge=prevnode->get_action();
187 /* The correct sleep set is in the parent node. */
190 DEBUG("*** Divergence point ***\n");
// Not at the divergence point: keep replaying the recorded schedule.
194 tid = next->get_tid();
196 DEBUG("*** ModelChecker chose next thread = %d ***\n", id_to_int(tid));
197 ASSERT(tid != THREAD_ID_T_NONE);
198 return thread_map->get(id_to_int(tid));
202 * We need to know what the next actions of all threads in the sleep
203 * set will be. This method computes them and stores the actions at
204 * the corresponding thread object's pending action.
207 void ModelChecker::execute_sleep_set() {
208 for(unsigned int i=0;i<get_num_threads();i++) {
209 thread_id_t tid=int_to_id(i);
210 Thread *thr=get_thread(tid);
211 if ( scheduler->get_enabled(thr) == THREAD_SLEEP_SET ) {
// Run the sleeping thread for one step so it publishes its next action
// (it lands in priv->current_action via the context switch), then record
// that action as the thread's pending action.
212 thr->set_state(THREAD_RUNNING);
213 scheduler->next_thread(thr);
214 Thread::swap(&system_context, thr);
// Tag the action so later processing knows it was produced while the
// thread was in the sleep set.
215 priv->current_action->set_sleep_flag();
216 thr->set_pending(priv->current_action);
// Clear the scratch slot so stale actions are never reused.
219 priv->current_action = NULL;
// Wake any sleeping thread whose pending action could synchronize with the
// action 'curr' about to execute -- such a thread is no longer guaranteed
// redundant and must be removed from the sleep set.
222 void ModelChecker::wake_up_sleeping_actions(ModelAction * curr) {
223 for(unsigned int i=0;i<get_num_threads();i++) {
224 thread_id_t tid=int_to_id(i);
225 Thread *thr=get_thread(tid);
226 if ( scheduler->get_enabled(thr) == THREAD_SLEEP_SET ) {
// Pending actions were precomputed by execute_sleep_set().
227 ModelAction *pending_act=thr->get_pending();
228 if (pending_act->could_synchronize_with(curr)) {
229 //Remove this thread from sleep set
230 scheduler->remove_sleep(thr);
237 * Queries the model-checker for more executions to explore and, if one
238 * exists, resets the model-checker state to execute a new execution.
240 * @return If there are more executions to explore, return true. Otherwise,
243 bool ModelChecker::next_execution()
// On a feasible complete execution, report where this execution first
// diverged from the last feasible one, then reset the diagnostics.
249 if (isfinalfeasible()) {
250 printf("Earliest divergence point since last feasible execution:\n");
251 if (earliest_diverge)
252 earliest_diverge->print();
254 printf("(Not set)\n");
256 earliest_diverge = NULL;
257 num_feasible_executions++;
260 DEBUG("Number of acquires waiting on pending release sequences: %zu\n",
261 pending_rel_seqs->size());
263 if (isfinalfeasible() || DBG_ENABLED())
// No remaining backtracking point means the search space is exhausted.
266 if ((diverge = get_next_backtrack()) == NULL)
270 printf("Next execution will diverge at:\n");
// Restore program + checker state so the next execution starts fresh.
274 reset_to_initial_state();
// Find the most recent prior action at the same location that conflicts with
// 'act' (the candidate against which a backtracking point may be set). The
// notion of "conflict" depends on the action type: plain synchronization for
// reads/writes, lock-conflicts for lock/trylock, failed trylocks for unlock.
278 ModelAction * ModelChecker::get_last_conflict(ModelAction *act)
280 switch (act->get_type()) {
284 /* linear search: from most recent to oldest */
285 action_list_t *list = obj_map->get_safe_ptr(act->get_location());
286 action_list_t::reverse_iterator rit;
287 for (rit = list->rbegin(); rit != list->rend(); rit++) {
288 ModelAction *prev = *rit;
289 if (prev->could_synchronize_with(act))
295 case ATOMIC_TRYLOCK: {
296 /* linear search: from most recent to oldest */
297 action_list_t *list = obj_map->get_safe_ptr(act->get_location());
298 action_list_t::reverse_iterator rit;
299 for (rit = list->rbegin(); rit != list->rend(); rit++) {
300 ModelAction *prev = *rit;
301 if (act->is_conflicting_lock(prev))
306 case ATOMIC_UNLOCK: {
307 /* linear search: from most recent to oldest */
308 action_list_t *list = obj_map->get_safe_ptr(act->get_location());
309 action_list_t::reverse_iterator rit;
310 for (rit = list->rbegin(); rit != list->rend(); rit++) {
311 ModelAction *prev = *rit;
// Only a failed trylock from a *different* thread is worth reordering
// against an unlock.
312 if (!act->same_thread(prev)&&prev->is_failed_trylock())
323 /** This method finds backtracking points where we should try to
324 * reorder the parameter ModelAction against.
326 * @param the ModelAction to find backtracking points for.
328 void ModelChecker::set_backtracking(ModelAction *act)
330 Thread *t = get_thread(act);
331 ModelAction * prev = get_last_conflict(act);
// Backtracking is recorded at the node *before* the conflicting action.
335 Node * node = prev->get_node()->get_parent();
// If act's own thread was enabled at that node, only that thread needs to be
// considered; otherwise every thread is a candidate.
337 int low_tid, high_tid;
338 if (node->is_enabled(t)) {
339 low_tid = id_to_int(act->get_tid());
340 high_tid = low_tid+1;
343 high_tid = get_num_threads();
346 for(int i = low_tid; i < high_tid; i++) {
347 thread_id_t tid = int_to_id(i);
349 /* Don't backtrack into a point where the thread is disabled or sleeping. */
350 if (node->get_enabled_array()[i]!=THREAD_ENABLED)
353 /* Check if this has been explored already */
354 if (node->has_been_explored(tid))
357 /* See if fairness allows */
// Under the fairness window, skip this candidate if some other enabled
// thread holds priority at this node.
358 if (model->params.fairwindow != 0 && !node->has_priority(tid)) {
360 for(int t=0;t<node->get_num_threads();t++) {
361 thread_id_t tother=int_to_id(t);
362 if (node->is_enabled(tother) && node->has_priority(tother)) {
370 /* Cache the latest backtracking point */
371 if (!priv->next_backtrack || *prev > *priv->next_backtrack)
372 priv->next_backtrack = prev;
374 /* If this is a new backtracking point, mark the tree */
375 if (!node->set_backtrack(tid))
377 DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
378 id_to_int(prev->get_tid()),
379 id_to_int(t->get_id()));
388 * Returns last backtracking point. The model checker will explore a different
389 * path for this point in the next execution.
390 * @return The ModelAction at which the next execution should diverge.
392 ModelAction * ModelChecker::get_next_backtrack()
394 ModelAction *next = priv->next_backtrack;
// Consume the cached point so each backtracking point is diverged on once.
395 priv->next_backtrack = NULL;
400 * Processes a read or rmw model action.
401 * @param curr is the read model action to process.
402 * @param second_part_of_rmw is boolean that is true is this is the second action of a rmw.
403 * @return True if processing this read updates the mo_graph.
405 bool ModelChecker::process_read(ModelAction *curr, bool second_part_of_rmw)
408 bool updated = false;
// The NodeStack node records which store this read should read from on this
// execution; NULL means read from a promised "future value" instead.
410 const ModelAction *reads_from = curr->get_node()->get_read_from();
411 if (reads_from != NULL) {
// Stage mo_graph edges tentatively so they can be rolled back if this
// reads-from choice turns out infeasible.
412 mo_graph->startChanges();
414 value = reads_from->get_value();
415 bool r_status = false;
417 if (!second_part_of_rmw) {
418 check_recency(curr, reads_from);
419 r_status = r_modification_order(curr, reads_from);
// If this choice made the execution infeasible and an alternative (another
// store or a future value) remains, undo the staged edges and retry.
423 if (!second_part_of_rmw&&!isfeasible()&&(curr->get_node()->increment_read_from()||curr->get_node()->increment_future_value())) {
424 mo_graph->rollbackChanges();
425 too_many_reads = false;
429 curr->read_from(reads_from);
430 mo_graph->commitChanges();
431 mo_check_promises(curr->get_tid(), reads_from);
434 } else if (!second_part_of_rmw) {
435 /* Read from future value */
// Speculate on a value some not-yet-ordered write will produce; the Promise
// must be resolved by a matching write before the expiration clock.
436 value = curr->get_node()->get_future_value();
437 modelclock_t expiration = curr->get_node()->get_future_value_expiration();
438 curr->read_from(NULL);
439 Promise *valuepromise = new Promise(curr, value, expiration);
440 promises->push_back(valuepromise);
// The value read is delivered to the user thread as the action's result.
442 get_thread(curr)->set_return_value(value);
448 * Processes a lock, trylock, or unlock model action. @param curr is
449 * the read model action to process.
451 * The try lock operation checks whether the lock is taken. If not,
452 * it falls to the normal lock operation case. If so, it returns
455 * The lock operation has already been checked that it is enabled, so
456 * it just grabs the lock and synchronizes with the previous unlock.
458 * The unlock operation has to re-enable all of the threads that are
459 * waiting on the lock.
461 * @return True if synchronization was updated; false otherwise
463 bool ModelChecker::process_mutex(ModelAction *curr) {
// The action's location is the user-program mutex; its model-visible state
// (islocked, allocation clock) lives in mutex_state.
464 std::mutex *mutex = (std::mutex *)curr->get_location();
465 struct std::mutex_state *state = mutex->get_state();
466 switch (curr->get_type()) {
467 case ATOMIC_TRYLOCK: {
468 bool success = !state->islocked;
469 curr->set_try_lock(success);
// Failed trylock returns 0 to the user thread; success returns 1 and then
// proceeds like a normal lock acquisition.
471 get_thread(curr)->set_return_value(0);
474 get_thread(curr)->set_return_value(1);
476 //otherwise fall into the lock case
// Detect use of the mutex before its initialization is visible to this
// thread (clock comparison against the mutex's allocation clock).
478 if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock) {
479 printf("Lock access before initialization\n");
482 state->islocked = true;
483 ModelAction *unlock = get_last_unlock(curr);
484 //synchronize with the previous unlock statement
485 if (unlock != NULL) {
486 curr->synchronize_with(unlock);
491 case ATOMIC_UNLOCK: {
493 state->islocked = false;
494 //wake up the other threads
495 action_list_t *waiters = lock_waiters_map->get_safe_ptr(curr->get_location());
496 //activate all the waiting threads
497 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
498 scheduler->wake(get_thread(*rit));
510 * Process a write ModelAction
511 * @param curr The ModelAction to process
512 * @return True if the mo_graph was updated or promises were resolved
514 bool ModelChecker::process_write(ModelAction *curr)
516 bool updated_mod_order = w_modification_order(curr);
517 bool updated_promises = resolve_promises(curr);
// Only once all promises are resolved do we flush queued future values into
// the exploration tree; each accepted value may create a backtracking point.
519 if (promises->size() == 0) {
520 for (unsigned int i = 0; i < futurevalues->size(); i++) {
521 struct PendingFutureValue pfv = (*futurevalues)[i];
522 if (pfv.act->get_node()->add_future_value(pfv.value, pfv.expiration) &&
523 (!priv->next_backtrack || *pfv.act > *priv->next_backtrack))
524 priv->next_backtrack = pfv.act;
526 futurevalues->resize(0);
529 mo_graph->commitChanges();
530 mo_check_promises(curr->get_tid(), curr);
// Writes have no meaningful return value for the user thread.
532 get_thread(curr)->set_return_value(VALUE_NONE);
533 return updated_mod_order || updated_promises;
537 * @brief Process the current action for thread-related activity
539 * Performs current-action processing for a THREAD_* ModelAction. Proccesses
540 * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
541 * synchronization, etc. This function is a no-op for non-THREAD actions
542 * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
544 * @param curr The current action
545 * @return True if synchronization was updated or a thread completed
547 bool ModelChecker::process_thread_action(ModelAction *curr)
549 bool updated = false;
551 switch (curr->get_type()) {
552 case THREAD_CREATE: {
// THREAD_CREATE carries the created Thread in its location field.
553 Thread *th = (Thread *)curr->get_location();
554 th->set_creation(curr);
558 Thread *waiting, *blocking;
559 waiting = get_thread(curr);
560 blocking = (Thread *)curr->get_location();
// If the joined thread hasn't finished, block the joiner on its wait list;
// otherwise the join completes immediately.
561 if (!blocking->is_complete()) {
562 blocking->push_wait_list(curr);
563 scheduler->sleep(waiting);
565 do_complete_join(curr);
566 updated = true; /* trigger rel-seq checks */
570 case THREAD_FINISH: {
// Wake every thread joined on this one and complete their joins now.
571 Thread *th = get_thread(curr);
572 while (!th->wait_list_empty()) {
573 ModelAction *act = th->pop_wait_list();
574 Thread *wake = get_thread(act);
575 scheduler->wake(wake);
576 do_complete_join(act);
577 updated = true; /* trigger rel-seq checks */
580 updated = true; /* trigger rel-seq checks */
// Re-validate outstanding promises against this thread's clock vector.
584 check_promises(curr->get_tid(), NULL, curr->get_cv());
595 * @brief Process the current action for release sequence fixup activity
597 * Performs model-checker release sequence fixups for the current action,
598 * forcing a single pending release sequence to break (with a given, potential
599 * "loose" write) or to complete (i.e., synchronize). If a pending release
600 * sequence forms a complete release sequence, then we must perform the fixup
601 * synchronization, mo_graph additions, etc.
603 * @param curr The current action; must be a release sequence fixup action
604 * @param work_queue The work queue to which to add work items as they are
607 void ModelChecker::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
// The node records which write (if any) should break the sequence; a NULL-ish
// "break" choice vs. a concrete write drives the two branches below.
609 const ModelAction *write = curr->get_node()->get_relseq_break();
610 struct release_seq *sequence = pending_rel_seqs->back();
611 pending_rel_seqs->pop_back();
613 ModelAction *acquire = sequence->acquire;
614 const ModelAction *rf = sequence->rf;
615 const ModelAction *release = sequence->release;
619 ASSERT(release->same_thread(rf));
623 * @todo Forcing a synchronization requires that we set
624 * modification order constraints. For instance, we can't allow
625 * a fixup sequence in which two separate read-acquire
626 * operations read from the same sequence, where the first one
627 * synchronizes and the other doesn't. Essentially, we can't
628 * allow any writes to insert themselves between 'release' and
632 /* Must synchronize */
633 if (!acquire->synchronize_with(release)) {
634 set_bad_synchronization();
637 /* Re-check all pending release sequences */
638 work_queue->push_back(CheckRelSeqWorkEntry(NULL));
639 /* Re-check act for mo_graph edges */
640 work_queue->push_back(MOEdgeWorkEntry(acquire));
642 /* propagate synchronization to later actions */
// Walk the trace backwards from the end down to 'acquire', pushing the new
// synchronization into every action that happens-after the acquire.
643 action_list_t::reverse_iterator rit = action_trace->rbegin();
644 for (; (*rit) != acquire; rit++) {
645 ModelAction *propagate = *rit;
646 if (acquire->happens_before(propagate)) {
647 propagate->synchronize_with(acquire);
648 /* Re-check 'propagate' for mo_graph edges */
649 work_queue->push_back(MOEdgeWorkEntry(propagate));
653 /* Break release sequence with new edges:
654 * release --mo--> write --mo--> rf */
655 mo_graph->addEdge(release, write);
656 mo_graph->addEdge(write, rf);
659 /* See if we have realized a data race */
660 if (checkDataRaces())
665 * Initialize the current action by performing one or more of the following
666 * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
667 * in the NodeStack, manipulating backtracking sets, allocating and
668 * initializing clock vectors, and computing the promises to fulfill.
670 * @param curr The current action, as passed from the user context; may be
671 * freed/invalidated after the execution of this function
672 * @return The current action, as processed by the ModelChecker. Is only the
673 * same as the parameter @a curr if this is a newly-explored action.
675 ModelAction * ModelChecker::initialize_curr_action(ModelAction *curr)
677 ModelAction *newcurr;
// Second half of a RMW: merge it into the earlier RMWR action rather than
// creating a new node.
679 if (curr->is_rmwc() || curr->is_rmw()) {
680 newcurr = process_rmw(curr);
683 if (newcurr->is_rmw())
684 compute_promises(newcurr);
688 curr->set_seq_number(get_next_seq_num());
// explore_action returns either the previously-recorded action (replay) or
// curr itself (new exploration).
690 newcurr = node_stack->explore_action(curr, scheduler->get_enabled());
692 /* First restore type and order in case of RMW operation */
694 newcurr->copy_typeandorder(curr);
696 ASSERT(curr->get_location() == newcurr->get_location());
697 newcurr->copy_from_new(curr);
699 /* Discard duplicate ModelAction; use action from NodeStack */
702 /* Always compute new clock vector */
703 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
707 /* Always compute new clock vector */
708 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
710 * Perform one-time actions when pushing new ModelAction onto
713 if (newcurr->is_write())
714 compute_promises(newcurr);
715 else if (newcurr->is_relseq_fixup())
716 compute_relseq_breakwrites(newcurr);
722 * This method checks whether a model action is enabled at the given point.
723 * At this point, it checks whether a lock operation would be successful at this point.
724 * If not, it puts the thread in a waiter list.
725 * @param curr is the ModelAction to check whether it is enabled.
726 * @return a bool that indicates whether the action is enabled.
728 bool ModelChecker::check_action_enabled(ModelAction *curr) {
// Only lock acquisitions can currently be disabled; every other action type
// falls through as enabled.
729 if (curr->is_lock()) {
730 std::mutex * lock = (std::mutex *)curr->get_location();
731 struct std::mutex_state * state = lock->get_state();
732 if (state->islocked) {
733 //Stick the action in the appropriate waiting queue
// The owner's eventual unlock will wake the threads queued here (see
// process_mutex, ATOMIC_UNLOCK).
734 lock_waiters_map->get_safe_ptr(curr->get_location())->push_back(curr);
743 * This is the heart of the model checker routine. It performs model-checking
744 * actions corresponding to a given "current action." Among other processes, it
745 * calculates reads-from relationships, updates synchronization clock vectors,
746 * forms a memory_order constraints graph, and handles replay/backtrack
747 * execution when running permutations of previously-observed executions.
749 * @param curr The current action to process
750 * @return The next Thread that must be executed. May be NULL if ModelChecker
751 * makes no choice (e.g., according to replay execution, combining RMW actions,
754 Thread * ModelChecker::check_current_action(ModelAction *curr)
757 bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
// A disabled action (blocked lock) is deferred: remember it as the thread's
// pending action, put the thread to sleep, and pick another thread to run.
759 if (!check_action_enabled(curr)) {
760 /* Make the execution look like we chose to run this action
761 * much later, when a lock is actually available to release */
762 get_current_thread()->set_pending(curr);
763 scheduler->sleep(get_current_thread());
764 return get_next_thread(NULL);
767 wake_up_sleeping_actions(curr);
// newcurr may be a previously-explored action (replay) rather than curr.
769 ModelAction *newcurr = initialize_curr_action(curr);
772 /* Add the action to lists before any other model-checking tasks */
773 if (!second_part_of_rmw)
774 add_action_to_lists(newcurr);
776 /* Build may_read_from set for newly-created actions */
777 if (curr == newcurr && curr->is_read())
778 build_reads_from_past(curr);
781 /* Initialize work_queue with the "current action" work */
// Fixed-point loop: processing one item may enqueue more (release-sequence
// re-checks, mo_graph edge re-checks) until the queue drains.
782 work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
783 while (!work_queue.empty()) {
784 WorkQueueEntry work = work_queue.front();
785 work_queue.pop_front();
788 case WORK_CHECK_CURR_ACTION: {
789 ModelAction *act = work.action;
790 bool update = false; /* update this location's release seq's */
791 bool update_all = false; /* update all release seq's */
793 if (process_thread_action(curr))
796 if (act->is_read() && process_read(act, second_part_of_rmw))
799 if (act->is_write() && process_write(act))
802 if (act->is_mutex_op() && process_mutex(act))
805 if (act->is_relseq_fixup())
806 process_relseq_fixup(curr, &work_queue);
809 work_queue.push_back(CheckRelSeqWorkEntry(NULL));
811 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
814 case WORK_CHECK_RELEASE_SEQ:
815 resolve_release_sequences(work.location, &work_queue);
817 case WORK_CHECK_MO_EDGES: {
818 /** @todo Complete verification of work_queue */
819 ModelAction *act = work.action;
820 bool updated = false;
822 if (act->is_read()) {
823 const ModelAction *rf = act->get_reads_from();
824 if (rf != NULL && r_modification_order(act, rf))
827 if (act->is_write()) {
828 if (w_modification_order(act))
831 mo_graph->commitChanges();
834 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
// Finally, record any new backtracking opportunities and pick the next thread.
843 check_curr_backtracking(curr);
844 set_backtracking(curr);
845 return get_next_thread(curr);
849 * Complete a THREAD_JOIN operation, by synchronizing with the THREAD_FINISH
850 * operation from the Thread it is joining with. Must be called after the
851 * completion of the Thread in question.
852 * @param join The THREAD_JOIN action
854 void ModelChecker::do_complete_join(ModelAction *join)
856 Thread *blocking = (Thread *)join->get_location();
// The joined thread's last action is its THREAD_FINISH (it has completed),
// so synchronizing with it establishes the join's happens-before edge.
857 ModelAction *act = get_last_action(blocking->get_id());
858 join->synchronize_with(act);
// If curr's node (or its parent) still has unexplored alternatives -- untried
// backtracks, reads-from choices, future values, promises, or rel-seq breaks
// -- and curr is later than the currently cached backtracking point, cache
// curr as the next point to diverge on.
861 void ModelChecker::check_curr_backtracking(ModelAction * curr) {
862 Node *currnode = curr->get_node();
863 Node *parnode = currnode->get_parent();
865 if ((!parnode->backtrack_empty() ||
866 !currnode->read_from_empty() ||
867 !currnode->future_value_empty() ||
868 !currnode->promise_empty() ||
869 !currnode->relseq_break_empty())
870 && (!priv->next_backtrack ||
871 *curr > *priv->next_backtrack)) {
872 priv->next_backtrack = curr;
// A promise expires when the global sequence clock passes its expiration
// without a write having resolved it; any expired promise makes the current
// execution infeasible (see isfeasibleotherthanRMW()).
876 bool ModelChecker::promises_expired() {
877 for (unsigned int promise_index = 0; promise_index < promises->size(); promise_index++) {
878 Promise *promise = (*promises)[promise_index];
879 if (promise->get_expiration()<priv->used_sequence_numbers) {
886 /** @return whether the current partial trace must be a prefix of a
// A prefix is "committed" feasible only once nothing speculative remains:
// no unresolved promises and no pending release sequences.
888 bool ModelChecker::isfeasibleprefix() {
889 return promises->size() == 0 && pending_rel_seqs->size() == 0;
892 /** @return whether the current partial trace is feasible. */
893 bool ModelChecker::isfeasible() {
894 if (DBG_ENABLED() && mo_graph->checkForRMWViolation())
895 DEBUG("Infeasible: RMW violation\n");
// Feasible = no two RMWs reading from the same store, plus all the other
// feasibility conditions (cycles, promises, etc.).
897 return !mo_graph->checkForRMWViolation() && isfeasibleotherthanRMW();
900 /** @return whether the current partial trace is feasible other than
901 * multiple RMW reading from the same store. */
902 bool ModelChecker::isfeasibleotherthanRMW() {
// The DEBUG calls only report *why* the trace is infeasible; the actual
// verdict is the conjunction on the final return.
904 if (mo_graph->checkForCycles())
905 DEBUG("Infeasible: modification order cycles\n");
907 DEBUG("Infeasible: failed promise\n");
909 DEBUG("Infeasible: too many reads\n");
910 if (bad_synchronization)
911 DEBUG("Infeasible: bad synchronization ordering\n");
912 if (promises_expired())
913 DEBUG("Infeasible: promises expired\n");
915 return !mo_graph->checkForCycles() && !failed_promise && !too_many_reads && !bad_synchronization && !promises_expired();
918 /** Returns whether the current completed trace is feasible. */
919 bool ModelChecker::isfinalfeasible() {
920 if (DBG_ENABLED() && promises->size() != 0)
// NOTE(review): "unrevolved" in this debug message is a typo for
// "unresolved" (runtime string, not changed here).
921 DEBUG("Infeasible: unrevolved promises\n");
// A complete trace is feasible only if the partial-trace conditions hold AND
// every promise was resolved by some write.
923 return isfeasible() && promises->size() == 0;
926 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
927 ModelAction * ModelChecker::process_rmw(ModelAction *act) {
// The preceding action in this thread is the RMWR half; merge act into it.
928 ModelAction *lastread = get_last_action(act->get_tid());
929 lastread->process_rmw(act);
// For a successful RMW, the write must be mo-ordered immediately after the
// store it read from -- record that atomicity edge in the mo_graph.
930 if (act->is_rmw() && lastread->get_reads_from()!=NULL) {
931 mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
932 mo_graph->commitChanges();
938 * Checks whether a thread has read from the same write for too many times
939 * without seeing the effects of a later write.
942 * 1) there must be a different write that we could read from that would satisfy the modification order,
943 * 2) we must have read from the same value in excess of maxreads times, and
944 * 3) that other write must have been in the reads_from set for maxreads times.
946 * If so, we decide that the execution is no longer feasible.
948 void ModelChecker::check_recency(ModelAction *curr, const ModelAction *rf) {
// maxreads == 0 disables the check entirely.
949 if (params.maxreads != 0) {
// With only one possible store to read from there is no alternative anyway.
951 if (curr->get_node()->get_read_from_size() <= 1)
953 //Must make sure that execution is currently feasible... We could
954 //accidentally clear by rolling back
957 std::vector<action_list_t> *thrd_lists = obj_thrd_map->get_safe_ptr(curr->get_location());
958 int tid = id_to_int(curr->get_tid());
961 if ((int)thrd_lists->size() <= tid)
963 action_list_t *list = &(*thrd_lists)[tid];
965 action_list_t::reverse_iterator rit = list->rbegin();
967 for (; (*rit) != curr; rit++)
969 /* go past curr now */
// Keep a copy positioned just past curr so the confirmation pass below can
// rescan the same window.
972 action_list_t::reverse_iterator ritcopy = rit;
973 //See if we have enough reads from the same value
975 for (; count < params.maxreads; rit++,count++) {
976 if (rit==list->rend())
978 ModelAction *act = *rit;
982 if (act->get_reads_from() != rf)
984 if (act->get_node()->get_read_from_size() <= 1)
// For each alternative store curr could have read from, check it is both
// different from rf and feasible under the modification order.
987 for (int i = 0; i<curr->get_node()->get_read_from_size(); i++) {
989 const ModelAction * write = curr->get_node()->get_read_from_at(i);
991 //Need a different write
995 /* Test to see whether this is a feasible write to read from*/
996 mo_graph->startChanges();
997 r_modification_order(curr, write);
998 bool feasiblereadfrom = isfeasible();
999 mo_graph->rollbackChanges();
1001 if (!feasiblereadfrom)
1005 bool feasiblewrite = true;
1006 //now we need to see if this write works for everyone
1008 for (int loop = count; loop>0; loop--,rit++) {
1009 ModelAction *act=*rit;
1010 bool foundvalue = false;
1011 for (int j = 0; j<act->get_node()->get_read_from_size(); j++) {
// NOTE(review): the inner loop iterates over 'j' but indexes with 'i' on
// the next line -- this looks like a bug (should presumably be
// get_read_from_at(j)). Left unchanged here; confirm and fix upstream.
1012 if (act->get_node()->get_read_from_at(i)==write) {
1018 feasiblewrite = false;
// Every prior read could also have seen 'write': flag the execution.
1022 if (feasiblewrite) {
1023 too_many_reads = true;
1031 * Updates the mo_graph with the constraints imposed from the current
1034 * Basic idea is the following: Go through each other thread and find
1035 * the latest action that happened before our read. Two cases:
1037 * (1) The action is a write => that write must either occur before
1038 * the write we read from or be the write we read from.
1040 * (2) The action is a read => the write that that action read from
1041 * must occur before the write we read from or be the same write.
1043 * @param curr The current action. Must be a read.
1044 * @param rf The action that curr reads from. Must be a write.
1045 * @return True if modification order edges were added; false otherwise
1047 bool ModelChecker::r_modification_order(ModelAction *curr, const ModelAction *rf)
1049 std::vector<action_list_t> *thrd_lists = obj_thrd_map->get_safe_ptr(curr->get_location());
1052 ASSERT(curr->is_read());
1054 /* Iterate over all threads */
1055 for (i = 0; i < thrd_lists->size(); i++) {
1056 /* Iterate over actions in thread, starting from most recent */
1057 action_list_t *list = &(*thrd_lists)[i];
1058 action_list_t::reverse_iterator rit;
1059 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1060 ModelAction *act = *rit;
1063 * Include at most one act per-thread that "happens
1064 * before" curr. Don't consider reflexively.
1066 if (act->happens_before(curr) && act != curr) {
1067 if (act->is_write()) {
// Case (1): the earlier write is mo-before the write curr read from.
1069 mo_graph->addEdge(act, rf);
// Case (2): order the earlier read's source mo-before rf.
1073 const ModelAction *prevreadfrom = act->get_reads_from();
1074 //if the previous read is unresolved, keep going...
1075 if (prevreadfrom == NULL)
1078 if (rf != prevreadfrom) {
1079 mo_graph->addEdge(prevreadfrom, rf);
1091 /** This method fixes up the modification order when we resolve a
1092 * promises. The basic problem is that actions that occur after the
1093 * read curr could not properly add items to the modification order
1096 * So for each thread, we find the earliest item that happens after
1097 * the read curr. This is the item we have to fix up with additional
1098 * constraints. If that action is write, we add a MO edge between
1099 * the Action rf and that action. If the action is a read, we add a
1100 * MO edge between the Action rf, and whatever the read accessed.
1102 * @param curr is the read ModelAction that we are fixing up MO edges for.
1103 * @param rf is the write ModelAction that curr reads from.
1106 void ModelChecker::post_r_modification_order(ModelAction *curr, const ModelAction *rf)
1108 std::vector<action_list_t> *thrd_lists = obj_thrd_map->get_safe_ptr(curr->get_location());
1110 ASSERT(curr->is_read());
1112 /* Iterate over all threads */
1113 for (i = 0; i < thrd_lists->size(); i++) {
1114 /* Iterate over actions in thread, starting from most recent */
1115 action_list_t *list = &(*thrd_lists)[i];
1116 action_list_t::reverse_iterator rit;
1117 ModelAction *lastact = NULL;
1119 /* Find last action that happens after curr that is either not curr or a rmw */
// Scanning from most recent backwards, the final match is the *earliest*
// action in this thread that happens-after curr.
1120 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1121 ModelAction *act = *rit;
1122 if (curr->happens_before(act) && (curr != act || curr->is_rmw())) {
1128 /* Include at most one act per-thread that "happens before" curr */
1129 if (lastact != NULL) {
1130 if (lastact==curr) {
1131 //Case 1: The resolved read is a RMW, and we need to make sure
1132 //that the write portion of the RMW mod order after rf
1134 mo_graph->addEdge(rf, lastact);
1135 } else if (lastact->is_read()) {
1136 //Case 2: The resolved read is a normal read and the next
1137 //operation is a read, and we need to make sure the value read
1138 //is mod ordered after rf
1140 const ModelAction *postreadfrom = lastact->get_reads_from();
1141 if (postreadfrom != NULL&&rf != postreadfrom)
1142 mo_graph->addEdge(rf, postreadfrom);
1144 //Case 3: The resolved read is a normal read and the next
1145 //operation is a write, and we need to make sure that the
1146 //write is mod ordered after rf
1148 mo_graph->addEdge(rf, lastact);
/**
 * Updates the mo_graph with the constraints imposed from the current write.
 *
 * Basic idea is the following: Go through each other thread and find
 * the latest action that happened before our write. Two cases:
 *
 * (1) The action is a write => that write must occur before
 * the current write.
 *
 * (2) The action is a read => the write that that action read from
 * must occur before the current write.
 *
 * This method also handles two other issues:
 *
 * (I) Sequential Consistency: Making sure that if the current write is
 * seq_cst, that it occurs after the previous seq_cst write.
 *
 * (II) Sending the write back to non-synchronizing reads.
 *
 * @param curr The current action. Must be a write.
 * @return True if modification order edges were added; false otherwise
 */
bool ModelChecker::w_modification_order(ModelAction *curr)
	std::vector<action_list_t> *thrd_lists = obj_thrd_map->get_safe_ptr(curr->get_location());

	ASSERT(curr->is_write());

	if (curr->is_seqcst()) {
		/* We have to at least see the last sequentially consistent write,
			 so we are initialized. */
		ModelAction *last_seq_cst = get_last_seq_cst(curr);
		if (last_seq_cst != NULL) {
			mo_graph->addEdge(last_seq_cst, curr);

	/* Iterate over all threads */
	for (i = 0; i < thrd_lists->size(); i++) {
		/* Iterate over actions in thread, starting from most recent */
		action_list_t *list = &(*thrd_lists)[i];
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *act = *rit;

			/*
			 * 1) If RMW and it actually read from something, then we
			 * already have all relevant edges, so just skip to next
			 * thread.
			 *
			 * 2) If RMW and it didn't read from anything, we should add
			 * whatever edge we can get to speed up convergence.
			 *
			 * 3) If normal write, we need to look at earlier actions, so
			 * continue processing list.
			 */
			if (curr->is_rmw()) {
				if (curr->get_reads_from()!=NULL)

			/*
			 * Include at most one act per-thread that "happens
			 * before" curr
			 */
			if (act->happens_before(curr)) {
				/*
				 * Note: if act is RMW, just add edge:
				 *   act --mo--> curr
				 * The following edge should be handled elsewhere:
				 *   readfrom(act) --mo--> act
				 */
				if (act->is_write())
					mo_graph->addEdge(act, curr);
				else if (act->is_read()) {
					//if previous read accessed a null, just keep going
					if (act->get_reads_from() == NULL)
					mo_graph->addEdge(act->get_reads_from(), curr);
			} else if (act->is_read() && !act->could_synchronize_with(curr) &&
			                     !act->same_thread(curr)) {
				/* We have an action that:
				   (1) did not happen before us
				   (2) is a read and we are a write
				   (3) cannot synchronize with us
				   (4) is in a different thread
				   =>
				   that read could potentially read from our write.
				 */
				if (thin_air_constraint_may_allow(curr, act)) {
					(curr->is_rmw() && act->is_rmw() && curr->get_reads_from() == act->get_reads_from() && isfeasibleotherthanRMW())) {
					/* Stash this write as a potential future value for act */
					struct PendingFutureValue pfv = {curr->get_value(),curr->get_seq_number()+params.maxfuturedelay,act};
					futurevalues->push_back(pfv);
/** Arbitrary reads from the future are not allowed. Section 29.3
 * part 9 places some constraints. This method checks one result of the
 * constraint. Others require compiler support.
 *
 * @param writer The write that might be sent back to the reader
 * @param reader The read that might read from writer's future value
 * @return false only when the RMW reads-from chain starting at writer
 *         reaches reader, or reaches an action in reader's thread that
 *         happens before reader (which would create a thin-air cycle)
 */
bool ModelChecker::thin_air_constraint_may_allow(const ModelAction * writer, const ModelAction *reader) {
	if (!writer->is_rmw())

	if (!reader->is_rmw())

	/* Walk back along the RMW reads-from chain of the writer */
	for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
		if (search == reader)
		if (search->get_tid() == reader->get_tid() &&
				search->happens_before(reader))
/**
 * Finds the head(s) of the release sequence(s) containing a given ModelAction.
 * The ModelAction under consideration is expected to be taking part in
 * release/acquire synchronization as an object of the "reads from" relation.
 * Note that this can only provide release sequence support for RMW chains
 * which do not read from the future, as those actions cannot be traced until
 * their "promise" is fulfilled. Similarly, we may not even establish the
 * presence of a release sequence with certainty, as some modification order
 * constraints may be decided further in the future. Thus, this function
 * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
 * and a boolean representing certainty.
 *
 * @param rf The action that might be part of a release sequence. Must be a
 * write.
 * @param release_heads A pass-by-reference style return parameter. After
 * execution of this function, release_heads will contain the heads of all the
 * relevant release sequences, if any exist with certainty
 * @param pending A pass-by-reference style return parameter which is only used
 * when returning false (i.e., uncertain). Returns most information regarding
 * an uncertain release sequence, including any write operations that might
 * break the sequence.
 * @return true, if the ModelChecker is certain that release_heads is complete;
 * false otherwise
 */
bool ModelChecker::release_seq_heads(const ModelAction *rf,
		rel_heads_list_t *release_heads,
		struct release_seq *pending) const
	/* Only check for release sequences if there are no cycles */
	if (mo_graph->checkForCycles())

		ASSERT(rf->is_write());

		if (rf->is_release())
			release_heads->push_back(rf);
			break; /* End of RMW chain */

		/** @todo Need to be smarter here...  In the linux lock
		 * example, this will run to the beginning of the program for
		 * every acquire. */
		/** @todo The way to be smarter here is to keep going until 1
		 * thread has a release preceded by an acquire and you've seen
		 * both. */

		/* acq_rel RMW is a sufficient stopping condition */
		if (rf->is_acquire() && rf->is_release())
			return true; /* complete */

		/* Follow the RMW chain backwards through reads-from */
		rf = rf->get_reads_from();

	/* read from future: need to settle this later */
	return false; /* incomplete */

	if (rf->is_release())
		return true; /* complete */

	/* else relaxed write; check modification order for contiguous subsequence
	 * -> rf must be same thread as release */
	int tid = id_to_int(rf->get_tid());
	std::vector<action_list_t> *thrd_lists = obj_thrd_map->get_safe_ptr(rf->get_location());
	action_list_t *list = &(*thrd_lists)[tid];
	action_list_t::const_reverse_iterator rit;

	/* Find rf in the thread list */
	rit = std::find(list->rbegin(), list->rend(), rf);
	ASSERT(rit != list->rend());

	/* Find the last write/release */
	for (; rit != list->rend(); rit++)
		if ((*rit)->is_release())
	if (rit == list->rend()) {
		/* No write-release in this thread */
		return true; /* complete */
	ModelAction *release = *rit;

	ASSERT(rf->same_thread(release));

	pending->writes.clear();

	bool certain = true;
	for (unsigned int i = 0; i < thrd_lists->size(); i++) {
		/* Skip the thread containing rf/release itself */
		if (id_to_int(rf->get_tid()) == (int)i)
		list = &(*thrd_lists)[i];

		/* Can we ensure no future writes from this thread may break
		 * the release seq? */
		bool future_ordered = false;

		ModelAction *last = get_last_action(int_to_id(i));
		Thread *th = get_thread(int_to_id(i));
		if ((last && rf->happens_before(last)) ||
				!scheduler->is_enabled(th) ||
			future_ordered = true;

		/* The model thread performs no user writes */
		ASSERT(!th->is_model_thread() || future_ordered);

		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			const ModelAction *act = *rit;
			/* Reach synchronization -> this thread is complete */
			if (act->happens_before(release))
			if (rf->happens_before(act)) {
				future_ordered = true;

			/* Only writes can break release sequences */
			if (!act->is_write())

			/* Check modification order */
			if (mo_graph->checkReachable(rf, act)) {
				/* rf --mo--> act */
				future_ordered = true;
			if (mo_graph->checkReachable(act, release))
				/* act --mo--> release */
			if (mo_graph->checkReachable(release, act) &&
					mo_graph->checkReachable(act, rf)) {
				/* release --mo--> act --mo--> rf */
				return true; /* complete */
			/* act may break release sequence */
			pending->writes.push_back(act);

		if (!future_ordered)
			certain = false; /* This thread is uncertain */

	release_heads->push_back(release);
	pending->writes.clear();
	/* Record the candidate head for later (lazy) re-checking */
	pending->release = release;
/**
 * A public interface for getting the release sequence head(s) with which a
 * given ModelAction must synchronize. This function only returns a non-empty
 * result when it can locate a release sequence head with certainty. Otherwise,
 * it may mark the internal state of the ModelChecker so that it will handle
 * the release sequence at a later time, causing @a act to update its
 * synchronization at some later point in execution.
 * @param act The 'acquire' action that may read from a release sequence
 * @param release_heads A pass-by-reference return parameter. Will be filled
 * with the head(s) of the release sequence(s), if they exist with certainty.
 * @see ModelChecker::release_seq_heads
 */
void ModelChecker::get_release_seq_heads(ModelAction *act, rel_heads_list_t *release_heads)
	const ModelAction *rf = act->get_reads_from();
	/* Allocated on the snapshotting heap so it survives state restore */
	struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
	sequence->acquire = act;

	if (!release_seq_heads(rf, release_heads, sequence)) {
		/* add act to 'lazy checking' list */
		pending_rel_seqs->push_back(sequence);
		/* certain result: the pending record is not needed */
		snapshot_free(sequence);
/**
 * Attempt to resolve all stashed operations that might synchronize with a
 * release sequence for a given location. This implements the "lazy" portion of
 * determining whether or not a release sequence was contiguous, since not all
 * modification order information is present at the time an action occurs.
 *
 * @param location The location/object that should be checked for release
 * sequence resolutions. A NULL value means to check all locations.
 * @param work_queue The work queue to which to add work items as they are
 * generated
 * @return True if any updates occurred (new synchronization, new mo_graph
 * edges)
 */
bool ModelChecker::resolve_release_sequences(void *location, work_queue_t *work_queue)
	bool updated = false;
	std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >::iterator it = pending_rel_seqs->begin();
	while (it != pending_rel_seqs->end()) {
		struct release_seq *pending = *it;
		ModelAction *act = pending->acquire;

		/* Only resolve sequences on the given location, if provided */
		if (location && act->get_location() != location) {

		const ModelAction *rf = act->get_reads_from();
		rel_heads_list_t release_heads;
		/* Re-evaluate the sequence with the MO information now available */
		complete = release_seq_heads(rf, &release_heads, pending);
		for (unsigned int i = 0; i < release_heads.size(); i++) {
			if (!act->has_synchronized_with(release_heads[i])) {
				if (act->synchronize_with(release_heads[i]))
					set_bad_synchronization();

			/* Re-check all pending release sequences */
			work_queue->push_back(CheckRelSeqWorkEntry(NULL));
			/* Re-check act for mo_graph edges */
			work_queue->push_back(MOEdgeWorkEntry(act));

			/* propagate synchronization to later actions */
			action_list_t::reverse_iterator rit = action_trace->rbegin();
			for (; (*rit) != act; rit++) {
				ModelAction *propagate = *rit;
				if (act->happens_before(propagate)) {
					propagate->synchronize_with(act);
					/* Re-check 'propagate' for mo_graph edges */
					work_queue->push_back(MOEdgeWorkEntry(propagate));

			/* Sequence resolved: drop it from the pending list */
			it = pending_rel_seqs->erase(it);
			snapshot_free(pending);

	// If we resolved promises or data races, see if we have realized a data race.
	if (checkDataRaces()) {
/**
 * Performs various bookkeeping operations for the current ModelAction. For
 * instance, adds action to the per-object, per-thread action vector and to the
 * action trace list of all thread actions.
 *
 * @param act is the ModelAction to add.
 */
void ModelChecker::add_action_to_lists(ModelAction *act)
	int tid = id_to_int(act->get_tid());
	action_trace->push_back(act);

	/* Per-object list of all actions on that location */
	obj_map->get_safe_ptr(act->get_location())->push_back(act);

	/* Per-object, per-thread lists; grow for newly-seen threads */
	std::vector<action_list_t> *vec = obj_thrd_map->get_safe_ptr(act->get_location());
	if (tid >= (int)vec->size())
		vec->resize(priv->next_thread_id);
	(*vec)[tid].push_back(act);

	/* Track the most recent action of each thread */
	if ((int)thrd_last_action->size() <= tid)
		thrd_last_action->resize(get_num_threads());
	(*thrd_last_action)[tid] = act;
/**
 * @brief Get the last action performed by a particular Thread
 * @param tid The thread ID of the Thread in question
 * @return The last action in the thread; NULL if the thread has not yet
 *         recorded an action
 */
ModelAction * ModelChecker::get_last_action(thread_id_t tid) const
	int threadid = id_to_int(tid);
	if (threadid < (int)thrd_last_action->size())
		return (*thrd_last_action)[id_to_int(tid)];
/**
 * Gets the last memory_order_seq_cst write (in the total global sequence)
 * performed on a particular object (i.e., memory location), not including the
 * current action.
 * @param curr The current ModelAction; also denotes the object location to
 * check
 * @return The last seq_cst write
 */
ModelAction * ModelChecker::get_last_seq_cst(ModelAction *curr) const
	void *location = curr->get_location();
	action_list_t *list = obj_map->get_safe_ptr(location);
	/* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
	action_list_t::reverse_iterator rit;
	for (rit = list->rbegin(); rit != list->rend(); rit++)
		if ((*rit)->is_write() && (*rit)->is_seqcst() && (*rit) != curr)
/**
 * Gets the last unlock operation performed on a particular mutex (i.e., memory
 * location). This function identifies the mutex according to the current
 * action, which is presumed to perform on the same mutex.
 * @param curr The current ModelAction; also denotes the object location to
 * check
 * @return The last unlock operation
 */
ModelAction * ModelChecker::get_last_unlock(ModelAction *curr) const
	void *location = curr->get_location();
	action_list_t *list = obj_map->get_safe_ptr(location);
	/* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
	action_list_t::reverse_iterator rit;
	for (rit = list->rbegin(); rit != list->rend(); rit++)
		if ((*rit)->is_unlock())
/**
 * @brief Get the "parent" action for a thread: its last action, or — for a
 * thread with no actions yet — the thread-create action that spawned it.
 * @param tid The thread in question
 * @return The parent ModelAction
 */
ModelAction * ModelChecker::get_parent_action(thread_id_t tid)
	ModelAction *parent = get_last_action(tid);
		/* No action yet: fall back to the creating action */
		parent = get_thread(tid)->get_creation();
/**
 * Returns the clock vector for a given thread.
 * @param tid The thread whose clock vector we want
 * @return Desired clock vector (taken from the thread's parent action)
 */
ClockVector * ModelChecker::get_cv(thread_id_t tid)
	return get_parent_action(tid)->get_cv();
/**
 * Resolve a set of Promises with a current write. The set is provided in the
 * Node corresponding to @a write.
 * @param write The ModelAction that is fulfilling Promises
 * @return True if promises were resolved; false otherwise
 */
bool ModelChecker::resolve_promises(ModelAction *write)
	bool resolved = false;
	std::vector< thread_id_t, ModelAlloc<thread_id_t> > threads_to_check;

	/* promise_index tracks position in the (shrinking) promises vector,
	 * while i indexes the Node's promise bitmask */
	for (unsigned int i = 0, promise_index = 0; promise_index < promises->size(); i++) {
		Promise *promise = (*promises)[promise_index];
		if (write->get_node()->get_promise(i)) {
			ModelAction *read = promise->get_action();
			if (read->is_rmw()) {
				mo_graph->addRMWEdge(write, read);
			read->read_from(write);
			//First fix up the modification order for actions that happened
			//before the read
			r_modification_order(read, write);
			//Next fix up the modification order for actions that happened
			//after the read
			post_r_modification_order(read, write);
			//Make sure the promise's value matches the write's value
			ASSERT(promise->get_value() == write->get_value());

			promises->erase(promises->begin() + promise_index);
			threads_to_check.push_back(read->get_tid());

	//Check whether reading these writes has made threads unable to
	//resolve promises
	for(unsigned int i=0;i<threads_to_check.size();i++)
		mo_check_promises(threads_to_check[i], write);
/**
 * Compute the set of promises that could potentially be satisfied by this
 * action. Note that the set computation actually appears in the Node, not in
 * ModelChecker.
 * @param curr The ModelAction that may satisfy promises
 */
void ModelChecker::compute_promises(ModelAction *curr)
	for (unsigned int i = 0; i < promises->size(); i++) {
		Promise *promise = (*promises)[i];
		const ModelAction *act = promise->get_action();
		/* A promise is satisfiable by curr only if curr is unordered
		 * w.r.t. the promised read and writes the promised value */
		if (!act->happens_before(curr) &&
				!act->could_synchronize_with(curr) &&
				!act->same_thread(curr) &&
				promise->get_value() == curr->get_value()) {
			curr->get_node()->set_promise(i);
/** Checks promises in response to change in ClockVector Threads.
 *
 * @param tid The thread whose clock vector merged
 * @param old_cv The thread's clock vector before the merge (may be NULL)
 * @param merge_cv The thread's clock vector after the merge
 */
void ModelChecker::check_promises(thread_id_t tid, ClockVector *old_cv, ClockVector *merge_cv)
	for (unsigned int i = 0; i < promises->size(); i++) {
		Promise *promise = (*promises)[i];
		const ModelAction *act = promise->get_action();
		/* Only act on promises that newly became synchronized */
		if ((old_cv == NULL || !old_cv->synchronized_since(act)) &&
				merge_cv->synchronized_since(act)) {
			if (promise->increment_threads(tid)) {
				//Promise has failed
				failed_promise = true;
/** Checks promises in response to addition to modification order for threads.
 *
 * Definitions:
 *
 * pthread is the thread that performed the read that created the promise
 *
 * pread is the read that created the promise
 *
 * pwrite is either the first write to same location as pread by
 * pthread that is sequenced after pread or the value read by the
 * first read to the same location as pread by pthread that is
 * sequenced after pread.
 *
 * 1. If tid=pthread, then we check what other threads are reachable
 * through the modification order starting with pwrite. Those threads cannot
 * perform a write that will resolve the promise due to modification
 * order constraints.
 *
 * 2. If the tid is not pthread, we check whether pwrite can reach the
 * action write through the modification order. If so, that thread
 * cannot perform a future write that will resolve the promise due to
 * modification order constraints.
 *
 * @param tid The thread that either read from the model action
 * write, or actually did the model action write.
 *
 * @param write The ModelAction representing the relevant write.
 */
void ModelChecker::mo_check_promises(thread_id_t tid, const ModelAction *write) {
	void * location = write->get_location();
	for (unsigned int i = 0; i < promises->size(); i++) {
		Promise *promise = (*promises)[i];
		const ModelAction *act = promise->get_action();

		//Is this promise on the same location?
		if ( act->get_location() != location )

		//same thread as the promise
		if ( act->get_tid()==tid ) {

			//do we have a pwrite for the promise, if not, set it
			if (promise->get_write() == NULL ) {
				promise->set_write(write);
				//The pwrite cannot be mo-ordered before the promise
				if (mo_graph->checkPromise(write, promise)) {
					failed_promise = true;

		//Don't do any lookups twice for the same thread
		if (promise->has_sync_thread(tid))

		if (mo_graph->checkReachable(promise->get_write(), write)) {
			if (promise->increment_threads(tid)) {
				failed_promise = true;
/**
 * Compute the set of writes that may break the current pending release
 * sequence. This information is extracted from previous release sequence
 * calculations.
 *
 * @param curr The current ModelAction. Must be a release sequence fixup
 * action.
 */
void ModelChecker::compute_relseq_breakwrites(ModelAction *curr)
	if (pending_rel_seqs->empty())

	/* Only consider the most recent pending sequence */
	struct release_seq *pending = pending_rel_seqs->back();
	for (unsigned int i = 0; i < pending->writes.size(); i++) {
		const ModelAction *write = pending->writes[i];
		curr->get_node()->add_relseq_break(write);

	/* NULL means don't break the sequence; just synchronize */
	curr->get_node()->add_relseq_break(NULL);
/**
 * Build up an initial set of all past writes that this 'read' action may read
 * from. This set is determined by the clock vector's "happens before"
 * relation.
 * @param curr is the current ModelAction that we are exploring; it must be a
 * 'read' operation.
 */
void ModelChecker::build_reads_from_past(ModelAction *curr)
	std::vector<action_list_t> *thrd_lists = obj_thrd_map->get_safe_ptr(curr->get_location());

	ASSERT(curr->is_read());

	ModelAction *last_seq_cst = NULL;

	/* Track whether this object has been initialized */
	bool initialized = false;

	if (curr->is_seqcst()) {
		last_seq_cst = get_last_seq_cst(curr);
		/* We have to at least see the last sequentially consistent write,
			 so we are initialized. */
		if (last_seq_cst != NULL)

	/* Iterate over all threads */
	for (i = 0; i < thrd_lists->size(); i++) {
		/* Iterate over actions in thread, starting from most recent */
		action_list_t *list = &(*thrd_lists)[i];
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *act = *rit;

			/* Only consider 'write' actions */
			if (!act->is_write() || act == curr)

			/* Don't consider more than one seq_cst write if we are a seq_cst read. */
			if (!curr->is_seqcst() || (!act->is_seqcst() && (last_seq_cst == NULL || !act->happens_before(last_seq_cst))) || act == last_seq_cst) {
				DEBUG("Adding action to may_read_from:\n");
				if (DBG_ENABLED()) {

				/* Sleeping threads are restricted in what they may read */
				if (curr->get_sleep_flag()) {
					if (sleep_can_read_from(curr, act))
						curr->get_node()->add_read_from(act);
					curr->get_node()->add_read_from(act);

			/* Include at most one act per-thread that "happens before" curr */
			if (act->happens_before(curr)) {

	/** @todo Need a more informative way of reporting errors. */
	printf("ERROR: may read from uninitialized atomic\n");

	if (DBG_ENABLED() || !initialized) {
		printf("Reached read action:\n");
		printf("Printing may_read_from\n");
		curr->get_node()->print_may_read_from();
		printf("End printing may_read_from\n");

	ASSERT(initialized);
/**
 * @brief Check whether a sleeping thread's read may read from a given write.
 *
 * Walks the write's RMW reads-from chain; the visible check accepts a
 * release write made while curr's thread was on the sleep set.
 * @param curr The read being checked (belongs to a sleeping thread)
 * @param write The candidate write
 */
bool ModelChecker::sleep_can_read_from(ModelAction * curr, const ModelAction *write) {
	/* Inspect the enabled-thread state recorded just before the write */
	Node *prevnode=write->get_node()->get_parent();

	bool thread_sleep=prevnode->get_enabled_array()[id_to_int(curr->get_tid())]==THREAD_SLEEP_SET;
	if (write->is_release()&&thread_sleep)
	if (!write->is_rmw())
	if (write->get_reads_from()==NULL)
	/* Continue back along the RMW chain */
	write=write->get_reads_from();
/** @brief Print a banner-delimited dump of an action list (debug output) */
static void print_list(action_list_t *list)
	action_list_t::iterator it;

	printf("---------------------------------------------------------------------\n");

	for (it = list->begin(); it != list->end(); it++) {

	printf("---------------------------------------------------------------------\n");
#if SUPPORT_MOD_ORDER_DUMP
/**
 * @brief Dump the current execution as a Graphviz .dot file
 *
 * Emits the modification-order graph nodes plus reads-from (red) and
 * sequenced-before (blue) edges for every action in the trace.
 * @param filename Base name; ".dot" is appended
 */
void ModelChecker::dumpGraph(char *filename) {
	sprintf(buffer, "%s.dot",filename);
	FILE *file=fopen(buffer, "w");
	fprintf(file, "digraph %s {\n",filename);
	mo_graph->dumpNodes(file);
	/* thread_array[tid] tracks each thread's previous action for sb edges */
	ModelAction ** thread_array=(ModelAction **)model_calloc(1, sizeof(ModelAction *)*get_num_threads());

	for (action_list_t::iterator it = action_trace->begin(); it != action_trace->end(); it++) {
		ModelAction *action=*it;
		if (action->is_read()) {
			fprintf(file, "N%u [label=\"%u, T%u\"];\n", action->get_seq_number(),action->get_seq_number(), action->get_tid());
			if (action->get_reads_from()!=NULL)
				fprintf(file, "N%u -> N%u[label=\"rf\", color=red];\n", action->get_seq_number(), action->get_reads_from()->get_seq_number());
		if (thread_array[action->get_tid()] != NULL) {
			fprintf(file, "N%u -> N%u[label=\"sb\", color=blue];\n", thread_array[action->get_tid()]->get_seq_number(), action->get_seq_number());

		thread_array[action->get_tid()]=action;

	fprintf(file,"}\n");
	model_free(thread_array);
/** @brief Print execution statistics and (optionally) the action trace */
void ModelChecker::print_summary()
	printf("Number of executions: %d\n", num_executions);
	printf("Number of feasible executions: %d\n", num_feasible_executions);
	printf("Total nodes created: %d\n", node_stack->get_total_nodes());

#if SUPPORT_MOD_ORDER_DUMP
	/* Also dump the MO graph for this execution to files */
	char buffername[100];
	sprintf(buffername, "exec%04u", num_executions);
	mo_graph->dumpGraphToFile(buffername);
	sprintf(buffername, "graph%04u", num_executions);
	dumpGraph(buffername);

	if (!isfinalfeasible())
		printf("INFEASIBLE EXECUTION!\n");
	print_list(action_trace);
/**
 * Add a Thread to the system for the first time. Should only be called once
 * per thread.
 * @param t The Thread to add
 */
void ModelChecker::add_thread(Thread *t)
	thread_map->put(id_to_int(t->get_id()), t);
	scheduler->add_thread(t);
/**
 * Removes a thread from the scheduler.
 * @param t The thread to remove.
 */
void ModelChecker::remove_thread(Thread *t)
	scheduler->remove_thread(t);
/**
 * @brief Get a Thread reference by its ID
 * @param tid The Thread's ID
 * @return A Thread reference
 */
Thread * ModelChecker::get_thread(thread_id_t tid) const
	return thread_map->get(id_to_int(tid));
/**
 * @brief Get a reference to the Thread in which a ModelAction was executed
 * @param act The ModelAction
 * @return A Thread reference
 */
Thread * ModelChecker::get_thread(ModelAction *act) const
	return get_thread(act->get_tid());
/**
 * Switch from a user-context to the "master thread" context (a.k.a. system
 * context). This switch is made with the intention of exploring a particular
 * model-checking action (described by a ModelAction object). Must be called
 * from a user-thread context.
 *
 * @param act The current action that will be explored. May be NULL only if
 * trace is exiting via an assertion (see ModelChecker::set_assert and
 * ModelChecker::has_asserted).
 * @return Return status from the 'swap' call (i.e., success/fail, 0/-1)
 */
int ModelChecker::switch_to_master(ModelAction *act)
	Thread *old = thread_current();
	set_current_action(act);
	/* Mark the caller ready so take_step() will process its action */
	old->set_state(THREAD_READY);
	return Thread::swap(old, &system_context);
/**
 * Takes the next step in the execution, if possible.
 * @return Returns true (success) if a step was taken and false otherwise.
 */
bool ModelChecker::take_step() {
	Thread *curr = priv->current_action ? get_thread(priv->current_action) : NULL;
		if (curr->get_state() == THREAD_READY) {
			ASSERT(priv->current_action);

			/* Consume the current action and compute the next thread */
			priv->nextThread = check_current_action(priv->current_action);
			priv->current_action = NULL;

			if (curr->is_blocked() || curr->is_complete())
				scheduler->remove_thread(curr);

	Thread *next = scheduler->next_thread(priv->nextThread);

	/* Infeasible -> don't take any more steps */

	DEBUG("(%d, %d)\n", curr ? id_to_int(curr->get_id()) : -1,
			next ? id_to_int(next->get_id()) : -1);

	/*
	 * Launch end-of-execution release sequence fixups only when there are:
	 *
	 * (1) no more user threads to run (or when execution replay chooses
	 *     the 'model_thread')
	 * (2) pending release sequences
	 * (3) pending assertions (i.e., data races)
	 * (4) no pending promises
	 */
	if (!pending_rel_seqs->empty() && (!next || next->is_model_thread()) &&
			isfinalfeasible() && !unrealizedraces.empty()) {
		printf("*** WARNING: release sequence fixup action (%zu pending release seuqences) ***\n",
				pending_rel_seqs->size());
		ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
				std::memory_order_seq_cst, NULL, VALUE_NONE,
		set_current_action(fixup);

	/* next == NULL -> don't take any more steps */

	next->set_state(THREAD_RUNNING);

	if (next->get_pending() != NULL) {
		/* restart a pending action */
		set_current_action(next->get_pending());
		next->set_pending(NULL);
		next->set_state(THREAD_READY);

	/* Return false only if swap fails with an error */
	return (Thread::swap(&system_context, next) == 0);
/** Runs the current execution until there are no more steps to take. */
void ModelChecker::finish_execution() {
	while (take_step());