10 #include "snapshot-interface.h"
12 #include "clockvector.h"
13 #include "cyclegraph.h"
16 #include "threads-model.h"
19 #define INITIAL_THREAD_ID 0
/* Members of a snapshot-allocated wrapper for one bug-report message.
 * NOTE(review): the enclosing struct declaration is outside this view. */
24 bug_message(const char *str) {
25 const char *fmt = " [BUG] %s\n";
/* fmt's "%s" (2 chars) is replaced by str, so fmt+str lengths also cover the NUL */
26 msg = (char *)snapshot_malloc(strlen(fmt) + strlen(str));
27 sprintf(msg, fmt, str);
/* Buffer was snapshot_malloc'd, so release it via snapshot_free */
29 ~bug_message() { if (msg) snapshot_free(msg); }
/* Emit the formatted message through the model-checker's printer */
32 void print() { model_print("%s", msg); }
38 * Structure for holding small ModelChecker members that should be snapshotted
38 * Structure for holding small ModelChecker members that should be snapshotted
40 struct model_snapshot_members {
41 model_snapshot_members() :
42 /* First thread created will have id INITIAL_THREAD_ID */
43 next_thread_id(INITIAL_THREAD_ID),
44 used_sequence_numbers(0),
48 failed_promise(false),
49 too_many_reads(false),
50 no_valid_reads(false),
51 bad_synchronization(false),
/* Destructor: free every stashed bug_message (heap-owned raw pointers) */
55 ~model_snapshot_members() {
56 for (unsigned int i = 0; i < bugs.size(); i++)
/* next_thread_id doubles as the count of threads created so far */
61 unsigned int next_thread_id;
62 modelclock_t used_sequence_numbers;
/* Latest cached backtracking point; NULL when none pending */
63 ModelAction *next_backtrack;
64 SnapVector<bug_message *> bugs;
65 struct execution_stats stats;
69 /** @brief Incorrectly-ordered synchronization was made */
70 bool bad_synchronization;
76 /** @brief Constructor */
77 ModelChecker::ModelChecker(struct model_params params) :
78 /* Initialize default scheduler */
80 scheduler(new Scheduler()),
82 earliest_diverge(NULL),
83 action_trace(new action_list_t()),
84 thread_map(new HashTable<int, Thread *, int>()),
85 obj_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
86 condvar_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
87 obj_thrd_map(new HashTable<void *, SnapVector<action_list_t> *, uintptr_t, 4 >()),
88 promises(new SnapVector<Promise *>()),
89 futurevalues(new SnapVector<struct PendingFutureValue>()),
90 pending_rel_seqs(new SnapVector<struct release_seq *>()),
91 thrd_last_action(new SnapVector<ModelAction *>(1)),
92 thrd_last_fence_release(new SnapVector<ModelAction *>()),
93 node_stack(new NodeStack()),
94 priv(new struct model_snapshot_members()),
95 mo_graph(new CycleGraph())
97 /* Initialize a model-checker thread, for special ModelActions */
98 model_thread = new Thread(get_next_id());
99 thread_map->put(id_to_int(model_thread->get_id()), model_thread);
102 /** @brief Destructor */
102 /** @brief Destructor */
103 ModelChecker::~ModelChecker()
/* Delete all threads registered in the map, then the owned containers */
105 for (unsigned int i = 0; i < get_num_threads(); i++)
106 delete thread_map->get(i);
111 delete condvar_waiters_map;
/* Promises are heap-owned raw pointers; free each before the vector */
114 for (unsigned int i = 0; i < promises->size(); i++)
115 delete (*promises)[i];
118 delete pending_rel_seqs;
120 delete thrd_last_action;
121 delete thrd_last_fence_release;
/**
 * Fetch the action list for a location from the hash table.
 * NOTE(review): lines absent from this view presumably insert the newly
 * created list into the table when no entry exists — confirm upstream.
 */
128 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr)
130 action_list_t *tmp = hash->get(ptr);
132 tmp = new action_list_t();
/**
 * Fetch the per-thread action-list vector for a location.
 * NOTE(review): mirror of get_safe_ptr_action(); the lazy-insert path is
 * outside this view — confirm the new vector is put() into the table.
 */
138 static SnapVector<action_list_t> * get_safe_ptr_vect_action(HashTable<void *, SnapVector<action_list_t> *, uintptr_t, 4> * hash, void * ptr)
140 SnapVector<action_list_t> *tmp = hash->get(ptr);
142 tmp = new SnapVector<action_list_t>();
149 * Restores user program to initial state and resets all model-checker data
149 * Restores user program to initial state and resets all model-checker data
152 void ModelChecker::reset_to_initial_state()
154 DEBUG("+++ Resetting to initial state +++\n");
155 node_stack->reset_execution();
158 * FIXME: if we utilize partial rollback, we will need to free only
159 * those pending actions which were NOT pending before the rollback
/* Pending actions are not snapshot-managed; free them explicitly */
162 for (unsigned int i = 0; i < get_num_threads(); i++)
163 delete get_thread(int_to_id(i))->get_pending();
/* Roll the snapshot back to the very first checkpoint */
165 snapshot_backtrack_before(0);
168 /** @return a thread ID for a new Thread */
169 thread_id_t ModelChecker::get_next_id()
/* Post-increment: allocates this id and bumps the thread count that
 * get_num_threads() reports */
171 return priv->next_thread_id++;
174 /** @return the number of user threads created during this execution */
175 unsigned int ModelChecker::get_num_threads() const
/* Ids are dense starting at INITIAL_THREAD_ID, so next id == count */
177 return priv->next_thread_id;
181 * Must be called from user-thread context (e.g., through the global
182 * thread_current() interface)
184 * @return The currently executing Thread.
186 Thread * ModelChecker::get_current_thread() const
/* Scheduler is the single source of truth for the running thread */
188 return scheduler->get_current_thread();
191 /** @return a sequence number for a new ModelAction */
192 modelclock_t ModelChecker::get_next_seq_num()
/* Pre-increment: the first action gets sequence number 1, not 0 */
194 return ++priv->used_sequence_numbers;
/** @return the head Node of the NodeStack (the current exploration node) */
197 Node * ModelChecker::get_curr_node() const
199 return node_stack->get_head();
203 * @brief Select the next thread to execute based on the current action
205 * RMW actions occur in two parts, and we cannot split them. And THREAD_CREATE
206 * actions should be followed by the execution of their child thread. In either
207 * case, the current action should determine the next thread schedule.
209 * @param curr The current action
210 * @return The next thread to run, if the current action will determine this
211 * selection; otherwise NULL
213 Thread * ModelChecker::action_select_next_thread(const ModelAction *curr) const
215 /* Do not split atomic RMW */
217 return get_thread(curr);
218 /* Follow CREATE with the created thread */
219 if (curr->get_type() == THREAD_CREATE)
220 return curr->get_thread_operand();
225 * @brief Choose the next thread to execute.
227 * This function chooses the next thread that should execute. It can enforce
228 * execution replay/backtracking or, if the model-checker has no preference
229 * regarding the next thread (i.e., when exploring a new execution ordering),
230 * we defer to the scheduler.
232 * @return The next chosen thread to run, if any exist. Or else if the current
233 * execution should terminate, return NULL.
235 Thread * ModelChecker::get_next_thread()
240 * Have we completed exploring the preselected path? Then let the
244 return scheduler->select_next_thread();
246 /* Else, we are trying to replay an execution */
247 ModelAction *next = node_stack->get_next()->get_action();
249 if (next == diverge) {
/* Track the earliest divergence since the last feasible execution */
250 if (earliest_diverge == NULL || *diverge < *earliest_diverge)
251 earliest_diverge = diverge;
253 Node *nextnode = next->get_node();
254 Node *prevnode = nextnode->get_parent();
255 scheduler->update_sleep_set(prevnode);
257 /* Reached divergence point */
258 if (nextnode->increment_behaviors()) {
259 /* Execute the same thread with a new behavior */
260 tid = next->get_tid();
261 node_stack->pop_restofstack(2);
264 /* Make a different thread execute for next step */
265 scheduler->add_sleep(get_thread(next->get_tid()));
266 tid = prevnode->get_next_backtrack();
267 /* Make sure the backtracked thread isn't sleeping. */
268 node_stack->pop_restofstack(1);
/* If we consumed the earliest divergence, move it up to the parent */
269 if (diverge == earliest_diverge) {
270 earliest_diverge = prevnode->get_action();
273 /* Start the round robin scheduler from this thread id */
274 scheduler->set_scheduler_thread(tid);
275 /* The correct sleep set is in the parent node. */
278 DEBUG("*** Divergence point ***\n");
/* Ordinary replay: run the recorded thread for this step */
282 tid = next->get_tid();
284 DEBUG("*** ModelChecker chose next thread = %d ***\n", id_to_int(tid));
285 ASSERT(tid != THREAD_ID_T_NONE);
286 return get_thread(id_to_int(tid));
290 * We need to know what the next actions of all threads in the sleep
291 * set will be. This method computes them and stores the actions at
292 * the corresponding thread object's pending action.
295 void ModelChecker::execute_sleep_set()
297 for (unsigned int i = 0; i < get_num_threads(); i++) {
298 thread_id_t tid = int_to_id(i);
299 Thread *thr = get_thread(tid);
/* Only tag threads that are asleep AND have a pending action */
300 if (scheduler->is_sleep_set(thr) && thr->get_pending()) {
301 thr->get_pending()->set_sleep_flag();
307 * @brief Should the current action wake up a given thread?
309 * @param curr The current action
310 * @param thread The thread that we might wake up
311 * @return True, if we should wake up the sleeping thread; false otherwise
313 bool ModelChecker::should_wake_up(const ModelAction *curr, const Thread *thread) const
315 const ModelAction *asleep = thread->get_pending();
316 /* Don't allow partial RMW to wake anyone up */
319 /* Synchronizing actions may have been backtracked */
320 if (asleep->could_synchronize_with(curr))
322 /* All acquire/release fences and fence-acquire/store-release */
323 if (asleep->is_fence() && asleep->is_acquire() && curr->is_release())
325 /* Fence-release + store can awake load-acquire on the same location */
326 if (asleep->is_read() && asleep->is_acquire() && curr->same_var(asleep) && curr->is_write()) {
327 ModelAction *fence_release = get_last_fence_release(curr->get_tid());
/* Wake only if the fence-release happened after the sleeper's last action */
328 if (fence_release && *(get_last_action(thread->get_id())) < *fence_release)
/**
 * @brief Remove any sleeping thread that the current action could wake.
 * @param curr The current action, tested against each sleeper via
 *             should_wake_up()
 */
334 void ModelChecker::wake_up_sleeping_actions(ModelAction *curr)
336 for (unsigned int i = 0; i < get_num_threads(); i++) {
337 Thread *thr = get_thread(int_to_id(i));
338 if (scheduler->is_sleep_set(thr)) {
339 if (should_wake_up(curr, thr))
340 /* Remove this thread from sleep set */
341 scheduler->remove_sleep(thr);
346 /** @brief Alert the model-checker that an incorrectly-ordered
347 * synchronization was made */
348 void ModelChecker::set_bad_synchronization()
/* Snapshotted flag; consulted when judging execution feasibility */
350 priv->bad_synchronization = true;
354 * Check whether the current trace has triggered an assertion which should halt
357 * @return True, if the execution should be aborted; false otherwise
359 bool ModelChecker::has_asserted() const
361 return priv->asserted;
365 * Trigger a trace assertion which should cause this execution to be halted.
366 * This can be due to a detected bug or due to an infeasibility that should
369 void ModelChecker::set_assert()
/* Snapshotted flag; polled via has_asserted() by the main loop */
371 priv->asserted = true;
375 * Check if we are in a deadlock. Should only be called at the end of an
376 * execution, although it should not give false positives in the middle of an
377 * execution (there should be some ENABLED thread).
379 * @return True if program is in a deadlock; false otherwise
381 bool ModelChecker::is_deadlocked() const
383 bool blocking_threads = false;
384 for (unsigned int i = 0; i < get_num_threads(); i++) {
385 thread_id_t tid = int_to_id(i);
388 Thread *t = get_thread(tid);
/* A user thread stuck with a pending action indicates blocking */
389 if (!t->is_model_thread() && t->get_pending())
390 blocking_threads = true;
392 return blocking_threads;
396 * Check if this is a complete execution. That is, have all thread completed
397 * execution (rather than exiting because sleep sets have forced a redundant
400 * @return True if the execution is complete.
402 bool ModelChecker::is_complete_execution() const
/* Any still-enabled thread means the execution ended prematurely */
404 for (unsigned int i = 0; i < get_num_threads(); i++)
405 if (is_enabled(int_to_id(i)))
411 * @brief Assert a bug in the executing program.
413 * Use this function to assert any sort of bug in the user program. If the
414 * current trace is feasible (actually, a prefix of some feasible execution),
415 * then this execution will be aborted, printing the appropriate message. If
416 * the current trace is not yet feasible, the error message will be stashed and
417 * printed if the execution ever becomes feasible.
419 * @param msg Descriptive message for the bug (do not include newline char)
420 * @return True if bug is immediately-feasible
422 bool ModelChecker::assert_bug(const char *msg)
/* Stash the message; it is printed later even if this trace is infeasible */
424 priv->bugs.push_back(new bug_message(msg));
426 if (isfeasibleprefix()) {
434 * @brief Assert a bug in the executing program, asserted by a user thread
435 * @see ModelChecker::assert_bug
436 * @param msg Descriptive message for the bug (do not include newline char)
438 void ModelChecker::assert_user_bug(const char *msg)
440 /* If feasible bug, bail out now */
/* Yield control back to the model-checker (master) context */
442 switch_to_master(NULL);
445 /** @return True, if any bugs have been reported for this execution */
446 bool ModelChecker::have_bug_reports() const
448 return priv->bugs.size() != 0;
451 /** @brief Print bug report listing for this execution (if any bugs exist) */
452 void ModelChecker::print_bugs() const
454 if (have_bug_reports()) {
/* Pluralize "bug" only when more than one was recorded */
455 model_print("Bug report: %zu bug%s detected\n",
457 priv->bugs.size() > 1 ? "s" : "");
458 for (unsigned int i = 0; i < priv->bugs.size(); i++)
459 priv->bugs[i]->print();
464 * @brief Record end-of-execution stats
466 * Must be run when exiting an execution. Records various stats.
467 * @see struct execution_stats
469 void ModelChecker::record_stats()
/* Classify this execution into exactly one stats bucket */
472 if (!isfeasibleprefix())
473 stats.num_infeasible++;
474 else if (have_bug_reports())
475 stats.num_buggy_executions++;
476 else if (is_complete_execution())
477 stats.num_complete++;
/* Otherwise: feasible but ended early (e.g., sleep-set redundancy) */
479 stats.num_redundant++;
482 * @todo We can violate this ASSERT() when fairness/sleep sets
483 * conflict to cause an execution to terminate, e.g. with:
484 * Scheduler: [0: disabled][1: disabled][2: sleep][3: current, enabled]
486 //ASSERT(scheduler->all_threads_sleeping());
490 /** @brief Print execution stats */
491 void ModelChecker::print_stats() const
493 model_print("Number of complete, bug-free executions: %d\n", stats.num_complete);
494 model_print("Number of redundant executions: %d\n", stats.num_redundant);
495 model_print("Number of buggy executions: %d\n", stats.num_buggy_executions);
496 model_print("Number of infeasible executions: %d\n", stats.num_infeasible);
497 model_print("Total executions: %d\n", stats.num_total);
498 model_print("Total nodes created: %d\n", node_stack->get_total_nodes());
502 * @brief End-of-execution print
503 * @param printbugs Should any existing bugs be printed?
505 void ModelChecker::print_execution(bool printbugs) const
507 print_program_output();
509 if (params.verbose) {
510 model_print("Earliest divergence point since last feasible execution:\n");
511 if (earliest_diverge)
512 earliest_diverge->print();
514 model_print("(Not set)\n");
520 /* Don't print invalid bugs */
529 * Queries the model-checker for more executions to explore and, if one
530 * exists, resets the model-checker state to execute a new execution.
532 * @return If there are more executions to explore, return true. Otherwise,
535 bool ModelChecker::next_execution()
538 /* Is this execution a feasible execution that's worth bug-checking? */
539 bool complete = isfeasibleprefix() && (is_complete_execution() ||
542 /* End-of-execution bug checks */
545 assert_bug("Deadlock detected");
/* Print only when verbose, or when a completed execution found bugs */
553 if (params.verbose || (complete && have_bug_reports()))
554 print_execution(complete);
556 clear_program_output();
559 earliest_diverge = NULL;
/* No remaining backtracking points means exploration is finished */
561 if ((diverge = get_next_backtrack()) == NULL)
565 model_print("Next execution will diverge at:\n");
/* Roll the program and model-checker state back for the next run */
569 reset_to_initial_state();
574 * @brief Find the last fence-related backtracking conflict for a ModelAction
576 * This function performs the search for the most recent conflicting action
577 * against which we should perform backtracking, as affected by fence
578 * operations. This includes pairs of potentially-synchronizing actions which
579 * occur due to fence-acquire or fence-release, and hence should be explored in
580 * the opposite execution order.
582 * @param act The current action
583 * @return The most recent action which conflicts with act due to fences
585 ModelAction * ModelChecker::get_last_fence_conflict(ModelAction *act) const
587 /* Only perform release/acquire fence backtracking for stores */
588 if (!act->is_write())
591 /* Find a fence-release (or, act is a release) */
592 ModelAction *last_release;
593 if (act->is_release())
596 last_release = get_last_fence_release(act->get_tid());
600 /* Skip past the release */
601 action_list_t *list = action_trace;
602 action_list_t::reverse_iterator rit;
/* Walk backwards through the whole trace to locate last_release */
603 for (rit = list->rbegin(); rit != list->rend(); rit++)
604 if (*rit == last_release)
/* last_release must be somewhere in the trace */
606 ASSERT(rit != list->rend());
611 * load --sb-> fence-acquire */
/* Per-thread candidates: latest acquire-fence and a prior same-var load */
612 ModelVector<ModelAction *> acquire_fences(get_num_threads(), NULL);
613 ModelVector<ModelAction *> prior_loads(get_num_threads(), NULL);
614 bool found_acquire_fences = false;
615 for ( ; rit != list->rend(); rit++) {
616 ModelAction *prev = *rit;
/* Same-thread actions cannot form a cross-thread fence conflict */
617 if (act->same_thread(prev))
620 int tid = id_to_int(prev->get_tid());
622 if (prev->is_read() && act->same_var(prev)) {
623 if (prev->is_acquire()) {
624 /* Found most recent load-acquire, don't need
625 * to search for more fences */
626 if (!found_acquire_fences)
629 prior_loads[tid] = prev;
/* Remember only the most recent acquire-fence per thread */
632 if (prev->is_acquire() && prev->is_fence() && !acquire_fences[tid]) {
633 found_acquire_fences = true;
634 acquire_fences[tid] = prev;
/* A conflict needs both: a load before an acquire-fence, same thread */
638 ModelAction *latest_backtrack = NULL;
639 for (unsigned int i = 0; i < acquire_fences.size(); i++)
640 if (acquire_fences[i] && prior_loads[i])
641 if (!latest_backtrack || *latest_backtrack < *acquire_fences[i])
642 latest_backtrack = acquire_fences[i];
643 return latest_backtrack;
647 * @brief Find the last backtracking conflict for a ModelAction
649 * This function performs the search for the most recent conflicting action
650 * against which we should perform backtracking. This primary includes pairs of
651 * synchronizing actions which should be explored in the opposite execution
654 * @param act The current action
655 * @return The most recent action which conflicts with act
657 ModelAction * ModelChecker::get_last_conflict(ModelAction *act) const
659 switch (act->get_type()) {
660 /* case ATOMIC_FENCE: fences don't directly cause backtracking */
664 ModelAction *ret = NULL;
666 /* linear search: from most recent to oldest */
667 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
668 action_list_t::reverse_iterator rit;
669 for (rit = list->rbegin(); rit != list->rend(); rit++) {
670 ModelAction *prev = *rit;
671 if (prev->could_synchronize_with(act)) {
/* Also consider fence-induced conflicts; pick the more recent one */
677 ModelAction *ret2 = get_last_fence_conflict(act);
687 case ATOMIC_TRYLOCK: {
688 /* linear search: from most recent to oldest */
689 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
690 action_list_t::reverse_iterator rit;
691 for (rit = list->rbegin(); rit != list->rend(); rit++) {
692 ModelAction *prev = *rit;
/* Lock/trylock conflicts: another thread contending on the mutex */
693 if (act->is_conflicting_lock(prev))
698 case ATOMIC_UNLOCK: {
699 /* linear search: from most recent to oldest */
700 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
701 action_list_t::reverse_iterator rit;
702 for (rit = list->rbegin(); rit != list->rend(); rit++) {
703 ModelAction *prev = *rit;
/* An unlock can be reordered against another thread's failed trylock */
704 if (!act->same_thread(prev) && prev->is_failed_trylock())
710 /* linear search: from most recent to oldest */
711 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
712 action_list_t::reverse_iterator rit;
713 for (rit = list->rbegin(); rit != list->rend(); rit++) {
714 ModelAction *prev = *rit;
715 if (!act->same_thread(prev) && prev->is_failed_trylock())
/* Waits also conflict with other threads' notify operations */
717 if (!act->same_thread(prev) && prev->is_notify())
723 case ATOMIC_NOTIFY_ALL:
724 case ATOMIC_NOTIFY_ONE: {
725 /* linear search: from most recent to oldest */
726 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
727 action_list_t::reverse_iterator rit;
728 for (rit = list->rbegin(); rit != list->rend(); rit++) {
729 ModelAction *prev = *rit;
/* A notify conflicts with another thread's wait on the condvar */
730 if (!act->same_thread(prev) && prev->is_wait())
741 /** This method finds backtracking points where we should try to
742 * reorder the parameter ModelAction against.
744 * @param the ModelAction to find backtracking points for.
746 void ModelChecker::set_backtracking(ModelAction *act)
748 Thread *t = get_thread(act);
749 ModelAction *prev = get_last_conflict(act);
/* The parent node is where a different scheduling choice can be made */
753 Node *node = prev->get_node()->get_parent();
755 /* See Dynamic Partial Order Reduction (addendum), POPL '05 */
756 int low_tid, high_tid;
/* If act's thread was enabled at the conflict point, only its tid need
 * be considered; otherwise scan all threads */
757 if (node->enabled_status(t->get_id()) == THREAD_ENABLED) {
758 low_tid = id_to_int(act->get_tid());
759 high_tid = low_tid + 1;
762 high_tid = get_num_threads();
765 for (int i = low_tid; i < high_tid; i++) {
766 thread_id_t tid = int_to_id(i);
768 /* Make sure this thread can be enabled here. */
769 if (i >= node->get_num_threads())
772 /* See Dynamic Partial Order Reduction (addendum), POPL '05 */
773 /* Don't backtrack into a point where the thread is disabled or sleeping. */
774 if (node->enabled_status(tid) != THREAD_ENABLED)
777 /* Check if this has been explored already */
778 if (node->has_been_explored(tid))
781 /* See if fairness allows */
782 if (model->params.fairwindow != 0 && !node->has_priority(tid)) {
/* Skip this tid if some enabled, prioritized thread exists */
784 for (int t = 0; t < node->get_num_threads(); t++) {
785 thread_id_t tother = int_to_id(t);
786 if (node->is_enabled(tother) && node->has_priority(tother)) {
795 /* See if CHESS-like yield fairness allows */
796 if (model->params.yieldon) {
798 for (int t = 0; t < node->get_num_threads(); t++) {
799 thread_id_t tother = int_to_id(t);
800 if (node->is_enabled(tother) && node->has_priority_over(tid, tother)) {
809 /* Cache the latest backtracking point */
810 set_latest_backtrack(prev);
812 /* If this is a new backtracking point, mark the tree */
813 if (!node->set_backtrack(tid))
815 DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
816 id_to_int(prev->get_tid()),
817 id_to_int(t->get_id()));
826 * @brief Cache the a backtracking point as the "most recent", if eligible
828 * Note that this does not prepare the NodeStack for this backtracking
829 * operation, it only caches the action on a per-execution basis
831 * @param act The operation at which we should explore a different next action
832 * (i.e., backtracking point)
833 * @return True, if this action is now the most recent backtracking point;
836 bool ModelChecker::set_latest_backtrack(ModelAction *act)
/* Keep only the later (by sequence order) of the cached point and act */
838 if (!priv->next_backtrack || *act > *priv->next_backtrack) {
839 priv->next_backtrack = act;
846 * Returns last backtracking point. The model checker will explore a different
847 * path for this point in the next execution.
848 * @return The ModelAction at which the next execution should diverge.
850 ModelAction * ModelChecker::get_next_backtrack()
/* Consume the cached point: clear it so it is returned only once */
852 ModelAction *next = priv->next_backtrack;
853 priv->next_backtrack = NULL;
858 * Processes a read model action.
859 * @param curr is the read model action to process.
860 * @return True if processing this read updates the mo_graph.
862 bool ModelChecker::process_read(ModelAction *curr)
864 Node *node = curr->get_node();
866 bool updated = false;
/* A read may source its value from the past, a promise, or the future */
867 switch (node->get_read_from_status()) {
868 case READ_FROM_PAST: {
869 const ModelAction *rf = node->get_read_from_past();
/* Buffer mo_graph edits so they can be rolled back on a bad choice */
872 mo_graph->startChanges();
874 ASSERT(!is_infeasible());
875 if (!check_recency(curr, rf)) {
/* Too-recent rf: try the next read-from candidate, if any */
876 if (node->increment_read_from()) {
877 mo_graph->rollbackChanges();
/* No candidates left: flag the liveness-style violation */
880 priv->too_many_reads = true;
884 updated = r_modification_order(curr, rf);
886 mo_graph->commitChanges();
887 mo_check_promises(curr, true);
890 case READ_FROM_PROMISE: {
891 Promise *promise = curr->get_node()->get_read_from_promise();
892 if (promise->add_reader(curr))
893 priv->failed_promise = true;
894 curr->set_read_from_promise(promise);
895 mo_graph->startChanges();
896 if (!check_recency(curr, promise))
897 priv->too_many_reads = true;
898 updated = r_modification_order(curr, promise);
899 mo_graph->commitChanges();
902 case READ_FROM_FUTURE: {
903 /* Read from future value */
/* Create a new Promise to be satisfied by a later write */
904 struct future_value fv = node->get_future_value();
905 Promise *promise = new Promise(curr, fv);
906 curr->set_read_from_promise(promise);
907 promises->push_back(promise);
908 mo_graph->startChanges();
909 updated = r_modification_order(curr, promise);
910 mo_graph->commitChanges();
/* Propagate the read value to the user thread */
916 get_thread(curr)->set_return_value(curr->get_return_value());
922 * Processes a lock, trylock, or unlock model action. @param curr is
923 * the read model action to process.
925 * The try lock operation checks whether the lock is taken. If not,
926 * it falls to the normal lock operation case. If so, it returns
929 * The lock operation has already been checked that it is enabled, so
930 * it just grabs the lock and synchronizes with the previous unlock.
932 * The unlock operation has to re-enable all of the threads that are
933 * waiting on the lock.
935 * @return True if synchronization was updated; false otherwise
937 bool ModelChecker::process_mutex(ModelAction *curr)
939 std::mutex *mutex = curr->get_mutex();
940 struct std::mutex_state *state = NULL;
943 state = mutex->get_state();
945 switch (curr->get_type()) {
946 case ATOMIC_TRYLOCK: {
/* Trylock succeeds iff the mutex is currently free */
947 bool success = !state->locked;
948 curr->set_try_lock(success);
/* Return 0 on failure... */
950 get_thread(curr)->set_return_value(0);
/* ...and 1 on success */
953 get_thread(curr)->set_return_value(1);
955 //otherwise fall into the lock case
/* Detect use of a mutex before its initialization, via clock vectors */
957 if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
958 assert_bug("Lock access before initialization");
959 state->locked = get_thread(curr);
960 ModelAction *unlock = get_last_unlock(curr);
961 //synchronize with the previous unlock statement
962 if (unlock != NULL) {
963 curr->synchronize_with(unlock);
969 case ATOMIC_UNLOCK: {
970 /* wake up the other threads */
971 for (unsigned int i = 0; i < get_num_threads(); i++) {
972 Thread *t = get_thread(int_to_id(i));
973 Thread *curr_thrd = get_thread(curr);
/* Only wake threads blocked on this thread's lock */
974 if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
978 /* unlock the lock - after checking who was waiting on it */
979 state->locked = NULL;
981 if (!curr->is_wait())
982 break; /* The rest is only for ATOMIC_WAIT */
984 /* Should we go to sleep? (simulate spurious failures) */
985 if (curr->get_node()->get_misc() == 0) {
986 get_safe_ptr_action(condvar_waiters_map, curr->get_location())->push_back(curr);
988 scheduler->sleep(get_thread(curr));
992 case ATOMIC_NOTIFY_ALL: {
993 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
994 //activate all the waiting threads
995 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
996 scheduler->wake(get_thread(*rit));
1001 case ATOMIC_NOTIFY_ONE: {
1002 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
/* The node's 'misc' value selects which waiter to wake (explored
 * exhaustively across executions) */
1003 int wakeupthread = curr->get_node()->get_misc();
1004 action_list_t::iterator it = waiters->begin();
1005 advance(it, wakeupthread);
1006 scheduler->wake(get_thread(*it));
1018 * @brief Check if the current pending promises allow a future value to be sent
1020 * If one of the following is true:
1021 * (a) there are no pending promises
1022 * (b) the reader and writer do not cross any promises
1023 * Then, it is safe to pass a future value back now.
1025 * Otherwise, we must save the pending future value until (a) or (b) is true
1027 * @param writer The operation which sends the future value. Must be a write.
1028 * @param reader The operation which will observe the value. Must be a read.
1029 * @return True if the future value can be sent now; false if it must wait.
1031 bool ModelChecker::promises_may_allow(const ModelAction *writer,
1032 const ModelAction *reader) const
1034 if (promises->empty())
/* Scan promises newest-first, comparing against each first reader */
1036 for(int i=promises->size()-1;i>=0;i--) {
1037 ModelAction *pr=(*promises)[i]->get_reader(0);
1038 //reader is after promise...doesn't cross any promise
1041 //writer is after promise, reader before...bad...
1049 * @brief Add a future value to a reader
1051 * This function performs a few additional checks to ensure that the future
1052 * value can be feasibly observed by the reader
1054 * @param writer The operation whose value is sent. Must be a write.
1055 * @param reader The read operation which may read the future value. Must be a read.
1057 void ModelChecker::add_future_value(const ModelAction *writer, ModelAction *reader)
1059 /* Do more ambitious checks now that mo is more complete */
1060 if (!mo_may_allow(writer, reader))
1063 Node *node = reader->get_node();
1065 /* Find an ancestor thread which exists at the time of the reader */
1066 Thread *write_thread = get_thread(writer);
1067 while (id_to_int(write_thread->get_id()) >= node->get_num_threads())
1068 write_thread = write_thread->get_parent();
/* Expiration bounds how far into the future the value may be observed */
1070 struct future_value fv = {
1071 writer->get_write_value(),
1072 writer->get_seq_number() + params.maxfuturedelay,
1073 write_thread->get_id(),
/* A newly recorded future value is a fresh backtracking point */
1075 if (node->add_future_value(fv))
1076 set_latest_backtrack(reader);
1080 * Process a write ModelAction
1081 * @param curr The ModelAction to process
1082 * @return True if the mo_graph was updated or promises were resolved
1084 bool ModelChecker::process_write(ModelAction *curr)
1086 /* Readers to which we may send our future value */
1087 ModelVector<ModelAction *> send_fv;
1089 const ModelAction *earliest_promise_reader;
1090 bool updated_promises = false;
1092 bool updated_mod_order = w_modification_order(curr, &send_fv);
/* This write may resolve at most one outstanding promise */
1093 Promise *promise = pop_promise_to_resolve(curr);
1096 earliest_promise_reader = promise->get_reader(0);
1097 updated_promises = resolve_promise(curr, promise);
1099 earliest_promise_reader = NULL;
1101 for (unsigned int i = 0; i < send_fv.size(); i++) {
1102 ModelAction *read = send_fv[i];
1104 /* Don't send future values to reads after the Promise we resolve */
1105 if (!earliest_promise_reader || *read < *earliest_promise_reader) {
1106 /* Check if future value can be sent immediately */
1107 if (promises_may_allow(curr, read)) {
1108 add_future_value(curr, read);
/* Otherwise queue it until promises allow delivery */
1110 futurevalues->push_back(PendingFutureValue(curr, read));
1115 /* Check the pending future values */
/* Iterate backwards so erase() doesn't disturb unvisited indices */
1116 for (int i = (int)futurevalues->size() - 1; i >= 0; i--) {
1117 struct PendingFutureValue pfv = (*futurevalues)[i];
1118 if (promises_may_allow(pfv.writer, pfv.reader)) {
1119 add_future_value(pfv.writer, pfv.reader);
1120 futurevalues->erase(futurevalues->begin() + i);
1124 mo_graph->commitChanges();
1125 mo_check_promises(curr, false);
/* Plain writes return no meaningful value to the user thread */
1127 get_thread(curr)->set_return_value(VALUE_NONE);
1128 return updated_mod_order || updated_promises;
1132 * Process a fence ModelAction
1133 * @param curr The ModelAction to process
1134 * @return True if synchronization was updated
1136 bool ModelChecker::process_fence(ModelAction *curr)
1139 * fence-relaxed: no-op
1140 * fence-release: only log the occurence (not in this function), for
1141 * use in later synchronization
1142 * fence-acquire (this function): search for hypothetical release
1144 * fence-seq-cst: MO constraints formed in {r,w}_modification_order
1146 bool updated = false;
1147 if (curr->is_acquire()) {
1148 action_list_t *list = action_trace;
1149 action_list_t::reverse_iterator rit;
1150 /* Find X : is_read(X) && X --sb-> curr */
1151 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1152 ModelAction *act = *rit;
/* Only actions sequenced-before curr in the same thread qualify */
1155 if (act->get_tid() != curr->get_tid())
1157 /* Stop at the beginning of the thread */
1158 if (act->is_thread_start())
1160 /* Stop once we reach a prior fence-acquire */
1161 if (act->is_fence() && act->is_acquire())
1163 if (!act->is_read())
1165 /* read-acquire will find its own release sequences */
1166 if (act->is_acquire())
1169 /* Establish hypothetical release sequences */
1170 rel_heads_list_t release_heads;
1171 get_release_seq_heads(curr, act, &release_heads);
1172 for (unsigned int i = 0; i < release_heads.size(); i++)
1173 if (!curr->synchronize_with(release_heads[i]))
1174 set_bad_synchronization();
1175 if (release_heads.size() != 0)
1183 * @brief Process the current action for thread-related activity
1185 * Performs current-action processing for a THREAD_* ModelAction. Proccesses
1186 * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
1187 * synchronization, etc. This function is a no-op for non-THREAD actions
1188 * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
1190 * @param curr The current action
1191 * @return True if synchronization was updated or a thread completed
1193 bool ModelChecker::process_thread_action(ModelAction *curr)
1195 bool updated = false;
1197 switch (curr->get_type()) {
1198 case THREAD_CREATE: {
/* The action's value carries the new thread's start function + arg */
1199 thrd_t *thrd = (thrd_t *)curr->get_location();
1200 struct thread_params *params = (struct thread_params *)curr->get_value();
1201 Thread *th = new Thread(thrd, params->func, params->arg, get_thread(curr));
1203 th->set_creation(curr);
1204 /* Promises can be satisfied by children */
1205 for (unsigned int i = 0; i < promises->size(); i++) {
1206 Promise *promise = (*promises)[i];
1207 if (promise->thread_is_available(curr->get_tid()))
1208 promise->add_thread(th->get_id());
/* THREAD_JOIN: synchronize with the joined thread's last action */
1213 Thread *blocking = curr->get_thread_operand();
1214 ModelAction *act = get_last_action(blocking->get_id());
1215 curr->synchronize_with(act);
1216 updated = true; /* trigger rel-seq checks */
1219 case THREAD_FINISH: {
1220 Thread *th = get_thread(curr);
1221 /* Wake up any joining threads */
1222 for (unsigned int i = 0; i < get_num_threads(); i++) {
1223 Thread *waiting = get_thread(int_to_id(i));
1224 if (waiting->waiting_on() == th &&
1225 waiting->get_pending()->is_thread_join())
1226 scheduler->wake(waiting);
1229 /* Completed thread can't satisfy promises */
1230 for (unsigned int i = 0; i < promises->size(); i++) {
1231 Promise *promise = (*promises)[i];
1232 if (promise->thread_is_available(th->get_id()))
1233 if (promise->eliminate_thread(th->get_id()))
/* Eliminating the last eligible thread breaks the promise */
1234 priv->failed_promise = true;
1236 updated = true; /* trigger rel-seq checks */
1239 case THREAD_START: {
/* Verify no promise expected a value before this thread started */
1240 check_promises(curr->get_tid(), NULL, curr->get_cv());
1251 * @brief Process the current action for release sequence fixup activity
1253 * Performs model-checker release sequence fixups for the current action,
1254 * forcing a single pending release sequence to break (with a given, potential
1255 * "loose" write) or to complete (i.e., synchronize). If a pending release
1256 * sequence forms a complete release sequence, then we must perform the fixup
1257 * synchronization, mo_graph additions, etc.
1259 * @param curr The current action; must be a release sequence fixup action
 * @param work_queue The work queue to which to add work items as they are
 * discovered
void ModelChecker::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
	/* The (possibly NULL) write chosen by the NodeStack to break the sequence */
	const ModelAction *write = curr->get_node()->get_relseq_break();
	/* Take ownership of the most recent pending release sequence */
	struct release_seq *sequence = pending_rel_seqs->back();
	pending_rel_seqs->pop_back();
	ModelAction *acquire = sequence->acquire;
	const ModelAction *rf = sequence->rf;
	const ModelAction *release = sequence->release;
	/* The release head and the read-from write must share a thread */
	ASSERT(release->same_thread(rf));
	if (write == NULL) {
		/**
		 * @todo Forcing a synchronization requires that we set
		 * modification order constraints. For instance, we can't allow
		 * a fixup sequence in which two separate read-acquire
		 * operations read from the same sequence, where the first one
		 * synchronizes and the other doesn't. Essentially, we can't
		 * allow any writes to insert themselves between 'release' and
		 */
		/* Must synchronize */
		if (!acquire->synchronize_with(release)) {
			set_bad_synchronization();
		/* Re-check all pending release sequences */
		work_queue->push_back(CheckRelSeqWorkEntry(NULL));
		/* Re-check act for mo_graph edges */
		work_queue->push_back(MOEdgeWorkEntry(acquire));

		/* propagate synchronization to later actions */
		action_list_t::reverse_iterator rit = action_trace->rbegin();
		/* Walk the trace backwards until we reach 'acquire' itself;
		 * assumes 'acquire' is present in action_trace */
		for (; (*rit) != acquire; rit++) {
			ModelAction *propagate = *rit;
			if (acquire->happens_before(propagate)) {
				propagate->synchronize_with(acquire);
				/* Re-check 'propagate' for mo_graph edges */
				work_queue->push_back(MOEdgeWorkEntry(propagate));
	/* Break release sequence with new edges:
	 * release --mo--> write --mo--> rf */
	mo_graph->addEdge(release, write);
	mo_graph->addEdge(write, rf);
1315 /* See if we have realized a data race */
1320 * Initialize the current action by performing one or more of the following
1321 * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
1322 * in the NodeStack, manipulating backtracking sets, allocating and
1323 * initializing clock vectors, and computing the promises to fulfill.
1325 * @param curr The current action, as passed from the user context; may be
1326 * freed/invalidated after the execution of this function, with a different
 * action "returned" in its place (pass-by-reference)
1328 * @return True if curr is a newly-explored action; false otherwise
bool ModelChecker::initialize_curr_action(ModelAction **curr)
	ModelAction *newcurr;

	if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
		/* Second half of an RMW: merge into the earlier RMWR action */
		newcurr = process_rmw(*curr);
		/* A completed RMW is also a write, so it may generate promises */
		if (newcurr->is_rmw())
			compute_promises(newcurr);
	/* Fresh action: stamp it with the next global sequence number */
	(*curr)->set_seq_number(get_next_seq_num());

	/* The NodeStack either returns an equivalent previously-explored
	 * action or records this one as new */
	newcurr = node_stack->explore_action(*curr, scheduler->get_enabled_array());
	/* First restore type and order in case of RMW operation */
	if ((*curr)->is_rmwr())
		newcurr->copy_typeandorder(*curr);

	ASSERT((*curr)->get_location() == newcurr->get_location());
	newcurr->copy_from_new(*curr);

	/* Discard duplicate ModelAction; use action from NodeStack */

	/* Always compute new clock vector */
	newcurr->create_cv(get_parent_action(newcurr->get_tid()));
	return false; /* Action was explored previously */
	/* Always compute new clock vector */
	newcurr->create_cv(get_parent_action(newcurr->get_tid()));

	/* Assign most recent release fence */
	newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));

	/*
	 * Perform one-time actions when pushing new ModelAction onto
	 */
	if (newcurr->is_write())
		compute_promises(newcurr);
	else if (newcurr->is_relseq_fixup())
		compute_relseq_breakwrites(newcurr);
	else if (newcurr->is_wait())
		/* NOTE(review): 2 appears to cover the wait outcomes explored at
		 * this node (e.g., normal vs. spurious wakeup) — confirm in Node */
		newcurr->get_node()->set_misc_max(2);
	else if (newcurr->is_notify_one()) {
		/* One backtracking choice per thread currently waiting on this condvar */
		newcurr->get_node()->set_misc_max(get_safe_ptr_action(condvar_waiters_map, newcurr->get_location())->size());
	return true; /* This was a new ModelAction */
1391 * @brief Establish reads-from relation between two actions
1393 * Perform basic operations involved with establishing a concrete rf relation,
1394 * including setting the ModelAction data and checking for release sequences.
1396 * @param act The action that is reading (must be a read)
1397 * @param rf The action from which we are reading (must be a write)
1399 * @return True if this read established synchronization
bool ModelChecker::read_from(ModelAction *act, const ModelAction *rf)
	/* Can only read from a write */
	ASSERT(rf->is_write());

	act->set_read_from(rf);
	if (act->is_acquire()) {
		/* A read-acquire synchronizes with the head(s) of the release
		 * sequence(s) containing 'rf' */
		rel_heads_list_t release_heads;
		get_release_seq_heads(act, act, &release_heads);
		int num_heads = release_heads.size();
		for (unsigned int i = 0; i < release_heads.size(); i++)
			if (!act->synchronize_with(release_heads[i])) {
				/* Synchronizing against the clock order is a modeling error */
				set_bad_synchronization();
		/* True iff at least one release head was found (synchronization) */
		return num_heads > 0;
1422 * Check promises and eliminate potentially-satisfying threads when a thread is
1423 * blocked (e.g., join, lock). A thread which is waiting on another thread can
1424 * no longer satisfy a promise generated from that thread.
1426 * @param blocker The thread on which a thread is waiting
1427 * @param waiting The waiting thread
void ModelChecker::thread_blocking_check_promises(Thread *blocker, Thread *waiting)
	for (unsigned int i = 0; i < promises->size(); i++) {
		Promise *promise = (*promises)[i];
		/* Skip promises 'waiting' could not satisfy anyway */
		if (!promise->thread_is_available(waiting->get_id()))
		/* Only promises whose reader(s) belong to the blocking thread
		 * are affected: 'waiting' can no longer fulfill them */
		for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
			ModelAction *reader = promise->get_reader(j);
			if (reader->get_tid() != blocker->get_id())
			if (promise->eliminate_thread(waiting->get_id())) {
				/* Promise has failed */
				priv->failed_promise = true;
			/* Only eliminate the 'waiting' thread once */
1451 * @brief Check whether a model action is enabled.
1453 * Checks whether a lock or join operation would be successful (i.e., is the
1454 * lock already locked, or is the joined thread already complete). If not, put
1455 * the action in a waiter list.
1457 * @param curr is the ModelAction to check whether it is enabled.
1458 * @return a bool that indicates whether the action is enabled.
bool ModelChecker::check_action_enabled(ModelAction *curr) {
	if (curr->is_lock()) {
		/* A lock is enabled only if the mutex is currently unlocked;
		 * inspect the modeled mutex state directly */
		std::mutex *lock = curr->get_mutex();
		struct std::mutex_state *state = lock->get_state();
	} else if (curr->is_thread_join()) {
		Thread *blocking = curr->get_thread_operand();
		if (!blocking->is_complete()) {
			/* Joiner blocks: it can no longer satisfy promises generated
			 * by the thread it waits on */
			thread_blocking_check_promises(blocking, get_thread(curr));
1478 * This is the heart of the model checker routine. It performs model-checking
1479 * actions corresponding to a given "current action." Among other processes, it
1480 * calculates reads-from relationships, updates synchronization clock vectors,
1481 * forms a memory_order constraints graph, and handles replay/backtrack
1482 * execution when running permutations of previously-observed executions.
1484 * @param curr The current action to process
1485 * @return The ModelAction that is actually executed; may be different than
1486 * curr; may be NULL, if the current action is not enabled to run
ModelAction * ModelChecker::check_current_action(ModelAction *curr)
	/* RMW/RMWC complete an earlier RMWR; they are merged, not re-added */
	bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
	/* May replace 'curr' with an equivalent previously-explored action */
	bool newly_explored = initialize_curr_action(&curr);

	wake_up_sleeping_actions(curr);

	/* Compute fairness information for CHESS yield algorithm */
	if (model->params.yieldon) {
		curr->get_node()->update_yield(scheduler);

	/* Add the action to lists before any other model-checking tasks */
	if (!second_part_of_rmw)
		add_action_to_lists(curr);

	/* Build may_read_from set for newly-created actions */
	if (newly_explored && curr->is_read())
		build_may_read_from(curr);

	/* Initialize work_queue with the "current action" work */
	work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
	/* Drain the queue unless a bug assertion stops the execution */
	while (!work_queue.empty() && !has_asserted()) {
		WorkQueueEntry work = work_queue.front();
		work_queue.pop_front();

		switch (work.type) {
		case WORK_CHECK_CURR_ACTION: {
			ModelAction *act = work.action;
			bool update = false; /* update this location's release seq's */
			bool update_all = false; /* update all release seq's */

			/* Dispatch to the per-kind processing routines; each returns
			 * true when it updated synchronization state */
			if (process_thread_action(curr))
			if (act->is_read() && !second_part_of_rmw && process_read(act))
			if (act->is_write() && process_write(act))
			if (act->is_fence() && process_fence(act))
			if (act->is_mutex_op() && process_mutex(act))
			if (act->is_relseq_fixup())
				process_relseq_fixup(curr, &work_queue);
			/* Re-check release sequences: NULL location means "all" */
			work_queue.push_back(CheckRelSeqWorkEntry(NULL));
			work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
		case WORK_CHECK_RELEASE_SEQ:
			resolve_release_sequences(work.location, &work_queue);
		case WORK_CHECK_MO_EDGES: {
			/** @todo Complete verification of work_queue */
			ModelAction *act = work.action;
			bool updated = false;

			if (act->is_read()) {
				/* A read constrains mo via its reads-from write or promise */
				const ModelAction *rf = act->get_reads_from();
				const Promise *promise = act->get_reads_from_promise();
				if (r_modification_order(act, rf))
				} else if (promise) {
					if (r_modification_order(act, promise))
			if (act->is_write()) {
				if (w_modification_order(act, NULL))
			/* Make the speculative mo_graph additions permanent */
			mo_graph->commitChanges();
			work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
	/* Record backtracking points discovered while processing 'curr' */
	check_curr_backtracking(curr);
	set_backtracking(curr);
/** @brief Mark 'curr' as the latest backtracking point if its node (or its
 *  parent) still has unexplored choices of any kind. */
void ModelChecker::check_curr_backtracking(ModelAction *curr)
	Node *currnode = curr->get_node();
	Node *parnode = currnode->get_parent();

	/* Any non-empty choice set (thread backtrack, misc, reads-from,
	 * promise, or release-seq break) means this point must be revisited */
	if ((parnode && !parnode->backtrack_empty()) ||
			!currnode->misc_empty() ||
			!currnode->read_from_empty() ||
			!currnode->promise_empty() ||
			!currnode->relseq_break_empty()) {
		set_latest_backtrack(curr);
/** @brief Check whether any outstanding promise has passed its expiration
 *  (its deadline sequence number is behind the current execution). */
bool ModelChecker::promises_expired() const
	for (unsigned int i = 0; i < promises->size(); i++) {
		Promise *promise = (*promises)[i];
		/* Expired: the promised future value was never realized in time */
		if (promise->get_expiration() < priv->used_sequence_numbers)
1614 * This is the strongest feasibility check available.
1615 * @return whether the current trace (partial or complete) must be a prefix of
bool ModelChecker::isfeasibleprefix() const
	/* Feasible prefix: nothing pending (release sequences) and no
	 * infeasibility flags aside from unresolved release sequences */
	return pending_rel_seqs->size() == 0 && is_feasible_prefix_ignore_relseq();
 * Print diagnostic information about an infeasible execution
1625 * @param prefix A string to prefix the output with; if NULL, then a default
1626 * message prefix will be provided
void ModelChecker::print_infeasibility(const char *prefix) const
	/* NOTE(review): 'buf'/'ptr' are declared just above this point (a
	 * local char buffer and a cursor into it) — each branch appends one
	 * bracketed tag describing a reason the execution is infeasible */
	if (mo_graph->checkForCycles())
		ptr += sprintf(ptr, "[mo cycle]");
	if (priv->failed_promise)
		ptr += sprintf(ptr, "[failed promise]");
	if (priv->too_many_reads)
		ptr += sprintf(ptr, "[too many reads]");
	if (priv->no_valid_reads)
		ptr += sprintf(ptr, "[no valid reads-from]");
	if (priv->bad_synchronization)
		ptr += sprintf(ptr, "[bad sw ordering]");
	if (promises_expired())
		ptr += sprintf(ptr, "[promise expired]");
	if (promises->size() != 0)
		ptr += sprintf(ptr, "[unresolved promise]");
	/* Emit all collected tags with the caller's prefix (or a default) */
	model_print("%s: %s\n", prefix ? prefix : "Infeasible", buf);
1651 * Returns whether the current completed trace is feasible, except for pending
1652 * release sequences.
bool ModelChecker::is_feasible_prefix_ignore_relseq() const
	/* Feasible (modulo pending release sequences) iff no infeasibility
	 * flag is set and every promise has been resolved */
	return !is_infeasible() && promises->size() == 0;
1660 * Check if the current partial trace is infeasible. Does not check any
1661 * end-of-execution flags, which might rule out the execution. Thus, this is
1662 * useful only for ruling an execution as infeasible.
1663 * @return whether the current partial trace is infeasible.
bool ModelChecker::is_infeasible() const
	/* Infeasible if the mo graph is cyclic or any per-execution
	 * infeasibility flag has been raised */
	return mo_graph->checkForCycles() ||
		priv->no_valid_reads ||
		priv->failed_promise ||
		priv->too_many_reads ||
		priv->bad_synchronization ||
1675 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
/** Close out a RMWR by converting previous RMWR into a RMW or READ. */
ModelAction * ModelChecker::process_rmw(ModelAction *act) {
	/* The RMWR this action completes is this thread's most recent action */
	ModelAction *lastread = get_last_action(act->get_tid());
	lastread->process_rmw(act);
	if (act->is_rmw()) {
		/* Atomicity: the RMW is mo-ordered immediately after whatever it
		 * read from — a concrete write or, failing that, a promise */
		if (lastread->get_reads_from())
			mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
			mo_graph->addRMWEdge(lastread->get_reads_from_promise(), lastread);
		mo_graph->commitChanges();
1690 * A helper function for ModelChecker::check_recency, to check if the current
1691 * thread is able to read from a different write/promise for 'params.maxreads'
1692 * number of steps and if that write/promise should become visible (i.e., is
1693 * ordered later in the modification order). This helps model memory liveness.
1695 * @param curr The current action. Must be a read.
1696 * @param rf The write/promise from which we plan to read
1697 * @param other_rf The write/promise from which we may read
1698 * @return True if we were able to read from other_rf for params.maxreads steps
template <typename T, typename U>
bool ModelChecker::should_read_instead(const ModelAction *curr, const T *rf, const U *other_rf) const
	/* Need a different write/promise */
	if (other_rf->equals(rf))

	/* Only look for "newer" writes/promises */
	if (!mo_graph->checkReachable(rf, other_rf))

	/* Walk this thread's per-location action list backwards from curr */
	SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
	action_list_t *list = &(*thrd_lists)[id_to_int(curr->get_tid())];
	action_list_t::reverse_iterator rit = list->rbegin();
	/* curr must be the most recent action on this location in its thread */
	ASSERT((*rit) == curr);
	/* Skip past curr */

	/* Does this write/promise work for everyone? */
	for (int i = 0; i < params.maxreads; i++, rit++) {
		ModelAction *act = *rit;
		/* Every one of the last maxreads reads must be able to read other_rf */
		if (!act->may_read_from(other_rf))
1728 * Checks whether a thread has read from the same write or Promise for too many
1729 * times without seeing the effects of a later write/Promise.
 * 1) there must be a different write/promise that we could read from,
1733 * 2) we must have read from the same write/promise in excess of maxreads times,
1734 * 3) that other write/promise must have been in the reads_from set for maxreads times, and
1735 * 4) that other write/promise must be mod-ordered after the write/promise we are reading.
1737 * If so, we decide that the execution is no longer feasible.
1739 * @param curr The current action. Must be a read.
1740 * @param rf The ModelAction/Promise from which we might read.
1741 * @return True if the read should succeed; false otherwise
template <typename T>
bool ModelChecker::check_recency(ModelAction *curr, const T *rf) const
	/* Feature disabled when maxreads is 0 */
	if (!params.maxreads)

	//NOTE: Next check is just optimization, not really necessary....
	if (curr->get_node()->get_read_from_past_size() +
			curr->get_node()->get_read_from_promise_size() <= 1)

	/* Walk this thread's actions on curr's location, most recent first */
	SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
	int tid = id_to_int(curr->get_tid());
	ASSERT(tid < (int)thrd_lists->size());
	action_list_t *list = &(*thrd_lists)[tid];
	action_list_t::reverse_iterator rit = list->rbegin();
	ASSERT((*rit) == curr);
	/* Skip past curr */

	action_list_t::reverse_iterator ritcopy = rit;
	/* See if we have enough reads from the same value */
	for (int count = 0; count < params.maxreads; ritcopy++, count++) {
		/* Fewer than maxreads prior actions: recency cannot be violated */
		if (ritcopy == list->rend())
		ModelAction *act = *ritcopy;
		if (!act->is_read())
		/* Bail out if any of the last maxreads reads saw a different source */
		if (act->get_reads_from_promise() && !act->get_reads_from_promise()->equals(rf))
		if (act->get_reads_from() && !act->get_reads_from()->equals(rf))
		/* A read with no alternative sources can't demonstrate staleness */
		if (act->get_node()->get_read_from_past_size() +
				act->get_node()->get_read_from_promise_size() <= 1)
	/* Check every alternative source: a viable "newer" write or promise
	 * means continuing to read 'rf' starves it (liveness failure) */
	for (int i = 0; i < curr->get_node()->get_read_from_past_size(); i++) {
		const ModelAction *write = curr->get_node()->get_read_from_past(i);
		if (should_read_instead(curr, rf, write))
			return false; /* liveness failure */
	for (int i = 0; i < curr->get_node()->get_read_from_promise_size(); i++) {
		const Promise *promise = curr->get_node()->get_read_from_promise(i);
		if (should_read_instead(curr, rf, promise))
			return false; /* liveness failure */
1793 * Updates the mo_graph with the constraints imposed from the current
1796 * Basic idea is the following: Go through each other thread and find
1797 * the last action that happened before our read. Two cases:
1799 * (1) The action is a write => that write must either occur before
1800 * the write we read from or be the write we read from.
1802 * (2) The action is a read => the write that that action read from
1803 * must occur before the write we read from or be the same write.
1805 * @param curr The current action. Must be a read.
1806 * @param rf The ModelAction or Promise that curr reads from. Must be a write.
1807 * @return True if modification order edges were added; false otherwise
template <typename rf_type>
bool ModelChecker::r_modification_order(ModelAction *curr, const rf_type *rf)
	SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
	ASSERT(curr->is_read());

	/* Last SC fence in the current thread */
	ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
	ModelAction *last_sc_write = NULL;
	/* SC reads are additionally ordered against the last SC write */
	if (curr->is_seqcst())
		last_sc_write = get_last_seq_cst_write(curr);

	/* Iterate over all threads */
	for (i = 0; i < thrd_lists->size(); i++) {
		/* Last SC fence in thread i */
		ModelAction *last_sc_fence_thread_local = NULL;
		if (int_to_id((int)i) != curr->get_tid())
			last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(i), NULL);

		/* Last SC fence in thread i, before last SC fence in current thread */
		ModelAction *last_sc_fence_thread_before = NULL;
		if (last_sc_fence_local)
			last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);

		/* Iterate over actions in thread, starting from most recent */
		action_list_t *list = &(*thrd_lists)[i];
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *act = *rit;

			/* Don't want to add reflexive edges on 'rf' */
			if (act->equals(rf)) {
				if (act->happens_before(curr))
			if (act->is_write()) {
				/* C++, Section 29.3 statement 5 */
				if (curr->is_seqcst() && last_sc_fence_thread_local &&
						*act < *last_sc_fence_thread_local) {
					added = mo_graph->addEdge(act, rf) || added;
				/* C++, Section 29.3 statement 4 */
				else if (act->is_seqcst() && last_sc_fence_local &&
						*act < *last_sc_fence_local) {
					added = mo_graph->addEdge(act, rf) || added;
				/* C++, Section 29.3 statement 6 */
				else if (last_sc_fence_thread_before &&
						*act < *last_sc_fence_thread_before) {
					added = mo_graph->addEdge(act, rf) || added;
			/* C++, Section 29.3 statement 3 (second subpoint) */
			if (curr->is_seqcst() && last_sc_write && act == last_sc_write) {
				added = mo_graph->addEdge(act, rf) || added;

			/*
			 * Include at most one act per-thread that "happens
			 */
			if (act->happens_before(curr)) {
				if (act->is_write()) {
					/* Prior write hb-before our read: it precedes rf in mo */
					added = mo_graph->addEdge(act, rf) || added;
					/* Prior read: whatever it read from precedes rf in mo */
					const ModelAction *prevrf = act->get_reads_from();
					const Promise *prevrf_promise = act->get_reads_from_promise();
					if (!prevrf->equals(rf))
						added = mo_graph->addEdge(prevrf, rf) || added;
					} else if (!prevrf_promise->equals(rf)) {
						added = mo_graph->addEdge(prevrf_promise, rf) || added;
	/*
	 * All compatible, thread-exclusive promises must be ordered after any
	 * concrete loads from the same thread
	 */
	for (unsigned int i = 0; i < promises->size(); i++)
		if ((*promises)[i]->is_compatible_exclusive(curr))
			added = mo_graph->addEdge(rf, (*promises)[i]) || added;
1913 * Updates the mo_graph with the constraints imposed from the current write.
1915 * Basic idea is the following: Go through each other thread and find
 * the latest action that happened before our write. Two cases:
1918 * (1) The action is a write => that write must occur before
1921 * (2) The action is a read => the write that that action read from
1922 * must occur before the current write.
1924 * This method also handles two other issues:
1926 * (I) Sequential Consistency: Making sure that if the current write is
1927 * seq_cst, that it occurs after the previous seq_cst write.
1929 * (II) Sending the write back to non-synchronizing reads.
1931 * @param curr The current action. Must be a write.
1932 * @param send_fv A vector for stashing reads to which we may pass our future
1933 * value. If NULL, then don't record any future values.
1934 * @return True if modification order edges were added; false otherwise
bool ModelChecker::w_modification_order(ModelAction *curr, ModelVector<ModelAction *> *send_fv)
	SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
	ASSERT(curr->is_write());

	if (curr->is_seqcst()) {
		/* We have to at least see the last sequentially consistent write,
			 so we are initialized. */
		ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
		if (last_seq_cst != NULL) {
			added = mo_graph->addEdge(last_seq_cst, curr) || added;

	/* Last SC fence in the current thread */
	ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);

	/* Iterate over all threads */
	for (i = 0; i < thrd_lists->size(); i++) {
		/* Last SC fence in thread i, before last SC fence in current thread */
		ModelAction *last_sc_fence_thread_before = NULL;
		if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
			last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);

		/* Iterate over actions in thread, starting from most recent */
		action_list_t *list = &(*thrd_lists)[i];
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *act = *rit;
			/*
			 * 1) If RMW and it actually read from something, then we
			 * already have all relevant edges, so just skip to next
			 *
			 * 2) If RMW and it didn't read from anything, we should add
			 * whatever edge we can get to speed up convergence.
			 *
			 * 3) If normal write, we need to look at earlier actions, so
			 * continue processing list.
			 */
			if (curr->is_rmw()) {
				if (curr->get_reads_from() != NULL)

			/* C++, Section 29.3 statement 7 */
			if (last_sc_fence_thread_before && act->is_write() &&
					*act < *last_sc_fence_thread_before) {
				added = mo_graph->addEdge(act, curr) || added;

			/*
			 * Include at most one act per-thread that "happens
			 */
			if (act->happens_before(curr)) {
				/*
				 * Note: if act is RMW, just add edge:
				 *
				 * The following edge should be handled elsewhere:
				 *   readfrom(act) --mo--> act
				 */
				if (act->is_write())
					added = mo_graph->addEdge(act, curr) || added;
				else if (act->is_read()) {
					//if previous read accessed a null, just keep going
					if (act->get_reads_from() == NULL)
					added = mo_graph->addEdge(act->get_reads_from(), curr) || added;
			} else if (act->is_read() && !act->could_synchronize_with(curr) &&
					!act->same_thread(curr)) {
				/* We have an action that:
					 (1) did not happen before us
					 (2) is a read and we are a write
					 (3) cannot synchronize with us
					 (4) is in a different thread
					 that read could potentially read from our write. Note that
					 these checks are overly conservative at this point, we'll
					 do more checks before actually removing the
				*/
				if (send_fv && thin_air_constraint_may_allow(curr, act)) {
					/* Stash the candidate future-value recipient while the
					 * trace is still feasible */
					if (!is_infeasible())
						send_fv->push_back(act);
					else if (curr->is_rmw() && act->is_rmw() && curr->get_reads_from() && curr->get_reads_from() == act->get_reads_from())
						add_future_value(curr, act);
	/*
	 * All compatible, thread-exclusive promises must be ordered after any
	 * concrete stores to the same thread, or else they can be merged with
	 */
	for (unsigned int i = 0; i < promises->size(); i++)
		if ((*promises)[i]->is_compatible_exclusive(curr))
			added = mo_graph->addEdge(curr, (*promises)[i]) || added;
2051 /** Arbitrary reads from the future are not allowed. Section 29.3
 * part 9 places some constraints. This method checks one result of the
 * constraint. Others require compiler support. */
bool ModelChecker::thin_air_constraint_may_allow(const ModelAction *writer, const ModelAction *reader) const
	/* The constraint only restricts RMW-to-RMW reads-from cycles */
	if (!writer->is_rmw())
	if (!reader->is_rmw())
	/* Follow the writer's reads-from chain backwards; disallow if it
	 * loops to the reader or reaches a same-thread hb-earlier action */
	for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
		if (search == reader)
		if (search->get_tid() == reader->get_tid() &&
				search->happens_before(reader))
2074 * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
 * some constraints. This method checks one of the following constraints (others
2076 * require compiler support):
2078 * If X --hb-> Y --mo-> Z, then X should not read from Z.
bool ModelChecker::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
	SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, reader->get_location());

	/* Iterate over all threads */
	for (i = 0; i < thrd_lists->size(); i++) {
		/* The earliest write (direct or via reads-from) in this thread
		 * that the reader happens-before */
		const ModelAction *write_after_read = NULL;

		/* Iterate over actions in thread, starting from most recent */
		action_list_t *list = &(*thrd_lists)[i];
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *act = *rit;

			/* Don't disallow due to act == reader */
			if (!reader->happens_before(act) || reader == act)
			else if (act->is_write())
				write_after_read = act;
			else if (act->is_read() && act->get_reads_from() != NULL)
				write_after_read = act->get_reads_from();
		/* X --hb-> Y --mo-> Z forbids X reading from Z: reject if some
		 * hb-later write is mo-before the candidate writer */
		if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
2110 * Finds the head(s) of the release sequence(s) containing a given ModelAction.
2111 * The ModelAction under consideration is expected to be taking part in
2112 * release/acquire synchronization as an object of the "reads from" relation.
2113 * Note that this can only provide release sequence support for RMW chains
2114 * which do not read from the future, as those actions cannot be traced until
2115 * their "promise" is fulfilled. Similarly, we may not even establish the
2116 * presence of a release sequence with certainty, as some modification order
2117 * constraints may be decided further in the future. Thus, this function
2118 * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
2119 * and a boolean representing certainty.
2121 * @param rf The action that might be part of a release sequence. Must be a
2123 * @param release_heads A pass-by-reference style return parameter. After
2124 * execution of this function, release_heads will contain the heads of all the
2125 * relevant release sequences, if any exists with certainty
2126 * @param pending A pass-by-reference style return parameter which is only used
2127 * when returning false (i.e., uncertain). Returns most information regarding
2128 * an uncertain release sequence, including any write operations that might
2129 * break the sequence.
2130 * @return true, if the ModelChecker is certain that release_heads is complete;
bool ModelChecker::release_seq_heads(const ModelAction *rf,
		rel_heads_list_t *release_heads,
		struct release_seq *pending) const
	/* Only check for release sequences if there are no cycles */
	if (mo_graph->checkForCycles())

	/* Walk backwards along the RMW reads-from chain, collecting heads */
	for ( ; rf != NULL; rf = rf->get_reads_from()) {
		ASSERT(rf->is_write());

		if (rf->is_release())
			release_heads->push_back(rf);
		else if (rf->get_last_fence_release())
			release_heads->push_back(rf->get_last_fence_release());
			break; /* End of RMW chain */

		/** @todo Need to be smarter here... In the linux lock
		 * example, this will run to the beginning of the program for
		 */
		/** @todo The way to be smarter here is to keep going until 1
		 * thread has a release preceded by an acquire and you've seen
		 */

		/* acq_rel RMW is a sufficient stopping condition */
		if (rf->is_acquire() && rf->is_release())
			return true; /* complete */
	/* read from future: need to settle this later */
	return false; /* incomplete */

	if (rf->is_release())
		return true; /* complete */

	/* else relaxed write
	 * - check for fence-release in the same thread (29.8, stmt. 3)
	 * - check modification order for contiguous subsequence
	 *  -> rf must be same thread as release */

	const ModelAction *fence_release = rf->get_last_fence_release();
	/* Synchronize with a fence-release unconditionally; we don't need to
	 * find any more "contiguous subsequence..." for it */
		release_heads->push_back(fence_release);

	int tid = id_to_int(rf->get_tid());
	SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, rf->get_location());
	action_list_t *list = &(*thrd_lists)[tid];
	action_list_t::const_reverse_iterator rit;

	/* Find rf in the thread list */
	rit = std::find(list->rbegin(), list->rend(), rf);
	ASSERT(rit != list->rend());

	/* Find the last {write,fence}-release */
	for (; rit != list->rend(); rit++) {
		/* Stop early: the fence-release already dominates anything older */
		if (fence_release && *(*rit) < *fence_release)
		if ((*rit)->is_release())
	if (rit == list->rend()) {
		/* No write-release in this thread */
		return true; /* complete */
	} else if (fence_release && *(*rit) < *fence_release) {
		/* The fence-release is more recent (and so, "stronger") than
		 * the most recent write-release */
		return true; /* complete */
	} /* else, need to establish contiguous release sequence */
	ModelAction *release = *rit;

	ASSERT(rf->same_thread(release));

	pending->writes.clear();

	/* Certainty: can we already rule out every write that might break
	 * the release-to-rf modification-order subsequence? */
	bool certain = true;
	for (unsigned int i = 0; i < thrd_lists->size(); i++) {
		/* The rf thread itself cannot break its own sequence */
		if (id_to_int(rf->get_tid()) == (int)i)
		list = &(*thrd_lists)[i];

		/* Can we ensure no future writes from this thread may break
		 * the release seq? */
		bool future_ordered = false;

		ModelAction *last = get_last_action(int_to_id(i));
		Thread *th = get_thread(int_to_id(i));
		if ((last && rf->happens_before(last)) ||
			future_ordered = true;

		ASSERT(!th->is_model_thread() || future_ordered);

		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			const ModelAction *act = *rit;
			/* Reach synchronization -> this thread is complete */
			if (act->happens_before(release))
			if (rf->happens_before(act)) {
				future_ordered = true;

			/* Only non-RMW writes can break release sequences */
			if (!act->is_write() || act->is_rmw())

			/* Check modification order */
			if (mo_graph->checkReachable(rf, act)) {
				/* rf --mo--> act */
				future_ordered = true;
			if (mo_graph->checkReachable(act, release))
				/* act --mo--> release */
			if (mo_graph->checkReachable(release, act) &&
						mo_graph->checkReachable(act, rf)) {
				/* release --mo-> act --mo--> rf */
				return true; /* complete */
			/* act may break release sequence */
			pending->writes.push_back(act);
		if (!future_ordered)
			certain = false; /* This thread is uncertain */

	/* Certain: publish the release head; otherwise leave the pending
	 * record describing what is still unresolved */
		release_heads->push_back(release);
		pending->writes.clear();
		pending->release = release;
2278 * An interface for getting the release sequence head(s) with which a
2279 * given ModelAction must synchronize. This function only returns a non-empty
2280 * result when it can locate a release sequence head with certainty. Otherwise,
2281 * it may mark the internal state of the ModelChecker so that it will handle
2282 * the release sequence at a later time, causing @a acquire to update its
2283 * synchronization at some later point in execution.
2285 * @param acquire The 'acquire' action that may synchronize with a release
2287 * @param read The read action that may read from a release sequence; this may
2288 * be the same as acquire, or else an earlier action in the same thread (i.e.,
2289 * when 'acquire' is a fence-acquire)
2290 * @param release_heads A pass-by-reference return parameter. Will be filled
2291 * with the head(s) of the release sequence(s), if they exist with certainty.
2292 * @see ModelChecker::release_seq_heads
2294 void ModelChecker::get_release_seq_heads(ModelAction *acquire,
2295 ModelAction *read, rel_heads_list_t *release_heads)
2297 const ModelAction *rf = read->get_reads_from();
/* Record the acquire/read pair so the sequence can be re-checked lazily if
 * it cannot be resolved with certainty right now */
2298 struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
2299 sequence->acquire = acquire;
2300 sequence->read = read;
2302 if (!release_seq_heads(rf, release_heads, sequence)) {
2303 /* add act to 'lazy checking' list */
2304 pending_rel_seqs->push_back(sequence);
/* else: the sequence was resolved completely, so the stash is not needed.
 * NOTE(review): listing numbering gaps suggest dropped lines around this
 * else-branch — verify against the full source. */
2306 snapshot_free(sequence);
2311 * Attempt to resolve all stashed operations that might synchronize with a
2312 * release sequence for a given location. This implements the "lazy" portion of
2313 * determining whether or not a release sequence was contiguous, since not all
2314 * modification order information is present at the time an action occurs.
2316 * @param location The location/object that should be checked for release
2317 * sequence resolutions. A NULL value means to check all locations.
2318 * @param work_queue The work queue to which to add work items as they are
2320 * @return True if any updates occurred (new synchronization, new mo_graph
2323 bool ModelChecker::resolve_release_sequences(void *location, work_queue_t *work_queue)
2325 bool updated = false;
/* Walk the pending list; entries that become complete are erased below */
2326 SnapVector<struct release_seq *>::iterator it = pending_rel_seqs->begin();
2327 while (it != pending_rel_seqs->end()) {
2328 struct release_seq *pending = *it;
2329 ModelAction *acquire = pending->acquire;
2330 const ModelAction *read = pending->read;
2332 /* Only resolve sequences on the given location, if provided */
2333 if (location && read->get_location() != location) {
2338 const ModelAction *rf = read->get_reads_from();
2339 rel_heads_list_t release_heads;
/* Re-run the certainty check now that more mo_graph info may exist */
2341 complete = release_seq_heads(rf, &release_heads, pending);
2342 for (unsigned int i = 0; i < release_heads.size(); i++) {
2343 if (!acquire->has_synchronized_with(release_heads[i])) {
2344 if (acquire->synchronize_with(release_heads[i]))
/* Synchronizing in the wrong order is a user error */
2347 set_bad_synchronization();
2352 /* Re-check all pending release sequences */
2353 work_queue->push_back(CheckRelSeqWorkEntry(NULL));
2354 /* Re-check read-acquire for mo_graph edges */
2355 if (acquire->is_read())
2356 work_queue->push_back(MOEdgeWorkEntry(acquire));
2358 /* propagate synchronization to later actions */
2359 action_list_t::reverse_iterator rit = action_trace->rbegin();
2360 for (; (*rit) != acquire; rit++) {
2361 ModelAction *propagate = *rit;
2362 if (acquire->happens_before(propagate)) {
2363 propagate->synchronize_with(acquire);
2364 /* Re-check 'propagate' for mo_graph edges */
2365 work_queue->push_back(MOEdgeWorkEntry(propagate));
/* Completed sequence: drop it from the pending list and free the stash */
2370 it = pending_rel_seqs->erase(it);
2371 snapshot_free(pending);
2384 * Performs various bookkeeping operations for the current ModelAction. For
2385 * instance, adds action to the per-object, per-thread action vector and to the
2386 * action trace list of all thread actions.
2388 * @param act is the ModelAction to add.
2390 void ModelChecker::add_action_to_lists(ModelAction *act)
2392 int tid = id_to_int(act->get_tid());
/* A fresh atomic variable gets a synthetic UNINIT action placed before all
 * real actions on that location */
2393 ModelAction *uninit = NULL;
2395 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
2396 if (list->empty() && act->is_atomic_var()) {
2397 uninit = get_uninitialized_action(act);
2398 uninit_id = id_to_int(uninit->get_tid());
2399 list->push_front(uninit);
2401 list->push_back(act);
2403 action_trace->push_back(act);
2405 action_trace->push_front(uninit);
/* Per-location, per-thread lists */
2407 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, act->get_location());
2408 if (tid >= (int)vec->size())
2409 vec->resize(priv->next_thread_id);
2410 (*vec)[tid].push_back(act);
2412 (*vec)[uninit_id].push_front(uninit);
/* Track the most recent action per thread */
2414 if ((int)thrd_last_action->size() <= tid)
2415 thrd_last_action->resize(get_num_threads());
2416 (*thrd_last_action)[tid] = act;
2418 (*thrd_last_action)[uninit_id] = uninit;
/* Track the most recent fence-release per thread */
2420 if (act->is_fence() && act->is_release()) {
2421 if ((int)thrd_last_fence_release->size() <= tid)
2422 thrd_last_fence_release->resize(get_num_threads());
2423 (*thrd_last_fence_release)[tid] = act;
2426 if (act->is_wait()) {
/* wait() carries the associated mutex address in its value field, so the
 * action is also indexed under the mutex location */
2427 void *mutex_loc = (void *) act->get_value();
2428 get_safe_ptr_action(obj_map, mutex_loc)->push_back(act);
2430 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, mutex_loc);
2431 if (tid >= (int)vec->size())
2432 vec->resize(priv->next_thread_id);
2433 (*vec)[tid].push_back(act);
2438 * @brief Get the last action performed by a particular Thread
2439 * @param tid The thread ID of the Thread in question
2440 * @return The last action in the thread
2442 ModelAction * ModelChecker::get_last_action(thread_id_t tid) const
2444 int threadid = id_to_int(tid);
2445 if (threadid < (int)thrd_last_action->size())
2446 return (*thrd_last_action)[id_to_int(tid)];
/* NOTE(review): the out-of-range fall-through (presumably `return NULL`) is
 * not visible in this listing — confirm against the full source. */
2452 * @brief Get the last fence release performed by a particular Thread
2453 * @param tid The thread ID of the Thread in question
2454 * @return The last fence release in the thread, if one exists; NULL otherwise
2456 ModelAction * ModelChecker::get_last_fence_release(thread_id_t tid) const
2458 int threadid = id_to_int(tid);
2459 if (threadid < (int)thrd_last_fence_release->size())
2460 return (*thrd_last_fence_release)[id_to_int(tid)];
/* Falls through to the NULL case documented above when the thread has no
 * recorded fence-release (fall-through line not visible in this listing) */
2466 * Gets the last memory_order_seq_cst write (in the total global sequence)
2467 * performed on a particular object (i.e., memory location), not including the
2469 * @param curr The current ModelAction; also denotes the object location to
2471 * @return The last seq_cst write
2473 ModelAction * ModelChecker::get_last_seq_cst_write(ModelAction *curr) const
2475 void *location = curr->get_location();
2476 action_list_t *list = get_safe_ptr_action(obj_map, location);
2477 /* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
2478 action_list_t::reverse_iterator rit;
/* First locate curr itself in the per-location list (reverse scan)... */
2479 for (rit = list->rbegin(); (*rit) != curr; rit++)
2481 rit++; /* Skip past curr */
/* ...then continue backwards to the most recent seq_cst write before it */
2482 for ( ; rit != list->rend(); rit++)
2483 if ((*rit)->is_write() && (*rit)->is_seqcst())
2489 * Gets the last memory_order_seq_cst fence (in the total global sequence)
2490 * performed in a particular thread, prior to a particular fence.
2491 * @param tid The ID of the thread to check
2492 * @param before_fence The fence from which to begin the search; if NULL, then
2493 * search for the most recent fence in the thread.
2494 * @return The last prior seq_cst fence in the thread, if exists; otherwise, NULL
2496 ModelAction * ModelChecker::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
2498 /* All fences should have NULL location */
2499 action_list_t *list = get_safe_ptr_action(obj_map, NULL);
2500 action_list_t::reverse_iterator rit = list->rbegin();
/* If a starting fence was given, rewind the iterator to it first */
2503 for (; rit != list->rend(); rit++)
2504 if (*rit == before_fence)
2507 ASSERT(*rit == before_fence);
/* Scan backwards for the nearest earlier seq_cst fence in this thread */
2511 for (; rit != list->rend(); rit++)
2512 if ((*rit)->is_fence() && (tid == (*rit)->get_tid()) && (*rit)->is_seqcst())
2518 * Gets the last unlock operation performed on a particular mutex (i.e., memory
2519 * location). This function identifies the mutex according to the current
2520 * action, which is presumed to perform on the same mutex.
2521 * @param curr The current ModelAction; also denotes the object location to
2523 * @return The last unlock operation
2525 ModelAction * ModelChecker::get_last_unlock(ModelAction *curr) const
2527 void *location = curr->get_location();
2528 action_list_t *list = get_safe_ptr_action(obj_map, location);
2529 /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
2530 action_list_t::reverse_iterator rit;
/* A wait() releases the mutex too, so it counts as an "unlock" here */
2531 for (rit = list->rbegin(); rit != list->rend(); rit++)
2532 if ((*rit)->is_unlock() || (*rit)->is_wait())
/**
 * @brief Get a thread's "parent" action: its last action, or — when it has
 * none yet — the THREAD_CREATE action that spawned it.
 * @param tid The thread whose parent action is requested
 */
2537 ModelAction * ModelChecker::get_parent_action(thread_id_t tid) const
2539 ModelAction *parent = get_last_action(tid);
/* No actions yet in this thread: fall back to its creation action */
2541 parent = get_thread(tid)->get_creation();
2546 * Returns the clock vector for a given thread.
2547 * @param tid The thread whose clock vector we want
2548 * @return Desired clock vector
2550 ClockVector * ModelChecker::get_cv(thread_id_t tid) const
/* The thread's current clock vector lives on its parent action */
2552 return get_parent_action(tid)->get_cv();
2556 * @brief Find the promise (if any) to resolve for the current action and
2557 * remove it from the pending promise vector
2558 * @param curr The current ModelAction. Should be a write.
2559 * @return The Promise to resolve, if any; otherwise NULL
2561 Promise * ModelChecker::pop_promise_to_resolve(const ModelAction *curr)
/* The Node records which promise index (if any) this write was chosen to
 * satisfy; erase and return the matching Promise */
2563 for (unsigned int i = 0; i < promises->size(); i++)
2564 if (curr->get_node()->get_promise(i)) {
2565 Promise *ret = (*promises)[i];
2566 promises->erase(promises->begin() + i);
2573 * Resolve a Promise with a current write.
2574 * @param write The ModelAction that is fulfilling Promises
2575 * @param promise The Promise to resolve
2576 * @return True if the Promise was successfully resolved; false otherwise
2578 bool ModelChecker::resolve_promise(ModelAction *write, Promise *promise)
2580 ModelVector<ModelAction *> actions_to_check;
/* Point every read that was waiting on this promise at the real write */
2582 for (unsigned int i = 0; i < promise->get_num_readers(); i++) {
2583 ModelAction *read = promise->get_reader(i);
2584 read_from(read, write);
2585 actions_to_check.push_back(read);
2587 /* Make sure the promise's value matches the write's value */
2588 ASSERT(promise->is_compatible(write) && promise->same_value(write));
2589 if (!mo_graph->resolvePromise(promise, write))
2590 priv->failed_promise = true;
2593 * @todo It is possible to end up in an inconsistent state, where a
2594 * "resolved" promise may still be referenced if
2595 * CycleGraph::resolvePromise() failed, so don't delete 'promise'.
2597 * Note that the inconsistency only matters when dumping mo_graph to
2603 //Check whether reading these writes has made threads unable to
2605 for (unsigned int i = 0; i < actions_to_check.size(); i++) {
2606 ModelAction *read = actions_to_check[i];
/* 'true' = treat 'read' as a read-check (inspect its reads-from source) */
2607 mo_check_promises(read, true);
2614 * Compute the set of promises that could potentially be satisfied by this
2615 * action. Note that the set computation actually appears in the Node, not in
2617 * @param curr The ModelAction that may satisfy promises
2619 void ModelChecker::compute_promises(ModelAction *curr)
2621 for (unsigned int i = 0; i < promises->size(); i++) {
2622 Promise *promise = (*promises)[i];
/* Location and value must both match for curr to satisfy the promise */
2623 if (!promise->is_compatible(curr) || !promise->same_value(curr))
2626 bool satisfy = true;
2627 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2628 const ModelAction *act = promise->get_reader(j);
/* A reader that is ordered before (or could synchronize with) curr
 * cannot read from curr */
2629 if (act->happens_before(curr) ||
2630 act->could_synchronize_with(curr)) {
/* Record the satisfiable promise index on curr's Node */
2636 curr->get_node()->set_promise(i);
2640 /** Checks promises in response to change in ClockVector Threads. */
2641 void ModelChecker::check_promises(thread_id_t tid, ClockVector *old_cv, ClockVector *merge_cv)
2643 for (unsigned int i = 0; i < promises->size(); i++) {
2644 Promise *promise = (*promises)[i];
2645 if (!promise->thread_is_available(tid))
2647 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2648 const ModelAction *act = promise->get_reader(j);
/* Newly-established synchronization with a promised read means this
 * thread can no longer satisfy the promise */
2649 if ((!old_cv || !old_cv->synchronized_since(act)) &&
2650 merge_cv->synchronized_since(act)) {
2651 if (promise->eliminate_thread(tid)) {
2652 /* Promise has failed */
2653 priv->failed_promise = true;
/** @brief Re-check all promises after a thread has been disabled; any promise
 *  left with no available satisfying thread has failed. */
2661 void ModelChecker::check_promises_thread_disabled()
2663 for (unsigned int i = 0; i < promises->size(); i++) {
2664 Promise *promise = (*promises)[i];
2665 if (promise->has_failed()) {
2666 priv->failed_promise = true;
2673 * @brief Checks promises in response to addition to modification order for
2676 * We test whether threads are still available for satisfying promises after an
2677 * addition to our modification order constraints. Those that are unavailable
2678 * are "eliminated". Once all threads are eliminated from satisfying a promise,
2679 * that promise has failed.
2681 * @param act The ModelAction which updated the modification order
2682 * @param is_read_check Should be true if act is a read and we must check for
2683 * updates to the store from which it read (there is a distinction here for
2684 * RMW's, which are both a load and a store)
2686 void ModelChecker::mo_check_promises(const ModelAction *act, bool is_read_check)
/* The write under consideration: act itself, or — for a read-check —
 * the store that act read from */
2688 const ModelAction *write = is_read_check ? act->get_reads_from() : act;
2690 for (unsigned int i = 0; i < promises->size(); i++) {
2691 Promise *promise = (*promises)[i];
2693 // Is this promise on the same location?
2694 if (!promise->same_location(write))
2697 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2698 const ModelAction *pread = promise->get_reader(j);
2699 if (!pread->happens_before(act))
2701 if (mo_graph->checkPromise(write, promise)) {
2702 priv->failed_promise = true;
2708 // Don't do any lookups twice for the same thread
2709 if (!promise->thread_is_available(act->get_tid()))
/* promise --mo--> write would make the promise unsatisfiable */
2712 if (mo_graph->checkReachable(promise, write)) {
2713 if (mo_graph->checkPromise(write, promise)) {
2714 priv->failed_promise = true;
2722 * Compute the set of writes that may break the current pending release
2723 * sequence. This information is extracted from previous release sequence
2726 * @param curr The current ModelAction. Must be a release sequence fixup
2729 void ModelChecker::compute_relseq_breakwrites(ModelAction *curr)
2731 if (pending_rel_seqs->empty())
/* Offer each potentially-breaking write as a choice on curr's Node */
2734 struct release_seq *pending = pending_rel_seqs->back();
2735 for (unsigned int i = 0; i < pending->writes.size(); i++) {
2736 const ModelAction *write = pending->writes[i];
2737 curr->get_node()->add_relseq_break(write);
2740 /* NULL means don't break the sequence; just synchronize */
2741 curr->get_node()->add_relseq_break(NULL);
2745 * Build up an initial set of all past writes that this 'read' action may read
2746 * from, as well as any previously-observed future values that must still be valid.
2748 * @param curr is the current ModelAction that we are exploring; it must be a
2751 void ModelChecker::build_may_read_from(ModelAction *curr)
2753 SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
2755 ASSERT(curr->is_read());
2757 ModelAction *last_sc_write = NULL;
/* SC reads are restricted relative to the last SC write on this location */
2759 if (curr->is_seqcst())
2760 last_sc_write = get_last_seq_cst_write(curr);
2762 /* Iterate over all threads */
2763 for (i = 0; i < thrd_lists->size(); i++) {
2764 /* Iterate over actions in thread, starting from most recent */
2765 action_list_t *list = &(*thrd_lists)[i];
2766 action_list_t::reverse_iterator rit;
2767 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2768 ModelAction *act = *rit;
2770 /* Only consider 'write' actions */
2771 if (!act->is_write() || act == curr)
2774 /* Don't consider more than one seq_cst write if we are a seq_cst read. */
2775 bool allow_read = true;
2777 if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
2779 else if (curr->get_sleep_flag() && !curr->is_seqcst() && !sleep_can_read_from(curr, act))
2783 /* Only add feasible reads */
/* Trial-run the mo_graph update; keep 'act' only if it stays feasible */
2784 mo_graph->startChanges();
2785 r_modification_order(curr, act);
2786 if (!is_infeasible())
2787 curr->get_node()->add_read_from_past(act);
2788 mo_graph->rollbackChanges();
2791 /* Include at most one act per-thread that "happens before" curr */
2792 if (act->happens_before(curr))
2797 /* Inherit existing, promised future values */
2798 for (i = 0; i < promises->size(); i++) {
2799 const Promise *promise = (*promises)[i];
2800 const ModelAction *promise_read = promise->get_reader(0);
2801 if (promise_read->same_var(curr)) {
2802 /* Only add feasible future-values */
2803 mo_graph->startChanges();
2804 r_modification_order(curr, promise);
2805 if (!is_infeasible())
2806 curr->get_node()->add_read_from_promise(promise_read);
2807 mo_graph->rollbackChanges();
2811 /* We may find no valid may-read-from only if the execution is doomed */
2812 if (!curr->get_node()->read_from_size()) {
2813 priv->no_valid_reads = true;
2817 if (DBG_ENABLED()) {
2818 model_print("Reached read action:\n");
2820 model_print("Printing read_from_past\n");
2821 curr->get_node()->print_read_from_past();
2822 model_print("End printing read_from_past\n");
/**
 * @brief Decide whether a sleeping read may read from a given write.
 * Walks backwards through the RMW reads-from chain starting at 'write'.
 * @param curr The (sleeping) read
 * @param write The candidate store
 */
2826 bool ModelChecker::sleep_can_read_from(ModelAction *curr, const ModelAction *write)
2828 for ( ; write != NULL; write = write->get_reads_from()) {
2829 /* UNINIT actions don't have a Node, and they never sleep */
2830 if (write->is_uninitialized())
2832 Node *prevnode = write->get_node()->get_parent();
2834 bool thread_sleep = prevnode->enabled_status(curr->get_tid()) == THREAD_SLEEP_SET;
2835 if (write->is_release() && thread_sleep)
/* Non-RMW write ends the chain */
2837 if (!write->is_rmw())
2844 * @brief Get an action representing an uninitialized atomic
2846 * This function may create a new one or try to retrieve one from the NodeStack
2848 * @param curr The current action, which prompts the creation of an UNINIT action
2849 * @return A pointer to the UNINIT ModelAction
2851 ModelAction * ModelChecker::get_uninitialized_action(const ModelAction *curr) const
2853 Node *node = curr->get_node();
/* Reuse a cached UNINIT action from the Node if one exists */
2854 ModelAction *act = node->get_uninit_action();
2856 act = new ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, curr->get_location(), model->params.uninitvalue, model_thread);
2857 node->set_uninit_action(act);
/* Give the action an empty clock vector (no parent) */
2859 act->create_cv(NULL);
/** @brief Print an action trace list along with a simple trace hash. */
2863 static void print_list(action_list_t *list)
2865 action_list_t::iterator it;
2867 model_print("---------------------------------------------------------------------\n");
2869 unsigned int hash = 0;
2871 for (it = list->begin(); it != list->end(); it++) {
2872 const ModelAction *act = *it;
2873 if (act->get_seq_number() > 0)
/* Fold every action's hash into a running trace checksum */
2875 hash = hash^(hash<<3)^((*it)->hash());
2877 model_print("HASH %u\n", hash);
2878 model_print("---------------------------------------------------------------------\n");
2881 #if SUPPORT_MOD_ORDER_DUMP
/** @brief Dump the modification-order graph plus rf/sb edges as Graphviz
 *  ("<filename>.dot"). Only compiled when SUPPORT_MOD_ORDER_DUMP is set. */
2882 void ModelChecker::dumpGraph(char *filename) const
2885 sprintf(buffer, "%s.dot", filename);
2886 FILE *file = fopen(buffer, "w");
2887 fprintf(file, "digraph %s {\n", filename);
2888 mo_graph->dumpNodes(file);
/* thread_array[t] holds each thread's most recent action, used to draw
 * sequenced-before edges */
2889 ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());
2891 for (action_list_t::iterator it = action_trace->begin(); it != action_trace->end(); it++) {
2892 ModelAction *act = *it;
2893 if (act->is_read()) {
2894 mo_graph->dot_print_node(file, act);
2895 if (act->get_reads_from())
2896 mo_graph->dot_print_edge(file,
2897 act->get_reads_from(),
2899 "label=\"rf\", color=red, weight=2");
2901 mo_graph->dot_print_edge(file,
2902 act->get_reads_from_promise(),
2904 "label=\"rf\", color=red");
/* NOTE(review): indexing mixes act->get_tid() and
 * id_to_int(act->get_tid()) — harmless only if thread_id_t converts
 * to int identically; confirm against the full source. */
2906 if (thread_array[act->get_tid()]) {
2907 mo_graph->dot_print_edge(file,
2908 thread_array[id_to_int(act->get_tid())],
2910 "label=\"sb\", color=blue, weight=400");
2913 thread_array[act->get_tid()] = act;
2915 fprintf(file, "}\n");
2916 model_free(thread_array);
2921 /** @brief Prints an execution trace summary. */
2922 void ModelChecker::print_summary() const
2924 #if SUPPORT_MOD_ORDER_DUMP
/* Write mo_graph and full graph dumps, one file pair per execution */
2925 char buffername[100];
2926 sprintf(buffername, "exec%04u", stats.num_total);
2927 mo_graph->dumpGraphToFile(buffername);
2928 sprintf(buffername, "graph%04u", stats.num_total);
2929 dumpGraph(buffername);
2932 model_print("Execution %d:", stats.num_total);
2933 if (isfeasibleprefix()) {
2934 if (scheduler->all_threads_sleeping())
2935 model_print(" SLEEP-SET REDUNDANT");
2938 print_infeasibility(" INFEASIBLE");
2939 print_list(action_trace);
2941 if (!promises->empty()) {
2942 model_print("Pending promises:\n");
2943 for (unsigned int i = 0; i < promises->size(); i++) {
2944 model_print(" [P%u] ", i);
2945 (*promises)[i]->print();
2952 * Add a Thread to the system for the first time. Should only be called once
2954 * @param t The Thread to add
2956 void ModelChecker::add_thread(Thread *t)
/* Register with both the ID->Thread map and the scheduler */
2958 thread_map->put(id_to_int(t->get_id()), t);
2959 scheduler->add_thread(t);
2963 * @brief Get a Thread reference by its ID
2964 * @param tid The Thread's ID
2965 * @return A Thread reference
2967 Thread * ModelChecker::get_thread(thread_id_t tid) const
2969 return thread_map->get(id_to_int(tid));
2973 * @brief Get a reference to the Thread in which a ModelAction was executed
2974 * @param act The ModelAction
2975 * @return A Thread reference
2977 Thread * ModelChecker::get_thread(const ModelAction *act) const
/* Convenience overload: resolve via the action's thread ID */
2979 return get_thread(act->get_tid());
2983 * @brief Get a Promise's "promise number"
2985 * A "promise number" is an index number that is unique to a promise, valid
2986 * only for a specific snapshot of an execution trace. Promises may come and go
2987 * as they are generated and resolved, so an index only retains meaning for the
2990 * @param promise The Promise to check
2991 * @return The promise index, if the promise still is valid; otherwise -1
2993 int ModelChecker::get_promise_number(const Promise *promise) const
/* Linear scan by pointer identity */
2995 for (unsigned int i = 0; i < promises->size(); i++)
2996 if ((*promises)[i] == promise)
3003 * @brief Check if a Thread is currently enabled
3004 * @param t The Thread to check
3005 * @return True if the Thread is currently enabled
3007 bool ModelChecker::is_enabled(Thread *t) const
3009 return scheduler->is_enabled(t);
3013 * @brief Check if a Thread is currently enabled
3014 * @param tid The ID of the Thread to check
3015 * @return True if the Thread is currently enabled
3017 bool ModelChecker::is_enabled(thread_id_t tid) const
/* Delegate to the scheduler's enabled-set */
3019 return scheduler->is_enabled(tid);
3023 * Switch from a model-checker context to a user-thread context. This is the
3024 * complement of ModelChecker::switch_to_master and must be called from the
3025 * model-checker context
3027 * @param thread The user-thread to switch to
3029 void ModelChecker::switch_from_master(Thread *thread)
3031 scheduler->set_current_thread(thread);
/* Hand control to the user thread; we resume here on switch_to_master */
3032 Thread::swap(&system_context, thread);
3036 * Switch from a user-context to the "master thread" context (a.k.a. system
3037 * context). This switch is made with the intention of exploring a particular
3038 * model-checking action (described by a ModelAction object). Must be called
3039 * from a user-thread context.
3041 * @param act The current action that will be explored. May be NULL only if
3042 * trace is exiting via an assertion (see ModelChecker::set_assert and
3043 * ModelChecker::has_asserted).
3044 * @return Return the value returned by the current action
3046 uint64_t ModelChecker::switch_to_master(ModelAction *act)
3049 Thread *old = thread_current();
3050 scheduler->set_current_thread(NULL);
/* Stash the action; the master will consume it via get_pending() */
3051 ASSERT(!old->get_pending());
3052 old->set_pending(act);
3053 if (Thread::swap(old, &system_context) < 0) {
3054 perror("swap threads");
/* Execution resumes here once the master schedules this thread again */
3057 return old->get_return_value();
3061 * Takes the next step in the execution, if possible.
3062 * @param curr The current step to take
3063 * @return Returns the next Thread to run, if any; NULL if this execution
3066 Thread * ModelChecker::take_step(ModelAction *curr)
3068 Thread *curr_thrd = get_thread(curr);
3069 ASSERT(curr_thrd->get_state() == THREAD_READY);
3071 if (!check_action_enabled(curr)) {
3072 /* Make the execution look like we chose to run this action
3073 * much later, when a lock/join can succeed */
3074 get_thread(curr)->set_pending(curr);
3075 scheduler->sleep(curr_thrd);
/* check_current_action may replace curr (e.g., via the NodeStack) */
3078 curr = check_current_action(curr);
3082 if (curr_thrd->is_blocked() || curr_thrd->is_complete())
3083 scheduler->remove_thread(curr_thrd);
3086 return action_select_next_thread(curr);
3090 /** Wrapper to run the user's main function, with appropriate arguments */
3091 void user_main_wrapper(void *)
/* argc/argv come from the model parameters captured at startup */
3093 user_main(model->params.argc, model->params.argv);
/** @brief Decide whether the current execution should stop: infeasible,
 *  a bug found on a feasible prefix, or the step bound exceeded. */
3096 bool ModelChecker::should_terminate_execution()
3098 /* Infeasible -> don't take any more steps */
3099 if (is_infeasible())
3101 else if (isfeasibleprefix() && have_bug_reports()) {
/* Optional bound on execution length (0 = unbounded) */
3106 if (params.bound != 0 && priv->used_sequence_numbers > params.bound)
3111 /** @brief Run ModelChecker for the user program */
3112 void ModelChecker::run()
3116 Thread *t = new Thread(&user_thread, &user_main_wrapper, NULL, NULL);
3121 * Stash next pending action(s) for thread(s). There
3122 * should only need to stash one thread's action--the
3123 * thread which just took a step--plus the first step
3124 * for any newly-created thread
3126 for (unsigned int i = 0; i < get_num_threads(); i++) {
3127 thread_id_t tid = int_to_id(i);
3128 Thread *thr = get_thread(tid);
3129 if (!thr->is_model_thread() && !thr->is_complete() && !thr->get_pending()) {
3130 switch_from_master(thr);
3131 if (thr->is_waiting_on(thr))
3132 assert_bug("Deadlock detected");
3136 /* Catch assertions from prior take_step or from
3137 * between-ModelAction bugs (e.g., data races) */
3142 t = get_next_thread();
3143 if (!t || t->is_model_thread())
3146 /* Consume the next action for a Thread */
3147 ModelAction *curr = t->get_pending();
3148 t->set_pending(NULL);
3149 t = take_step(curr);
3150 } while (!should_terminate_execution());
3153 * Launch end-of-execution release sequence fixups only when
3154 * the execution is otherwise feasible AND there are:
3156 * (1) pending release sequences
3157 * (2) pending assertions that could be invalidated by a change
3158 * in clock vectors (i.e., data races)
3159 * (3) no pending promises
3161 while (!pending_rel_seqs->empty() &&
3162 is_feasible_prefix_ignore_relseq() &&
3163 !unrealizedraces.empty()) {
3164 model_print("*** WARNING: release sequence fixup action "
3165 "(%zu pending release seuqence(s)) ***\n",
3166 pending_rel_seqs->size());
3167 ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
3168 std::memory_order_seq_cst, NULL, VALUE_NONE,
3172 } while (next_execution());
3174 model_print("******* Model-checking complete: *******\n");