10 #include "snapshot-interface.h"
12 #include "clockvector.h"
13 #include "cyclegraph.h"
16 #include "threads-model.h"
19 #define INITIAL_THREAD_ID 0
/* Copy the bug description into a snapshot-managed buffer so the message
 * survives/rolls back with execution snapshots. The "%s" in fmt is replaced
 * by str, so strlen(fmt) + strlen(str) covers the formatted text plus NUL. */
24 bug_message(const char *str) {
25 const char *fmt = " [BUG] %s\n";
26 msg = (char *)snapshot_malloc(strlen(fmt) + strlen(str));
27 sprintf(msg, fmt, str);
/* Release the snapshot-allocated message, if one was allocated */
29 ~bug_message() { if (msg) snapshot_free(msg); }
/** @brief Print this bug message via the model-checker's output channel */
32 void print() { model_print("%s", msg); }
38 * Structure for holding small ModelChecker members that should be snapshotted
40 struct model_snapshot_members {
41 model_snapshot_members() :
42 /* First thread created will have id INITIAL_THREAD_ID */
43 next_thread_id(INITIAL_THREAD_ID),
44 used_sequence_numbers(0),
/* All infeasibility/termination flags start false for a fresh execution */
48 failed_promise(false),
49 too_many_reads(false),
50 no_valid_reads(false),
51 bad_synchronization(false),
55 ~model_snapshot_members() {
/* bug_message objects are heap-allocated by assert_bug(); free them here */
56 for (unsigned int i = 0; i < bugs.size(); i++)
/** @brief ID to hand to the next created thread; doubles as thread count */
61 unsigned int next_thread_id;
/** @brief Last sequence number issued to a ModelAction */
62 modelclock_t used_sequence_numbers;
/** @brief Cached most-recent backtracking point for this execution */
63 ModelAction *next_backtrack;
/** @brief Bugs reported during this execution (owned by this struct) */
64 SnapVector<bug_message *> bugs;
65 struct execution_stats stats;
69 /** @brief Incorrectly-ordered synchronization was made */
70 bool bad_synchronization;
76 /** @brief Constructor */
77 ModelChecker::ModelChecker(struct model_params params) :
78 /* Initialize default scheduler */
80 scheduler(new Scheduler()),
82 earliest_diverge(NULL),
83 action_trace(new action_list_t()),
84 thread_map(new HashTable<int, Thread *, int>()),
/* Per-location action lists, keyed by memory address */
85 obj_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
/* Per-condvar lists of threads waiting on it */
86 condvar_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
/* Per-location, per-thread action lists */
87 obj_thrd_map(new HashTable<void *, SnapVector<action_list_t> *, uintptr_t, 4 >()),
88 promises(new SnapVector<Promise *>()),
89 futurevalues(new SnapVector<struct PendingFutureValue>()),
90 pending_rel_seqs(new SnapVector<struct release_seq *>()),
91 thrd_last_action(new SnapVector<ModelAction *>(1)),
92 thrd_last_fence_release(new SnapVector<ModelAction *>()),
93 node_stack(new NodeStack()),
/* Snapshotted members live behind one pointer; see model_snapshot_members */
94 priv(new struct model_snapshot_members()),
95 mo_graph(new CycleGraph())
97 /* Initialize a model-checker thread, for special ModelActions */
98 model_thread = new Thread(get_next_id());
99 thread_map->put(id_to_int(model_thread->get_id()), model_thread);
102 /** @brief Destructor */
103 ModelChecker::~ModelChecker()
/* Thread objects are owned by thread_map; delete each before the map */
105 for (unsigned int i = 0; i < get_num_threads(); i++)
106 delete thread_map->get(i);
111 delete condvar_waiters_map;
/* promises vector owns its Promise objects */
114 for (unsigned int i = 0; i < promises->size(); i++)
115 delete (*promises)[i];
118 delete pending_rel_seqs;
120 delete thrd_last_action;
121 delete thrd_last_fence_release;
/**
 * Look up the action list for a location, lazily creating an empty list
 * on first access so callers never receive NULL.
 */
128 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr)
130 action_list_t *tmp = hash->get(ptr);
/* First access for this location: create (and presumably cache) a new list */
132 tmp = new action_list_t();
/**
 * Same lazy-initialization pattern as get_safe_ptr_action(), but for the
 * per-location vector of per-thread action lists.
 */
138 static SnapVector<action_list_t> * get_safe_ptr_vect_action(HashTable<void *, SnapVector<action_list_t> *, uintptr_t, 4> * hash, void * ptr)
140 SnapVector<action_list_t> *tmp = hash->get(ptr);
142 tmp = new SnapVector<action_list_t>();
149 * Restores user program to initial state and resets all model-checker data
152 void ModelChecker::reset_to_initial_state()
154 DEBUG("+++ Resetting to initial state +++\n");
155 node_stack->reset_execution();
158 * FIXME: if we utilize partial rollback, we will need to free only
159 * those pending actions which were NOT pending before the rollback
/* Free every thread's pending (not-yet-executed) action */
162 for (unsigned int i = 0; i < get_num_threads(); i++)
163 delete get_thread(int_to_id(i))->get_pending();
/* Roll the user program's memory back to the initial snapshot (index 0) */
165 snapshot_backtrack_before(0);
168 /** @return a thread ID for a new Thread */
169 thread_id_t ModelChecker::get_next_id()
/* Post-increment: IDs are handed out sequentially from INITIAL_THREAD_ID */
171 return priv->next_thread_id++;
174 /** @return the number of user threads created during this execution */
175 unsigned int ModelChecker::get_num_threads() const
/* next_thread_id is both the next ID and the count of IDs handed out */
177 return priv->next_thread_id;
181 * Must be called from user-thread context (e.g., through the global
182 * thread_current() interface)
184 * @return The currently executing Thread.
186 Thread * ModelChecker::get_current_thread() const
/* The scheduler is the single source of truth for the running thread */
188 return scheduler->get_current_thread();
191 /** @return a sequence number for a new ModelAction */
192 modelclock_t ModelChecker::get_next_seq_num()
/* Pre-increment: sequence numbers start at 1 (0 means "before any action") */
194 return ++priv->used_sequence_numbers;
/** @return the NodeStack node for the current execution point */
197 Node * ModelChecker::get_curr_node() const
199 return node_stack->get_head();
203 * @brief Select the next thread to execute based on the curren action
205 * RMW actions occur in two parts, and we cannot split them. And THREAD_CREATE
206 * actions should be followed by the execution of their child thread. In either
207 * case, the current action should determine the next thread schedule.
209 * @param curr The current action
210 * @return The next thread to run, if the current action will determine this
211 * selection; otherwise NULL
213 Thread * ModelChecker::action_select_next_thread(const ModelAction *curr) const
215 /* Do not split atomic RMW */
/* Keep the same thread running so the RMW's two halves stay adjacent */
217 return get_thread(curr);
218 /* Follow CREATE with the created thread */
219 if (curr->get_type() == THREAD_CREATE)
220 return curr->get_thread_operand();
225 * @brief Choose the next thread to execute.
227 * This function chooses the next thread that should execute. It can enforce
228 * execution replay/backtracking or, if the model-checker has no preference
229 * regarding the next thread (i.e., when exploring a new execution ordering),
230 * we defer to the scheduler.
232 * @return The next chosen thread to run, if any exist. Or else if the current
233 * execution should terminate, return NULL.
235 Thread * ModelChecker::get_next_thread()
240 * Have we completed exploring the preselected path? Then let the
/* No replay constraint remains; let the scheduler pick freely */
244 return scheduler->select_next_thread();
246 /* Else, we are trying to replay an execution */
247 ModelAction *next = node_stack->get_next()->get_action();
249 if (next == diverge) {
/* Track the earliest divergence point seen since the last feasible run */
250 if (earliest_diverge == NULL || *diverge < *earliest_diverge)
251 earliest_diverge = diverge;
253 Node *nextnode = next->get_node();
254 Node *prevnode = nextnode->get_parent();
255 scheduler->update_sleep_set(prevnode);
257 /* Reached divergence point */
258 if (nextnode->increment_behaviors()) {
259 /* Execute the same thread with a new behavior */
260 tid = next->get_tid();
261 node_stack->pop_restofstack(2);
264 /* Make a different thread execute for next step */
/* Put the previously-run thread to sleep so the backtracked one runs */
265 scheduler->add_sleep(get_thread(next->get_tid()));
266 tid = prevnode->get_next_backtrack();
267 /* Make sure the backtracked thread isn't sleeping. */
268 node_stack->pop_restofstack(1);
269 if (diverge == earliest_diverge) {
270 earliest_diverge = prevnode->get_action();
273 /* Start the round robin scheduler from this thread id */
274 scheduler->set_scheduler_thread(tid);
275 /* The correct sleep set is in the parent node. */
278 DEBUG("*** Divergence point ***\n");
/* Not at the divergence point: replay the recorded thread choice */
282 tid = next->get_tid();
284 DEBUG("*** ModelChecker chose next thread = %d ***\n", id_to_int(tid));
285 ASSERT(tid != THREAD_ID_T_NONE);
286 return get_thread(id_to_int(tid));
290 * We need to know what the next actions of all threads in the sleep
291 * set will be. This method computes them and stores the actions at
292 * the corresponding thread object's pending action.
295 void ModelChecker::execute_sleep_set()
297 for (unsigned int i = 0; i < get_num_threads(); i++) {
298 thread_id_t tid = int_to_id(i);
299 Thread *thr = get_thread(tid);
/* Only tag threads that are both asleep and have a pending action */
300 if (scheduler->is_sleep_set(thr) && thr->get_pending()) {
/* Mark the pending action so it is treated as a sleep-set action */
301 thr->get_pending()->set_sleep_flag();
307 * @brief Should the current action wake up a given thread?
309 * @param curr The current action
310 * @param thread The thread that we might wake up
311 * @return True, if we should wake up the sleeping thread; false otherwise
313 bool ModelChecker::should_wake_up(const ModelAction *curr, const Thread *thread) const
315 const ModelAction *asleep = thread->get_pending();
316 /* Don't allow partial RMW to wake anyone up */
319 /* Synchronizing actions may have been backtracked */
320 if (asleep->could_synchronize_with(curr))
322 /* All acquire/release fences and fence-acquire/store-release */
323 if (asleep->is_fence() && asleep->is_acquire() && curr->is_release())
325 /* Fence-release + store can awake load-acquire on the same location */
326 if (asleep->is_read() && asleep->is_acquire() && curr->same_var(asleep) && curr->is_write()) {
327 ModelAction *fence_release = get_last_fence_release(curr->get_tid());
/* Wake only if the fence-release happened after the sleeper's last action */
328 if (fence_release && *(get_last_action(thread->get_id())) < *fence_release)
/**
 * @brief Remove from the sleep set any thread that curr might interact with
 * @param curr The action that may wake sleeping threads (see should_wake_up())
 */
334 void ModelChecker::wake_up_sleeping_actions(ModelAction *curr)
336 for (unsigned int i = 0; i < get_num_threads(); i++) {
337 Thread *thr = get_thread(int_to_id(i));
338 if (scheduler->is_sleep_set(thr)) {
339 if (should_wake_up(curr, thr))
340 /* Remove this thread from sleep set */
341 scheduler->remove_sleep(thr);
346 /** @brief Alert the model-checker that an incorrectly-ordered
347 * synchronization was made */
348 void ModelChecker::set_bad_synchronization()
/* Flag lives in snapshotted state, so it rolls back with the execution */
350 priv->bad_synchronization = true;
354 * Check whether the current trace has triggered an assertion which should halt
357 * @return True, if the execution should be aborted; false otherwise
359 bool ModelChecker::has_asserted() const
361 return priv->asserted;
365 * Trigger a trace assertion which should cause this execution to be halted.
366 * This can be due to a detected bug or due to an infeasibility that should
369 void ModelChecker::set_assert()
371 priv->asserted = true;
375 * Check if we are in a deadlock. Should only be called at the end of an
376 * execution, although it should not give false positives in the middle of an
377 * execution (there should be some ENABLED thread).
379 * @return True if program is in a deadlock; false otherwise
381 bool ModelChecker::is_deadlocked() const
383 bool blocking_threads = false;
384 for (unsigned int i = 0; i < get_num_threads(); i++) {
385 thread_id_t tid = int_to_id(i);
388 Thread *t = get_thread(tid);
/* A user thread with a pending action here is blocked (not yet executed) */
389 if (!t->is_model_thread() && t->get_pending())
390 blocking_threads = true;
392 return blocking_threads;
396 * Check if this is a complete execution. That is, have all thread completed
397 * execution (rather than exiting because sleep sets have forced a redundant
400 * @return True if the execution is complete.
402 bool ModelChecker::is_complete_execution() const
/* Any still-enabled thread means the execution terminated early */
404 for (unsigned int i = 0; i < get_num_threads(); i++)
405 if (is_enabled(int_to_id(i)))
411 * @brief Assert a bug in the executing program.
413 * Use this function to assert any sort of bug in the user program. If the
414 * current trace is feasible (actually, a prefix of some feasible execution),
415 * then this execution will be aborted, printing the appropriate message. If
416 * the current trace is not yet feasible, the error message will be stashed and
417 * printed if the execution ever becomes feasible.
419 * @param msg Descriptive message for the bug (do not include newline char)
420 * @return True if bug is immediately-feasible
422 bool ModelChecker::assert_bug(const char *msg)
/* Stash the message; ownership passes to priv->bugs (freed in its dtor) */
424 priv->bugs.push_back(new bug_message(msg));
426 if (isfeasibleprefix()) {
434 * @brief Assert a bug in the executing program, asserted by a user thread
435 * @see ModelChecker::assert_bug
436 * @param msg Descriptive message for the bug (do not include newline char)
438 void ModelChecker::assert_user_bug(const char *msg)
440 /* If feasible bug, bail out now */
/* Hand control back to the model-checker (master) thread to abort this trace */
442 switch_to_master(NULL);
445 /** @return True, if any bugs have been reported for this execution */
446 bool ModelChecker::have_bug_reports() const
448 return priv->bugs.size() != 0;
451 /** @brief Print bug report listing for this execution (if any bugs exist) */
452 void ModelChecker::print_bugs() const
454 if (have_bug_reports()) {
/* Pluralize "bug" when there is more than one report */
455 model_print("Bug report: %zu bug%s detected\n",
457 priv->bugs.size() > 1 ? "s" : "");
458 for (unsigned int i = 0; i < priv->bugs.size(); i++)
459 priv->bugs[i]->print();
464 * @brief Record end-of-execution stats
466 * Must be run when exiting an execution. Records various stats.
467 * @see struct execution_stats
469 void ModelChecker::record_stats()
/* Classify this execution into exactly one bucket, in priority order */
472 if (!isfeasibleprefix())
473 stats.num_infeasible++;
474 else if (have_bug_reports())
475 stats.num_buggy_executions++;
476 else if (is_complete_execution())
477 stats.num_complete++;
/* Feasible, bug-free, but cut short (e.g., by sleep sets): redundant */
479 stats.num_redundant++;
482 * @todo We can violate this ASSERT() when fairness/sleep sets
483 * conflict to cause an execution to terminate, e.g. with:
484 * Scheduler: [0: disabled][1: disabled][2: sleep][3: current, enabled]
486 //ASSERT(scheduler->all_threads_sleeping());
490 /** @brief Print execution stats */
491 void ModelChecker::print_stats() const
493 model_print("Number of complete, bug-free executions: %d\n", stats.num_complete);
494 model_print("Number of redundant executions: %d\n", stats.num_redundant);
495 model_print("Number of buggy executions: %d\n", stats.num_buggy_executions);
496 model_print("Number of infeasible executions: %d\n", stats.num_infeasible);
497 model_print("Total executions: %d\n", stats.num_total);
498 model_print("Total nodes created: %d\n", node_stack->get_total_nodes());
502 * @brief End-of-exeuction print
503 * @param printbugs Should any existing bugs be printed?
505 void ModelChecker::print_execution(bool printbugs) const
507 print_program_output();
/* Verbose mode also reports where this execution diverged from the last one */
509 if (params.verbose) {
510 model_print("Earliest divergence point since last feasible execution:\n");
511 if (earliest_diverge)
512 earliest_diverge->print();
514 model_print("(Not set)\n");
520 /* Don't print invalid bugs */
529 * Queries the model-checker for more executions to explore and, if one
530 * exists, resets the model-checker state to execute a new execution.
532 * @return If there are more executions to explore, return true. Otherwise,
535 bool ModelChecker::next_execution()
538 /* Is this execution a feasible execution that's worth bug-checking? */
539 bool complete = isfeasibleprefix() && (is_complete_execution() ||
542 /* End-of-execution bug checks */
545 assert_bug("Deadlock detected");
/* Print the trace when verbose, or when a checked execution found bugs */
553 if (params.verbose || (complete && have_bug_reports()))
554 print_execution(complete);
/* Discard buffered program output either way */
556 clear_program_output();
/* A feasible execution resets the divergence tracking */
559 earliest_diverge = NULL;
/* No remaining backtracking points means exploration is finished */
561 if ((diverge = get_next_backtrack()) == NULL)
565 model_print("Next execution will diverge at:\n");
/* Roll program + model-checker state back for the next exploration */
569 reset_to_initial_state();
574 * @brief Find the last fence-related backtracking conflict for a ModelAction
576 * This function performs the search for the most recent conflicting action
577 * against which we should perform backtracking, as affected by fence
578 * operations. This includes pairs of potentially-synchronizing actions which
579 * occur due to fence-acquire or fence-release, and hence should be explored in
580 * the opposite execution order.
582 * @param act The current action
583 * @return The most recent action which conflicts with act due to fences
585 ModelAction * ModelChecker::get_last_fence_conflict(ModelAction *act) const
587 /* Only perform release/acquire fence backtracking for stores */
588 if (!act->is_write())
591 /* Find a fence-release (or, act is a release) */
592 ModelAction *last_release;
593 if (act->is_release())
596 last_release = get_last_fence_release(act->get_tid());
600 /* Skip past the release */
601 action_list_t *list = action_trace;
602 action_list_t::reverse_iterator rit;
/* Walk the whole trace backwards until we find the release action */
603 for (rit = list->rbegin(); rit != list->rend(); rit++)
604 if (*rit == last_release)
/* The release must appear in the trace */
606 ASSERT(rit != list->rend());
611 * load --sb-> fence-acquire */
/* Per-thread: the latest acquire-fence and the load sb-before it */
612 ModelVector<ModelAction *> acquire_fences(get_num_threads(), NULL);
613 ModelVector<ModelAction *> prior_loads(get_num_threads(), NULL);
614 bool found_acquire_fences = false;
/* Continue the backwards walk from the release toward the trace start */
615 for ( ; rit != list->rend(); rit++) {
616 ModelAction *prev = *rit;
/* Actions in act's own thread cannot form a cross-thread conflict */
617 if (act->same_thread(prev))
620 int tid = id_to_int(prev->get_tid());
622 if (prev->is_read() && act->same_var(prev)) {
623 if (prev->is_acquire()) {
624 /* Found most recent load-acquire, don't need
625 * to search for more fences */
626 if (!found_acquire_fences)
629 prior_loads[tid] = prev;
/* Record only the most recent acquire-fence per thread */
632 if (prev->is_acquire() && prev->is_fence() && !acquire_fences[tid]) {
633 found_acquire_fences = true;
634 acquire_fences[tid] = prev;
/* Among threads with both a fence and a prior load, pick the latest fence */
638 ModelAction *latest_backtrack = NULL;
639 for (unsigned int i = 0; i < acquire_fences.size(); i++)
640 if (acquire_fences[i] && prior_loads[i])
641 if (!latest_backtrack || *latest_backtrack < *acquire_fences[i])
642 latest_backtrack = acquire_fences[i];
643 return latest_backtrack;
647 * @brief Find the last backtracking conflict for a ModelAction
649 * This function performs the search for the most recent conflicting action
650 * against which we should perform backtracking. This primary includes pairs of
651 * synchronizing actions which should be explored in the opposite execution
654 * @param act The current action
655 * @return The most recent action which conflicts with act
657 ModelAction * ModelChecker::get_last_conflict(ModelAction *act) const
/* Conflict definition depends on the kind of the current action */
659 switch (act->get_type()) {
660 /* case ATOMIC_FENCE: fences don't directly cause backtracking */
664 ModelAction *ret = NULL;
666 /* linear search: from most recent to oldest */
667 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
668 action_list_t::reverse_iterator rit;
669 for (rit = list->rbegin(); rit != list->rend(); rit++) {
670 ModelAction *prev = *rit;
/* A potentially-synchronizing pair should be explored in both orders */
671 if (prev->could_synchronize_with(act)) {
/* Fence-induced conflicts are considered as well (may supersede ret) */
677 ModelAction *ret2 = get_last_fence_conflict(act);
687 case ATOMIC_TRYLOCK: {
688 /* linear search: from most recent to oldest */
689 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
690 action_list_t::reverse_iterator rit;
691 for (rit = list->rbegin(); rit != list->rend(); rit++) {
692 ModelAction *prev = *rit;
/* Lock/trylock on the same mutex conflict with each other */
693 if (act->is_conflicting_lock(prev))
698 case ATOMIC_UNLOCK: {
699 /* linear search: from most recent to oldest */
700 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
701 action_list_t::reverse_iterator rit;
702 for (rit = list->rbegin(); rit != list->rend(); rit++) {
703 ModelAction *prev = *rit;
/* An unlock conflicts with another thread's failed trylock */
704 if (!act->same_thread(prev) && prev->is_failed_trylock())
710 /* linear search: from most recent to oldest */
711 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
712 action_list_t::reverse_iterator rit;
713 for (rit = list->rbegin(); rit != list->rend(); rit++) {
714 ModelAction *prev = *rit;
/* Waits conflict with failed trylocks and with notifies from other threads */
715 if (!act->same_thread(prev) && prev->is_failed_trylock())
717 if (!act->same_thread(prev) && prev->is_notify())
723 case ATOMIC_NOTIFY_ALL:
724 case ATOMIC_NOTIFY_ONE: {
725 /* linear search: from most recent to oldest */
726 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
727 action_list_t::reverse_iterator rit;
728 for (rit = list->rbegin(); rit != list->rend(); rit++) {
729 ModelAction *prev = *rit;
/* A notify conflicts with another thread's wait on the same condvar */
730 if (!act->same_thread(prev) && prev->is_wait())
741 /** This method finds backtracking points where we should try to
742 * reorder the parameter ModelAction against.
744 * @param the ModelAction to find backtracking points for.
746 void ModelChecker::set_backtracking(ModelAction *act)
748 Thread *t = get_thread(act);
/* Most recent action conflicting with act; bail out below if none exists */
749 ModelAction *prev = get_last_conflict(act);
753 Node *node = prev->get_node()->get_parent();
755 /* See Dynamic Partial Order Reduction (addendum), POPL '05 */
756 int low_tid, high_tid;
/* If act's thread was enabled at the conflict, only it needs backtracking;
 * otherwise consider every thread */
757 if (node->enabled_status(t->get_id()) == THREAD_ENABLED) {
758 low_tid = id_to_int(act->get_tid());
759 high_tid = low_tid + 1;
762 high_tid = get_num_threads();
765 for (int i = low_tid; i < high_tid; i++) {
766 thread_id_t tid = int_to_id(i);
768 /* Make sure this thread can be enabled here. */
769 if (i >= node->get_num_threads())
772 /* See Dynamic Partial Order Reduction (addendum), POPL '05 */
773 /* Don't backtrack into a point where the thread is disabled or sleeping. */
774 if (node->enabled_status(tid) != THREAD_ENABLED)
777 /* Check if this has been explored already */
778 if (node->has_been_explored(tid))
781 /* See if fairness allows */
782 if (model->params.fairwindow != 0 && !node->has_priority(tid)) {
/* Skip tid if some enabled, prioritized thread must run first */
784 for (int t = 0; t < node->get_num_threads(); t++) {
785 thread_id_t tother = int_to_id(t);
786 if (node->is_enabled(tother) && node->has_priority(tother)) {
795 /* See if CHESS-like yield fairness allows */
796 if (model->params.yieldon) {
798 for (int t = 0; t < node->get_num_threads(); t++) {
799 thread_id_t tother = int_to_id(t);
800 if (node->is_enabled(tother) && node->has_priority_over(tid, tother)) {
809 /* Cache the latest backtracking point */
810 set_latest_backtrack(prev);
812 /* If this is a new backtracking point, mark the tree */
813 if (!node->set_backtrack(tid))
815 DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
816 id_to_int(prev->get_tid()),
817 id_to_int(t->get_id()));
826 * @brief Cache the a backtracking point as the "most recent", if eligible
828 * Note that this does not prepare the NodeStack for this backtracking
829 * operation, it only caches the action on a per-execution basis
831 * @param act The operation at which we should explore a different next action
832 * (i.e., backtracking point)
833 * @return True, if this action is now the most recent backtracking point;
836 bool ModelChecker::set_latest_backtrack(ModelAction *act)
/* Replace the cached point only if act is later in the trace (by seq. order) */
838 if (!priv->next_backtrack || *act > *priv->next_backtrack) {
839 priv->next_backtrack = act;
846 * Returns last backtracking point. The model checker will explore a different
847 * path for this point in the next execution.
848 * @return The ModelAction at which the next execution should diverge.
850 ModelAction * ModelChecker::get_next_backtrack()
/* Consume the cached point: hand it out and clear it for the next execution */
852 ModelAction *next = priv->next_backtrack;
853 priv->next_backtrack = NULL;
858 * Processes a read model action.
859 * @param curr is the read model action to process.
860 * @return True if processing this read updates the mo_graph.
862 bool ModelChecker::process_read(ModelAction *curr)
864 Node *node = curr->get_node();
866 bool updated = false;
/* A read's source was chosen by the Node: past write, promise, or future */
867 switch (node->get_read_from_status()) {
868 case READ_FROM_PAST: {
869 const ModelAction *rf = node->get_read_from_past();
/* Speculatively update mo_graph so a bad choice can be rolled back */
872 mo_graph->startChanges();
874 ASSERT(!is_infeasible());
875 if (!check_recency(curr, rf)) {
/* Try the next read-from candidate; roll back the speculative edges */
876 if (node->increment_read_from()) {
877 mo_graph->rollbackChanges();
/* No candidates left: flag the execution as reading too many times */
880 priv->too_many_reads = true;
884 updated = r_modification_order(curr, rf);
886 mo_graph->commitChanges();
887 mo_check_promises(curr, true);
890 case READ_FROM_PROMISE: {
891 Promise *promise = curr->get_node()->get_read_from_promise();
/* add_reader() failing means the promise cannot accept this reader */
892 if (promise->add_reader(curr))
893 priv->failed_promise = true;
894 curr->set_read_from_promise(promise);
895 mo_graph->startChanges();
896 if (!check_recency(curr, promise))
897 priv->too_many_reads = true;
898 updated = r_modification_order(curr, promise);
899 mo_graph->commitChanges();
902 case READ_FROM_FUTURE: {
903 /* Read from future value */
/* Create a new Promise that some later write must resolve */
904 struct future_value fv = node->get_future_value();
905 Promise *promise = new Promise(curr, fv);
906 curr->set_read_from_promise(promise);
907 promises->push_back(promise);
908 mo_graph->startChanges();
909 updated = r_modification_order(curr, promise);
910 mo_graph->commitChanges();
/* Deliver the read value to the user thread */
916 get_thread(curr)->set_return_value(curr->get_return_value());
922 * Processes a lock, trylock, or unlock model action. @param curr is
923 * the read model action to process.
925 * The try lock operation checks whether the lock is taken. If not,
926 * it falls to the normal lock operation case. If so, it returns
929 * The lock operation has already been checked that it is enabled, so
930 * it just grabs the lock and synchronizes with the previous unlock.
932 * The unlock operation has to re-enable all of the threads that are
933 * waiting on the lock.
935 * @return True if synchronization was updated; false otherwise
937 bool ModelChecker::process_mutex(ModelAction *curr)
939 std::mutex *mutex = curr->get_mutex();
940 struct std::mutex_state *state = NULL;
943 state = mutex->get_state();
945 switch (curr->get_type()) {
946 case ATOMIC_TRYLOCK: {
/* Trylock succeeds iff the mutex is currently unowned */
947 bool success = !state->locked;
948 curr->set_try_lock(success);
/* Return 0 for failure, 1 for success (thrd_success semantics) */
950 get_thread(curr)->set_return_value(0);
953 get_thread(curr)->set_return_value(1);
955 //otherwise fall into the lock case
/* Detect use of the mutex before its initialization, via clock vectors */
957 if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
958 assert_bug("Lock access before initialization");
/* Record the owning Thread in the mutex state */
959 state->locked = get_thread(curr);
960 ModelAction *unlock = get_last_unlock(curr);
961 //synchronize with the previous unlock statement
962 if (unlock != NULL) {
963 curr->synchronize_with(unlock);
969 case ATOMIC_UNLOCK: {
970 /* wake up the other threads */
971 for (unsigned int i = 0; i < get_num_threads(); i++) {
972 Thread *t = get_thread(int_to_id(i));
973 Thread *curr_thrd = get_thread(curr);
/* Only wake threads blocked on this thread whose pending op is a lock */
974 if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
978 /* unlock the lock - after checking who was waiting on it */
979 state->locked = NULL;
/* ATOMIC_WAIT releases the mutex first, then may block below */
981 if (!curr->is_wait())
982 break; /* The rest is only for ATOMIC_WAIT */
984 /* Should we go to sleep? (simulate spurious failures) */
985 if (curr->get_node()->get_misc() == 0) {
/* Register as a waiter on the condvar and put the thread to sleep */
986 get_safe_ptr_action(condvar_waiters_map, curr->get_location())->push_back(curr);
988 scheduler->sleep(get_thread(curr));
992 case ATOMIC_NOTIFY_ALL: {
993 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
994 //activate all the waiting threads
995 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
996 scheduler->wake(get_thread(*rit));
1001 case ATOMIC_NOTIFY_ONE: {
1002 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
/* The Node's misc counter selects which waiter to wake (explores all) */
1003 int wakeupthread = curr->get_node()->get_misc();
1004 action_list_t::iterator it = waiters->begin();
1005 advance(it, wakeupthread);
1006 scheduler->wake(get_thread(*it));
1018 * @brief Check if the current pending promises allow a future value to be sent
1020 * If one of the following is true:
1021 * (a) there are no pending promises
1022 * (b) the reader and writer do not cross any promises
1023 * Then, it is safe to pass a future value back now.
1025 * Otherwise, we must save the pending future value until (a) or (b) is true
1027 * @param writer The operation which sends the future value. Must be a write.
1028 * @param reader The operation which will observe the value. Must be a read.
1029 * @return True if the future value can be sent now; false if it must wait.
1031 bool ModelChecker::promises_may_allow(const ModelAction *writer,
1032 const ModelAction *reader) const
/* Case (a): no pending promises at all */
1034 if (promises->empty())
/* Case (b): check each promise (newest first) against the reader/writer */
1036 for(int i=promises->size()-1;i>=0;i--) {
1037 ModelAction *pr=(*promises)[i]->get_reader(0);
1038 //reader is after promise...doesn't cross any promise
1041 //writer is after promise, reader before...bad...
1049 * @brief Add a future value to a reader
1051 * This function performs a few additional checks to ensure that the future
1052 * value can be feasibly observed by the reader
1054 * @param writer The operation whose value is sent. Must be a write.
1055 * @param reader The read operation which may read the future value. Must be a read.
1057 void ModelChecker::add_future_value(const ModelAction *writer, ModelAction *reader)
1059 /* Do more ambitious checks now that mo is more complete */
/* Bail if modification order already rules out reader observing writer */
1060 if (!mo_may_allow(writer, reader))
1063 Node *node = reader->get_node();
1065 /* Find an ancestor thread which exists at the time of the reader */
1066 Thread *write_thread = get_thread(writer);
1067 while (id_to_int(write_thread->get_id()) >= node->get_num_threads())
1068 write_thread = write_thread->get_parent();
/* Expiration = writer's sequence number plus the configured max delay */
1070 struct future_value fv = {
1071 writer->get_write_value(),
1072 writer->get_seq_number() + params.maxfuturedelay,
1073 write_thread->get_id(),
/* A genuinely new future value creates a new backtracking point */
1075 if (node->add_future_value(fv))
1076 set_latest_backtrack(reader);
1080 * Process a write ModelAction
1081 * @param curr The ModelAction to process
1082 * @return True if the mo_graph was updated or promises were resolved
1084 bool ModelChecker::process_write(ModelAction *curr)
1086 /* Readers to which we may send our future value */
1087 ModelVector<ModelAction *> send_fv;
1089 const ModelAction *earliest_promise_reader;
1090 bool updated_promises = false;
/* Update modification order; collects candidate future-value readers */
1092 bool updated_mod_order = w_modification_order(curr, &send_fv);
/* This write may resolve an outstanding Promise */
1093 Promise *promise = pop_promise_to_resolve(curr);
1096 earliest_promise_reader = promise->get_reader(0);
1097 updated_promises = resolve_promise(curr, promise);
1099 earliest_promise_reader = NULL;
1101 for (unsigned int i = 0; i < send_fv.size(); i++) {
1102 ModelAction *read = send_fv[i];
1104 /* Don't send future values to reads after the Promise we resolve */
1105 if (!earliest_promise_reader || *read < *earliest_promise_reader) {
1106 /* Check if future value can be sent immediately */
1107 if (promises_may_allow(curr, read)) {
1108 add_future_value(curr, read);
/* Otherwise defer it until the promise situation clears up */
1110 futurevalues->push_back(PendingFutureValue(curr, read));
1115 /* Check the pending future values */
/* Iterate backwards so erase() doesn't disturb unvisited indices */
1116 for (int i = (int)futurevalues->size() - 1; i >= 0; i--) {
1117 struct PendingFutureValue pfv = (*futurevalues)[i];
1118 if (promises_may_allow(pfv.writer, pfv.reader)) {
1119 add_future_value(pfv.writer, pfv.reader);
1120 futurevalues->erase(futurevalues->begin() + i);
1124 mo_graph->commitChanges();
1125 mo_check_promises(curr, false);
/* Writes return no meaningful value to the user thread */
1127 get_thread(curr)->set_return_value(VALUE_NONE);
1128 return updated_mod_order || updated_promises;
1132 * Process a fence ModelAction
1133 * @param curr The ModelAction to process
1134 * @return True if synchronization was updated
1136 bool ModelChecker::process_fence(ModelAction *curr)
1139 * fence-relaxed: no-op
1140 * fence-release: only log the occurence (not in this function), for
1141 * use in later synchronization
1142 * fence-acquire (this function): search for hypothetical release
1144 * fence-seq-cst: MO constraints formed in {r,w}_modification_order
1146 bool updated = false;
1147 if (curr->is_acquire()) {
1148 action_list_t *list = action_trace;
1149 action_list_t::reverse_iterator rit;
1150 /* Find X : is_read(X) && X --sb-> curr */
/* Walk the trace backwards looking at same-thread actions before curr */
1151 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1152 ModelAction *act = *rit;
/* Skip actions from other threads (not sequenced-before curr) */
1155 if (act->get_tid() != curr->get_tid())
1157 /* Stop at the beginning of the thread */
1158 if (act->is_thread_start())
1160 /* Stop once we reach a prior fence-acquire */
1161 if (act->is_fence() && act->is_acquire())
1163 if (!act->is_read())
1165 /* read-acquire will find its own release sequences */
1166 if (act->is_acquire())
1169 /* Establish hypothetical release sequences */
1170 rel_heads_list_t release_heads;
1171 get_release_seq_heads(curr, act, &release_heads);
/* Synchronize the fence with each release head; flag failures */
1172 for (unsigned int i = 0; i < release_heads.size(); i++)
1173 if (!curr->synchronize_with(release_heads[i]))
1174 set_bad_synchronization();
1175 if (release_heads.size() != 0)
1183 * @brief Process the current action for thread-related activity
1185 * Performs current-action processing for a THREAD_* ModelAction. Proccesses
1186 * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
1187 * synchronization, etc. This function is a no-op for non-THREAD actions
1188 * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
1190 * @param curr The current action
1191 * @return True if synchronization was updated or a thread completed
1193 bool ModelChecker::process_thread_action(ModelAction *curr)
1195 bool updated = false;
1197 switch (curr->get_type()) {
1198 case THREAD_CREATE: {
/* The thrd_t handle and thread-start parameters travel in the action */
1199 thrd_t *thrd = (thrd_t *)curr->get_location();
1200 struct thread_params *params = (struct thread_params *)curr->get_value();
1201 Thread *th = new Thread(thrd, params->func, params->arg, get_thread(curr));
1203 th->set_creation(curr);
1204 /* Promises can be satisfied by children */
/* If the parent could satisfy a promise, so may the new child thread */
1205 for (unsigned int i = 0; i < promises->size(); i++) {
1206 Promise *promise = (*promises)[i];
1207 if (promise->thread_is_available(curr->get_tid()))
1208 promise->add_thread(th->get_id());
/* (THREAD_JOIN) synchronize with the joined thread's last action */
1213 Thread *blocking = curr->get_thread_operand();
1214 ModelAction *act = get_last_action(blocking->get_id());
1215 curr->synchronize_with(act);
1216 updated = true; /* trigger rel-seq checks */
1219 case THREAD_FINISH: {
1220 Thread *th = get_thread(curr);
1221 /* Wake up any joining threads */
1222 for (unsigned int i = 0; i < get_num_threads(); i++) {
1223 Thread *waiting = get_thread(int_to_id(i));
1224 if (waiting->waiting_on() == th &&
1225 waiting->get_pending()->is_thread_join())
1226 scheduler->wake(waiting);
1229 /* Completed thread can't satisfy promises */
/* Removing the thread may make a promise unsatisfiable => infeasible */
1230 for (unsigned int i = 0; i < promises->size(); i++) {
1231 Promise *promise = (*promises)[i];
1232 if (promise->thread_is_available(th->get_id()))
1233 if (promise->eliminate_thread(th->get_id()))
1234 priv->failed_promise = true;
1236 updated = true; /* trigger rel-seq checks */
1239 case THREAD_START: {
/* Verify any promises pending against the newly-started thread's clock */
1240 check_promises(curr->get_tid(), NULL, curr->get_cv());
1251 * @brief Process the current action for release sequence fixup activity
1253 * Performs model-checker release sequence fixups for the current action,
1254 * forcing a single pending release sequence to break (with a given, potential
1255 * "loose" write) or to complete (i.e., synchronize). If a pending release
1256 * sequence forms a complete release sequence, then we must perform the fixup
1257 * synchronization, mo_graph additions, etc.
1259 * @param curr The current action; must be a release sequence fixup action
1260 * @param work_queue The work queue to which to add work items as they are
1263 void ModelChecker::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
1265 const ModelAction *write = curr->get_node()->get_relseq_break();
1266 struct release_seq *sequence = pending_rel_seqs->back();
1267 pending_rel_seqs->pop_back();
1269 ModelAction *acquire = sequence->acquire;
1270 const ModelAction *rf = sequence->rf;
1271 const ModelAction *release = sequence->release;
1275 ASSERT(release->same_thread(rf));
/* A NULL break-write means this fixup forces the sequence to COMPLETE:
 * synchronize acquire with the release head and propagate. */
1277 if (write == NULL) {
1279 * @todo Forcing a synchronization requires that we set
1280 * modification order constraints. For instance, we can't allow
1281 * a fixup sequence in which two separate read-acquire
1282 * operations read from the same sequence, where the first one
1283 * synchronizes and the other doesn't. Essentially, we can't
1284 * allow any writes to insert themselves between 'release' and
1288 /* Must synchronize */
1289 if (!acquire->synchronize_with(release)) {
1290 set_bad_synchronization();
1293 /* Re-check all pending release sequences */
1294 work_queue->push_back(CheckRelSeqWorkEntry(NULL));
1295 /* Re-check act for mo_graph edges */
1296 work_queue->push_back(MOEdgeWorkEntry(acquire));
1298 /* propagate synchronization to later actions */
1299 action_list_t::reverse_iterator rit = action_trace->rbegin();
1300 for (; (*rit) != acquire; rit++) {
1301 ModelAction *propagate = *rit;
1302 if (acquire->happens_before(propagate)) {
1303 propagate->synchronize_with(acquire);
1304 /* Re-check 'propagate' for mo_graph edges */
1305 work_queue->push_back(MOEdgeWorkEntry(propagate));
/* Otherwise, BREAK the sequence by interposing 'write' between the
 * release head and the store we read from. */
1309 /* Break release sequence with new edges:
1310 * release --mo--> write --mo--> rf */
1311 mo_graph->addEdge(release, write);
1312 mo_graph->addEdge(write, rf);
1315 /* See if we have realized a data race */
1320 * Initialize the current action by performing one or more of the following
1321 * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
1322 * in the NodeStack, manipulating backtracking sets, allocating and
1323 * initializing clock vectors, and computing the promises to fulfill.
1325 * @param curr The current action, as passed from the user context; may be
1326 * freed/invalidated after the execution of this function, with a different
1327 * action "returned" its place (pass-by-reference)
1328 * @return True if curr is a newly-explored action; false otherwise
1330 bool ModelChecker::initialize_curr_action(ModelAction **curr)
1332 ModelAction *newcurr;
/* The second part of an RMW is merged back into the earlier RMWR action */
1334 if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
1335 newcurr = process_rmw(*curr);
1338 if (newcurr->is_rmw())
1339 compute_promises(newcurr);
1345 (*curr)->set_seq_number(get_next_seq_num());
1347 newcurr = node_stack->explore_action(*curr, scheduler->get_enabled_array());
/* Non-NULL newcurr from the NodeStack means this action was seen on a
 * prior exploration (replay); otherwise it is brand new. */
1349 /* First restore type and order in case of RMW operation */
1350 if ((*curr)->is_rmwr())
1351 newcurr->copy_typeandorder(*curr);
1353 ASSERT((*curr)->get_location() == newcurr->get_location());
1354 newcurr->copy_from_new(*curr);
1356 /* Discard duplicate ModelAction; use action from NodeStack */
1359 /* Always compute new clock vector */
1360 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1363 return false; /* Action was explored previously */
1367 /* Always compute new clock vector */
1368 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1370 /* Assign most recent release fence */
1371 newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
1374 * Perform one-time actions when pushing new ModelAction onto
1377 if (newcurr->is_write())
1378 compute_promises(newcurr);
1379 else if (newcurr->is_relseq_fixup())
1380 compute_relseq_breakwrites(newcurr);
1381 else if (newcurr->is_wait())
1382 newcurr->get_node()->set_misc_max(2);
1383 else if (newcurr->is_notify_one()) {
1384 newcurr->get_node()->set_misc_max(get_safe_ptr_action(condvar_waiters_map, newcurr->get_location())->size());
1386 return true; /* This was a new ModelAction */
1391 * @brief Establish reads-from relation between two actions
1393 * Perform basic operations involved with establishing a concrete rf relation,
1394 * including setting the ModelAction data and checking for release sequences.
1396 * @param act The action that is reading (must be a read)
1397 * @param rf The action from which we are reading (must be a write)
1399 * @return True if this read established synchronization
1401 bool ModelChecker::read_from(ModelAction *act, const ModelAction *rf)
1404 ASSERT(rf->is_write());
1406 act->set_read_from(rf);
/* Only a read-acquire can synchronize with release sequence heads */
1407 if (act->is_acquire()) {
1408 rel_heads_list_t release_heads;
1409 get_release_seq_heads(act, act, &release_heads);
/* A non-zero head count means synchronization was established */
1410 int num_heads = release_heads.size();
1411 for (unsigned int i = 0; i < release_heads.size(); i++)
1412 if (!act->synchronize_with(release_heads[i])) {
1413 set_bad_synchronization();
1416 return num_heads > 0;
1422 * Check promises and eliminate potentially-satisfying threads when a thread is
1423 * blocked (e.g., join, lock). A thread which is waiting on another thread can
1424 * no longer satisfy a promise generated from that thread.
1426 * @param blocker The thread on which a thread is waiting
1427 * @param waiting The waiting thread
1429 void ModelChecker::thread_blocking_check_promises(Thread *blocker, Thread *waiting)
1431 for (unsigned int i = 0; i < promises->size(); i++) {
1432 Promise *promise = (*promises)[i];
/* Skip promises 'waiting' could not have satisfied anyway */
1433 if (!promise->thread_is_available(waiting->get_id()))
/* Does any reader of this promise belong to the blocking thread? */
1435 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
1436 ModelAction *reader = promise->get_reader(j);
1437 if (reader->get_tid() != blocker->get_id())
1439 if (promise->eliminate_thread(waiting->get_id())) {
1440 /* Promise has failed */
1441 priv->failed_promise = true;
1443 /* Only eliminate the 'waiting' thread once */
1451 * @brief Check whether a model action is enabled.
1453 * Checks whether a lock or join operation would be successful (i.e., is the
1454 * lock already locked, or is the joined thread already complete). If not, put
1455 * the action in a waiter list.
1457 * @param curr is the ModelAction to check whether it is enabled.
1458 * @return a bool that indicates whether the action is enabled.
1460 bool ModelChecker::check_action_enabled(ModelAction *curr) {
1461 if (curr->is_lock()) {
1462 std::mutex *lock = curr->get_mutex();
/* NOTE(review): the state check/return is elided in this listing;
 * presumably a held lock disables the action -- confirm in full source */
1463 struct std::mutex_state *state = lock->get_state();
1466 } else if (curr->is_thread_join()) {
1467 Thread *blocking = curr->get_thread_operand();
/* Joining an incomplete thread blocks; also prune promises that the
 * now-blocked thread can no longer satisfy */
1468 if (!blocking->is_complete()) {
1469 thread_blocking_check_promises(blocking, get_thread(curr));
1478 * This is the heart of the model checker routine. It performs model-checking
1479 * actions corresponding to a given "current action." Among other processes, it
1480 * calculates reads-from relationships, updates synchronization clock vectors,
1481 * forms a memory_order constraints graph, and handles replay/backtrack
1482 * execution when running permutations of previously-observed executions.
1484 * @param curr The current action to process
1485 * @return The ModelAction that is actually executed; may be different than
1486 * curr; may be NULL, if the current action is not enabled to run
1488 ModelAction * ModelChecker::check_current_action(ModelAction *curr)
1491 bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
1493 if (!check_action_enabled(curr)) {
1494 /* Make the execution look like we chose to run this action
1495 * much later, when a lock/join can succeed */
1496 get_thread(curr)->set_pending(curr);
1497 scheduler->sleep(get_thread(curr));
1501 bool newly_explored = initialize_curr_action(&curr);
1507 wake_up_sleeping_actions(curr);
1509 /* Compute fairness information for CHESS yield algorithm */
1510 if (model->params.yieldon) {
1511 curr->get_node()->update_yield(scheduler);
1514 /* Add the action to lists before any other model-checking tasks */
1515 if (!second_part_of_rmw)
1516 add_action_to_lists(curr);
1518 /* Build may_read_from set for newly-created actions */
1519 if (newly_explored && curr->is_read())
1520 build_may_read_from(curr);
1522 /* Initialize work_queue with the "current action" work */
1523 work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
/* Drain the work queue to a fixed point; each processing step may push
 * further release-sequence and mo-graph re-check entries */
1524 while (!work_queue.empty() && !has_asserted()) {
1525 WorkQueueEntry work = work_queue.front();
1526 work_queue.pop_front();
1528 switch (work.type) {
1529 case WORK_CHECK_CURR_ACTION: {
1530 ModelAction *act = work.action;
1531 bool update = false; /* update this location's release seq's */
1532 bool update_all = false; /* update all release seq's */
1534 if (process_thread_action(curr))
1537 if (act->is_read() && !second_part_of_rmw && process_read(act))
1540 if (act->is_write() && process_write(act))
1543 if (act->is_fence() && process_fence(act))
1546 if (act->is_mutex_op() && process_mutex(act))
1549 if (act->is_relseq_fixup())
1550 process_relseq_fixup(curr, &work_queue);
/* NULL location => re-check ALL pending release sequences */
1553 work_queue.push_back(CheckRelSeqWorkEntry(NULL));
1555 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1558 case WORK_CHECK_RELEASE_SEQ:
1559 resolve_release_sequences(work.location, &work_queue);
1561 case WORK_CHECK_MO_EDGES: {
1562 /** @todo Complete verification of work_queue */
1563 ModelAction *act = work.action;
1564 bool updated = false;
1566 if (act->is_read()) {
1567 const ModelAction *rf = act->get_reads_from();
1568 const Promise *promise = act->get_reads_from_promise();
1570 if (r_modification_order(act, rf))
1572 } else if (promise) {
1573 if (r_modification_order(act, promise))
1577 if (act->is_write()) {
1578 if (w_modification_order(act, NULL))
1581 mo_graph->commitChanges();
1584 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
/* After the queue settles, record any new backtracking points */
1593 check_curr_backtracking(curr);
1594 set_backtracking(curr);
/* Record 'curr' as the latest backtracking point if any exploration
 * choice (parent backtrack, misc, read-from, promise, or relseq-break
 * set) remains unexplored at this node. */
1598 void ModelChecker::check_curr_backtracking(ModelAction *curr)
1600 Node *currnode = curr->get_node();
1601 Node *parnode = currnode->get_parent();
1603 if ((parnode && !parnode->backtrack_empty()) ||
1604 !currnode->misc_empty() ||
1605 !currnode->read_from_empty() ||
1606 !currnode->promise_empty() ||
1607 !currnode->relseq_break_empty()) {
1608 set_latest_backtrack(curr);
/* @return true if any outstanding promise's expiration sequence number
 * has already been passed by the execution */
1612 bool ModelChecker::promises_expired() const
1614 for (unsigned int i = 0; i < promises->size(); i++) {
1615 Promise *promise = (*promises)[i];
1616 if (promise->get_expiration() < priv->used_sequence_numbers)
1623 * This is the strongest feasibility check available.
1624 * @return whether the current trace (partial or complete) must be a prefix of
1627 bool ModelChecker::isfeasibleprefix() const
/* Feasible only if no release sequences remain pending resolution */
1629 return pending_rel_seqs->size() == 0 && is_feasible_prefix_ignore_relseq();
1633 * Print diagnostic information about an infeasible execution
1634 * @param prefix A string to prefix the output with; if NULL, then a default
1635 * message prefix will be provided
1637 void ModelChecker::print_infeasibility(const char *prefix) const
/* Append one bracketed tag per infeasibility cause into the buffer
 * ('buf'/'ptr' declarations are elided in this listing) */
1641 if (mo_graph->checkForCycles())
1642 ptr += sprintf(ptr, "[mo cycle]");
1643 if (priv->failed_promise)
1644 ptr += sprintf(ptr, "[failed promise]");
1645 if (priv->too_many_reads)
1646 ptr += sprintf(ptr, "[too many reads]");
1647 if (priv->no_valid_reads)
1648 ptr += sprintf(ptr, "[no valid reads-from]");
1649 if (priv->bad_synchronization)
1650 ptr += sprintf(ptr, "[bad sw ordering]");
1651 if (promises_expired())
1652 ptr += sprintf(ptr, "[promise expired]");
1653 if (promises->size() != 0)
1654 ptr += sprintf(ptr, "[unresolved promise]");
1656 model_print("%s: %s\n", prefix ? prefix : "Infeasible", buf);
1660 * Returns whether the current completed trace is feasible, except for pending
1661 * release sequences.
1663 bool ModelChecker::is_feasible_prefix_ignore_relseq() const
/* Feasible if no infeasibility flag is set and all promises resolved */
1665 return !is_infeasible() && promises->size() == 0;
1669 * Check if the current partial trace is infeasible. Does not check any
1670 * end-of-execution flags, which might rule out the execution. Thus, this is
1671 * useful only for ruling an execution as infeasible.
1672 * @return whether the current partial trace is infeasible.
1674 bool ModelChecker::is_infeasible() const
/* Infeasible iff any recorded infeasibility condition holds: an mo-graph
 * cycle or any of the snapshotted priv-> flags */
1676 return mo_graph->checkForCycles() ||
1677 priv->no_valid_reads ||
1678 priv->failed_promise ||
1679 priv->too_many_reads ||
1680 priv->bad_synchronization ||
1684 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
1685 ModelAction * ModelChecker::process_rmw(ModelAction *act) {
1686 ModelAction *lastread = get_last_action(act->get_tid());
1687 lastread->process_rmw(act);
1688 if (act->is_rmw()) {
/* An RMW is mo-ordered immediately after the write (or promise) it
 * read from; record that edge in the mo_graph */
1689 if (lastread->get_reads_from())
1690 mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
1692 mo_graph->addRMWEdge(lastread->get_reads_from_promise(), lastread);
1693 mo_graph->commitChanges();
1699 * A helper function for ModelChecker::check_recency, to check if the current
1700 * thread is able to read from a different write/promise for 'params.maxreads'
1701 * number of steps and if that write/promise should become visible (i.e., is
1702 * ordered later in the modification order). This helps model memory liveness.
1704 * @param curr The current action. Must be a read.
1705 * @param rf The write/promise from which we plan to read
1706 * @param other_rf The write/promise from which we may read
1707 * @return True if we were able to read from other_rf for params.maxreads steps
1709 template <typename T, typename U>
1710 bool ModelChecker::should_read_instead(const ModelAction *curr, const T *rf, const U *other_rf) const
1712 /* Need a different write/promise */
1713 if (other_rf->equals(rf))
1716 /* Only look for "newer" writes/promises */
1717 if (!mo_graph->checkReachable(rf, other_rf))
1720 SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1721 action_list_t *list = &(*thrd_lists)[id_to_int(curr->get_tid())];
1722 action_list_t::reverse_iterator rit = list->rbegin();
1723 ASSERT((*rit) == curr);
1724 /* Skip past curr */
1727 /* Does this write/promise work for everyone? */
1728 for (int i = 0; i < params.maxreads; i++, rit++) {
1729 ModelAction *act = *rit;
/* Each of the previous maxreads reads must have been able to read
 * from other_rf for it to count as a missed visible write */
1730 if (!act->may_read_from(other_rf))
1737 * Checks whether a thread has read from the same write or Promise for too many
1738 * times without seeing the effects of a later write/Promise.
1741 * 1) there must be a different write/promise that we could read from,
1742 * 2) we must have read from the same write/promise in excess of maxreads times,
1743 * 3) that other write/promise must have been in the reads_from set for maxreads times, and
1744 * 4) that other write/promise must be mod-ordered after the write/promise we are reading.
1746 * If so, we decide that the execution is no longer feasible.
1748 * @param curr The current action. Must be a read.
1749 * @param rf The ModelAction/Promise from which we might read.
1750 * @return True if the read should succeed; false otherwise
1752 template <typename T>
1753 bool ModelChecker::check_recency(ModelAction *curr, const T *rf) const
/* Liveness checking disabled when maxreads is 0 */
1755 if (!params.maxreads)
1758 //NOTE: Next check is just optimization, not really necessary....
1759 if (curr->get_node()->get_read_from_past_size() +
1760 curr->get_node()->get_read_from_promise_size() <= 1)
1763 SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1764 int tid = id_to_int(curr->get_tid());
1765 ASSERT(tid < (int)thrd_lists->size());
1766 action_list_t *list = &(*thrd_lists)[tid];
1767 action_list_t::reverse_iterator rit = list->rbegin();
1768 ASSERT((*rit) == curr);
1769 /* Skip past curr */
1772 action_list_t::reverse_iterator ritcopy = rit;
1773 /* See if we have enough reads from the same value */
1774 for (int count = 0; count < params.maxreads; ritcopy++, count++) {
1775 if (ritcopy == list->rend())
1777 ModelAction *act = *ritcopy;
1778 if (!act->is_read())
1780 if (act->get_reads_from_promise() && !act->get_reads_from_promise()->equals(rf))
1782 if (act->get_reads_from() && !act->get_reads_from()->equals(rf))
1784 if (act->get_node()->get_read_from_past_size() +
1785 act->get_node()->get_read_from_promise_size() <= 1)
/* Would any other candidate write/promise have been visible for all
 * maxreads steps? If so, declare a liveness failure. */
1788 for (int i = 0; i < curr->get_node()->get_read_from_past_size(); i++) {
1789 const ModelAction *write = curr->get_node()->get_read_from_past(i);
1790 if (should_read_instead(curr, rf, write))
1791 return false; /* liveness failure */
1793 for (int i = 0; i < curr->get_node()->get_read_from_promise_size(); i++) {
1794 const Promise *promise = curr->get_node()->get_read_from_promise(i);
1795 if (should_read_instead(curr, rf, promise))
1796 return false; /* liveness failure */
1802 * Updates the mo_graph with the constraints imposed from the current
1805 * Basic idea is the following: Go through each other thread and find
1806 * the last action that happened before our read. Two cases:
1808 * (1) The action is a write => that write must either occur before
1809 * the write we read from or be the write we read from.
1811 * (2) The action is a read => the write that that action read from
1812 * must occur before the write we read from or be the same write.
1814 * @param curr The current action. Must be a read.
1815 * @param rf The ModelAction or Promise that curr reads from. Must be a write.
1816 * @return True if modification order edges were added; false otherwise
1818 template <typename rf_type>
1819 bool ModelChecker::r_modification_order(ModelAction *curr, const rf_type *rf)
1821 SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1824 ASSERT(curr->is_read());
1826 /* Last SC fence in the current thread */
1827 ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1828 ModelAction *last_sc_write = NULL;
1829 if (curr->is_seqcst())
1830 last_sc_write = get_last_seq_cst_write(curr);
1832 /* Iterate over all threads */
1833 for (i = 0; i < thrd_lists->size(); i++) {
1834 /* Last SC fence in thread i */
1835 ModelAction *last_sc_fence_thread_local = NULL;
1836 if (int_to_id((int)i) != curr->get_tid())
1837 last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(i), NULL);
1839 /* Last SC fence in thread i, before last SC fence in current thread */
1840 ModelAction *last_sc_fence_thread_before = NULL;
1841 if (last_sc_fence_local)
1842 last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1844 /* Iterate over actions in thread, starting from most recent */
1845 action_list_t *list = &(*thrd_lists)[i];
1846 action_list_t::reverse_iterator rit;
1847 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1848 ModelAction *act = *rit;
1853 /* Don't want to add reflexive edges on 'rf' */
1854 if (act->equals(rf)) {
1855 if (act->happens_before(curr))
1861 if (act->is_write()) {
1862 /* C++, Section 29.3 statement 5 */
1863 if (curr->is_seqcst() && last_sc_fence_thread_local &&
1864 *act < *last_sc_fence_thread_local) {
1865 added = mo_graph->addEdge(act, rf) || added;
1868 /* C++, Section 29.3 statement 4 */
1869 else if (act->is_seqcst() && last_sc_fence_local &&
1870 *act < *last_sc_fence_local) {
1871 added = mo_graph->addEdge(act, rf) || added;
1874 /* C++, Section 29.3 statement 6 */
1875 else if (last_sc_fence_thread_before &&
1876 *act < *last_sc_fence_thread_before) {
1877 added = mo_graph->addEdge(act, rf) || added;
1882 /* C++, Section 29.3 statement 3 (second subpoint) */
1883 if (curr->is_seqcst() && last_sc_write && act == last_sc_write) {
1884 added = mo_graph->addEdge(act, rf) || added;
1889 * Include at most one act per-thread that "happens
1892 if (act->happens_before(curr)) {
1893 if (act->is_write()) {
1894 added = mo_graph->addEdge(act, rf) || added;
/* act is a read: order the write IT read from before rf */
1896 const ModelAction *prevrf = act->get_reads_from();
1897 const Promise *prevrf_promise = act->get_reads_from_promise();
1899 if (!prevrf->equals(rf))
1900 added = mo_graph->addEdge(prevrf, rf) || added;
1901 } else if (!prevrf_promise->equals(rf)) {
1902 added = mo_graph->addEdge(prevrf_promise, rf) || added;
1911 * All compatible, thread-exclusive promises must be ordered after any
1912 * concrete loads from the same thread
1914 for (unsigned int i = 0; i < promises->size(); i++)
1915 if ((*promises)[i]->is_compatible_exclusive(curr))
1916 added = mo_graph->addEdge(rf, (*promises)[i]) || added;
1922 * Updates the mo_graph with the constraints imposed from the current write.
1924 * Basic idea is the following: Go through each other thread and find
1925 * the latest action that happened before our write. Two cases:
1927 * (1) The action is a write => that write must occur before
1930 * (2) The action is a read => the write that that action read from
1931 * must occur before the current write.
1933 * This method also handles two other issues:
1935 * (I) Sequential Consistency: Making sure that if the current write is
1936 * seq_cst, that it occurs after the previous seq_cst write.
1938 * (II) Sending the write back to non-synchronizing reads.
1940 * @param curr The current action. Must be a write.
1941 * @param send_fv A vector for stashing reads to which we may pass our future
1942 * value. If NULL, then don't record any future values.
1943 * @return True if modification order edges were added; false otherwise
1945 bool ModelChecker::w_modification_order(ModelAction *curr, ModelVector<ModelAction *> *send_fv)
1947 SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1950 ASSERT(curr->is_write());
1952 if (curr->is_seqcst()) {
1953 /* We have to at least see the last sequentially consistent write,
1954 so we are initialized. */
1955 ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
1956 if (last_seq_cst != NULL) {
1957 added = mo_graph->addEdge(last_seq_cst, curr) || added;
1961 /* Last SC fence in the current thread */
1962 ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1964 /* Iterate over all threads */
1965 for (i = 0; i < thrd_lists->size(); i++) {
1966 /* Last SC fence in thread i, before last SC fence in current thread */
1967 ModelAction *last_sc_fence_thread_before = NULL;
1968 if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
1969 last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1971 /* Iterate over actions in thread, starting from most recent */
1972 action_list_t *list = &(*thrd_lists)[i];
1973 action_list_t::reverse_iterator rit;
1974 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1975 ModelAction *act = *rit;
1978 * 1) If RMW and it actually read from something, then we
1979 * already have all relevant edges, so just skip to next
1982 * 2) If RMW and it didn't read from anything, we should
1983 * add whatever edge we can get to speed up convergence.
1985 * 3) If normal write, we need to look at earlier actions, so
1986 * continue processing list.
1988 if (curr->is_rmw()) {
1989 if (curr->get_reads_from() != NULL)
1997 /* C++, Section 29.3 statement 7 */
1998 if (last_sc_fence_thread_before && act->is_write() &&
1999 *act < *last_sc_fence_thread_before) {
2000 added = mo_graph->addEdge(act, curr) || added;
2005 * Include at most one act per-thread that "happens
2008 if (act->happens_before(curr)) {
2010 * Note: if act is RMW, just add edge:
2012 * The following edge should be handled elsewhere:
2013 * readfrom(act) --mo--> act
2015 if (act->is_write())
2016 added = mo_graph->addEdge(act, curr) || added;
2017 else if (act->is_read()) {
2018 //if previous read accessed a null, just keep going
2019 if (act->get_reads_from() == NULL)
2021 added = mo_graph->addEdge(act->get_reads_from(), curr) || added;
2024 } else if (act->is_read() && !act->could_synchronize_with(curr) &&
2025 !act->same_thread(curr)) {
2026 /* We have an action that:
2027 (1) did not happen before us
2028 (2) is a read and we are a write
2029 (3) cannot synchronize with us
2030 (4) is in a different thread
2032 that read could potentially read from our write. Note that
2033 these checks are overly conservative at this point, we'll
2034 do more checks before actually removing the
2038 if (send_fv && thin_air_constraint_may_allow(curr, act)) {
2039 if (!is_infeasible())
2040 send_fv->push_back(act);
2041 else if (curr->is_rmw() && act->is_rmw() && curr->get_reads_from() && curr->get_reads_from() == act->get_reads_from())
2042 add_future_value(curr, act);
2049 * All compatible, thread-exclusive promises must be ordered after any
2050 * concrete stores to the same thread, or else they can be merged with
2053 for (unsigned int i = 0; i < promises->size(); i++)
2054 if ((*promises)[i]->is_compatible_exclusive(curr))
2055 added = mo_graph->addEdge(curr, (*promises)[i]) || added;
2060 /** Arbitrary reads from the future are not allowed. Section 29.3
2061 * part 9 places some constraints. This method checks one result of this
2062 * constraint. Others require compiler support. */
2063 bool ModelChecker::thin_air_constraint_may_allow(const ModelAction *writer, const ModelAction *reader) const
/* Non-RMW endpoints cannot form the prohibited RMW cycle */
2065 if (!writer->is_rmw())
2068 if (!reader->is_rmw())
/* Walk the reads-from chain backward from 'writer'; disallow if it
 * passes through 'reader' or a same-thread action that hb-precedes it */
2071 for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
2072 if (search == reader)
2074 if (search->get_tid() == reader->get_tid() &&
2075 search->happens_before(reader))
2083 * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
2084 * some constraints. This method checks the following constraint (others
2085 * require compiler support):
2087 * If X --hb-> Y --mo-> Z, then X should not read from Z.
2089 bool ModelChecker::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
2091 SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, reader->get_location());
2093 /* Iterate over all threads */
2094 for (i = 0; i < thrd_lists->size(); i++) {
2095 const ModelAction *write_after_read = NULL;
2097 /* Iterate over actions in thread, starting from most recent */
2098 action_list_t *list = &(*thrd_lists)[i];
2099 action_list_t::reverse_iterator rit;
2100 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2101 ModelAction *act = *rit;
2103 /* Don't disallow due to act == reader */
2104 if (!reader->happens_before(act) || reader == act)
/* Track the earliest write (or the write read by a read) that is
 * hb-ordered after 'reader' -- the 'Y' in the constraint above */
2106 else if (act->is_write())
2107 write_after_read = act;
2108 else if (act->is_read() && act->get_reads_from() != NULL)
2109 write_after_read = act->get_reads_from();
/* reader --hb--> write_after_read --mo--> writer would be illegal */
2112 if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
2119 * Finds the head(s) of the release sequence(s) containing a given ModelAction.
2120 * The ModelAction under consideration is expected to be taking part in
2121 * release/acquire synchronization as an object of the "reads from" relation.
2122 * Note that this can only provide release sequence support for RMW chains
2123 * which do not read from the future, as those actions cannot be traced until
2124 * their "promise" is fulfilled. Similarly, we may not even establish the
2125 * presence of a release sequence with certainty, as some modification order
2126 * constraints may be decided further in the future. Thus, this function
2127 * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
2128 * and a boolean representing certainty.
2130 * @param rf The action that might be part of a release sequence. Must be a
2132 * @param release_heads A pass-by-reference style return parameter. After
2133 * execution of this function, release_heads will contain the heads of all the
2134 * relevant release sequences, if any exists with certainty
2135 * @param pending A pass-by-reference style return parameter which is only used
2136 * when returning false (i.e., uncertain). Returns most information regarding
2137 * an uncertain release sequence, including any write operations that might
2138 * break the sequence.
2139 * @return true, if the ModelChecker is certain that release_heads is complete;
2142 bool ModelChecker::release_seq_heads(const ModelAction *rf,
2143 rel_heads_list_t *release_heads,
2144 struct release_seq *pending) const
2146 /* Only check for release sequences if there are no cycles */
2147 if (mo_graph->checkForCycles())
/* Walk the RMW chain backward via reads-from; each release store (or
 * its preceding fence-release) along the way heads a release sequence */
2150 for ( ; rf != NULL; rf = rf->get_reads_from()) {
2151 ASSERT(rf->is_write());
2153 if (rf->is_release())
2154 release_heads->push_back(rf);
2155 else if (rf->get_last_fence_release())
2156 release_heads->push_back(rf->get_last_fence_release());
2158 break; /* End of RMW chain */
2160 /** @todo Need to be smarter here... In the linux lock
2161 * example, this will run to the beginning of the program for
2163 /** @todo The way to be smarter here is to keep going until 1
2164 * thread has a release preceded by an acquire and you've seen
2167 /* acq_rel RMW is a sufficient stopping condition */
2168 if (rf->is_acquire() && rf->is_release())
2169 return true; /* complete */
2172 /* read from future: need to settle this later */
2174 return false; /* incomplete */
2177 if (rf->is_release())
2178 return true; /* complete */
2180 /* else relaxed write
2181 * - check for fence-release in the same thread (29.8, stmt. 3)
2182 * - check modification order for contiguous subsequence
2183 * -> rf must be same thread as release */
2185 const ModelAction *fence_release = rf->get_last_fence_release();
2186 /* Synchronize with a fence-release unconditionally; we don't need to
2187 * find any more "contiguous subsequence..." for it */
2189 release_heads->push_back(fence_release);
2191 int tid = id_to_int(rf->get_tid());
2192 SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, rf->get_location());
2193 action_list_t *list = &(*thrd_lists)[tid];
2194 action_list_t::const_reverse_iterator rit;
2196 /* Find rf in the thread list */
2197 rit = std::find(list->rbegin(), list->rend(), rf);
2198 ASSERT(rit != list->rend());
2200 /* Find the last {write,fence}-release */
2201 for (; rit != list->rend(); rit++) {
2202 if (fence_release && *(*rit) < *fence_release)
2204 if ((*rit)->is_release())
2207 if (rit == list->rend()) {
2208 /* No write-release in this thread */
2209 return true; /* complete */
2210 } else if (fence_release && *(*rit) < *fence_release) {
2211 /* The fence-release is more recent (and so, "stronger") than
2212 * the most recent write-release */
2213 return true; /* complete */
2214 } /* else, need to establish contiguous release sequence */
2215 ModelAction *release = *rit;
2217 ASSERT(rf->same_thread(release));
2219 pending->writes.clear();
/* For every other thread, decide whether one of its (future) writes
 * could still slip between 'release' and 'rf' in modification order */
2221 bool certain = true;
2222 for (unsigned int i = 0; i < thrd_lists->size(); i++) {
2223 if (id_to_int(rf->get_tid()) == (int)i)
2225 list = &(*thrd_lists)[i];
2227 /* Can we ensure no future writes from this thread may break
2228 * the release seq? */
2229 bool future_ordered = false;
2231 ModelAction *last = get_last_action(int_to_id(i));
2232 Thread *th = get_thread(int_to_id(i));
2233 if ((last && rf->happens_before(last)) ||
2236 future_ordered = true;
2238 ASSERT(!th->is_model_thread() || future_ordered);
2240 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2241 const ModelAction *act = *rit;
2242 /* Reach synchronization -> this thread is complete */
2243 if (act->happens_before(release))
2245 if (rf->happens_before(act)) {
2246 future_ordered = true;
2250 /* Only non-RMW writes can break release sequences */
2251 if (!act->is_write() || act->is_rmw())
2254 /* Check modification order */
2255 if (mo_graph->checkReachable(rf, act)) {
2256 /* rf --mo--> act */
2257 future_ordered = true;
2260 if (mo_graph->checkReachable(act, release))
2261 /* act --mo--> release */
2263 if (mo_graph->checkReachable(release, act) &&
2264 mo_graph->checkReachable(act, rf)) {
2265 /* release --mo-> act --mo--> rf */
2266 return true; /* complete */
2268 /* act may break release sequence */
2269 pending->writes.push_back(act);
2272 if (!future_ordered)
2273 certain = false; /* This thread is uncertain */
2277 release_heads->push_back(release);
2278 pending->writes.clear();
2280 pending->release = release;
2287  * An interface for getting the release sequence head(s) with which a
2288  * given ModelAction must synchronize. This function only returns a non-empty
2289  * result when it can locate a release sequence head with certainty. Otherwise,
2290  * it may mark the internal state of the ModelChecker so that it will handle
2291  * the release sequence at a later time, causing @a acquire to update its
2292  * synchronization at some later point in execution.
2294  * @param acquire The 'acquire' action that may synchronize with a release
2296  * @param read The read action that may read from a release sequence; this may
2297  * be the same as acquire, or else an earlier action in the same thread (i.e.,
2298  * when 'acquire' is a fence-acquire)
2299  * @param release_heads A pass-by-reference return parameter. Will be filled
2300  * with the head(s) of the release sequence(s), if they exists with certainty.
2301  * @see ModelChecker::release_seq_heads
2303 void ModelChecker::get_release_seq_heads(ModelAction *acquire,
2304 ModelAction *read, rel_heads_list_t *release_heads)
2306 const ModelAction *rf = read->get_reads_from();
/* Scratch bookkeeping struct for this (potential) release sequence;
 * calloc'ed so unset fields start out NULL/zero */
2307 struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
2308 sequence->acquire = acquire;
2309 sequence->read = read;
/* If the heads could not be established with certainty, stash the struct so
 * resolve_release_sequences() can re-check it lazily as more modification
 * order info arrives; otherwise the scratch struct is no longer needed */
2311 if (!release_seq_heads(rf, release_heads, sequence)) {
2312 /* add act to 'lazy checking' list */
2313 pending_rel_seqs->push_back(sequence);
2315 snapshot_free(sequence);
2320  * Attempt to resolve all stashed operations that might synchronize with a
2321  * release sequence for a given location. This implements the "lazy" portion of
2322  * determining whether or not a release sequence was contiguous, since not all
2323  * modification order information is present at the time an action occurs.
2325  * @param location The location/object that should be checked for release
2326  * sequence resolutions. A NULL value means to check all locations.
2327  * @param work_queue The work queue to which to add work items as they are
2329  * @return True if any updates occurred (new synchronization, new mo_graph
2332 bool ModelChecker::resolve_release_sequences(void *location, work_queue_t *work_queue)
2334 bool updated = false;
/* Walk the stash of unresolved release sequences; entries are erased
 * in-place as they complete, so the iterator is advanced via erase() */
2335 SnapVector<struct release_seq *>::iterator it = pending_rel_seqs->begin();
2336 while (it != pending_rel_seqs->end()) {
2337 struct release_seq *pending = *it;
2338 ModelAction *acquire = pending->acquire;
2339 const ModelAction *read = pending->read;
2341 /* Only resolve sequences on the given location, if provided */
2342 if (location && read->get_location() != location) {
2347 const ModelAction *rf = read->get_reads_from();
2348 rel_heads_list_t release_heads;
/* Re-run the certainty check with the (possibly updated) mo_graph */
2350 complete = release_seq_heads(rf, &release_heads, pending);
2351 for (unsigned int i = 0; i < release_heads.size(); i++) {
2352 if (!acquire->has_synchronized_with(release_heads[i])) {
/* A failed synchronize_with() indicates incorrectly-ordered
 * synchronization; record it so the execution can be flagged */
2353 if (acquire->synchronize_with(release_heads[i]))
2356 set_bad_synchronization();
2361 /* Re-check all pending release sequences */
2362 work_queue->push_back(CheckRelSeqWorkEntry(NULL));
2363 /* Re-check read-acquire for mo_graph edges */
2364 if (acquire->is_read())
2365 work_queue->push_back(MOEdgeWorkEntry(acquire));
2367 /* propagate synchronization to later actions */
2368 action_list_t::reverse_iterator rit = action_trace->rbegin();
2369 for (; (*rit) != acquire; rit++) {
2370 ModelAction *propagate = *rit;
2371 if (acquire->happens_before(propagate)) {
2372 propagate->synchronize_with(acquire);
2373 /* Re-check 'propagate' for mo_graph edges */
2374 work_queue->push_back(MOEdgeWorkEntry(propagate));
/* Fully resolved: drop the entry and free its scratch struct; erase()
 * yields the iterator to the next pending sequence */
2379 it = pending_rel_seqs->erase(it);
2380 snapshot_free(pending);
2386 // If we resolved promises or data races, see if we have realized a data race.
2393  * Performs various bookkeeping operations for the current ModelAction. For
2394  * instance, adds action to the per-object, per-thread action vector and to the
2395  * action trace list of all thread actions.
2397  * @param act is the ModelAction to add.
2399 void ModelChecker::add_action_to_lists(ModelAction *act)
2401 int tid = id_to_int(act->get_tid());
2402 ModelAction *uninit = NULL;
2404 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
/* First action on an atomic variable: seed an UNINIT pseudo-write at the
 * front of the per-object list so reads always have a candidate store */
2405 if (list->empty() && act->is_atomic_var()) {
2406 uninit = get_uninitialized_action(act);
2407 uninit_id = id_to_int(uninit->get_tid());
2408 list->push_front(uninit);
2410 list->push_back(act);
2412 action_trace->push_back(act);
2414 action_trace->push_front(uninit);
/* Per-object, per-thread vector; resized to the max thread count so the
 * tid index is always in range */
2416 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, act->get_location());
2417 if (tid >= (int)vec->size())
2418 vec->resize(priv->next_thread_id);
2419 (*vec)[tid].push_back(act);
2421 (*vec)[uninit_id].push_front(uninit);
2423 if ((int)thrd_last_action->size() <= tid)
2424 thrd_last_action->resize(get_num_threads())
2425 (*thrd_last_action)[tid] = act;
2427 (*thrd_last_action)[uninit_id] = uninit;
/* Track the most recent fence-release per thread for release-sequence
 * computations */
2429 if (act->is_fence() && act->is_release()) {
2430 if ((int)thrd_last_fence_release->size() <= tid)
2431 thrd_last_fence_release->resize(get_num_threads());
2432 (*thrd_last_fence_release)[tid] = act;
/* A wait also operates on its mutex: record it under the mutex location
 * too (the mutex address is stashed in the action's value) */
2435 if (act->is_wait()) {
2436 void *mutex_loc = (void *) act->get_value();
2437 get_safe_ptr_action(obj_map, mutex_loc)->push_back(act);
2439 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, mutex_loc);
2440 if (tid >= (int)vec->size())
2441 vec->resize(priv->next_thread_id);
2442 (*vec)[tid].push_back(act);
2447 * @brief Get the last action performed by a particular Thread
2448 * @param tid The thread ID of the Thread in question
2449 * @return The last action in the thread
2451 ModelAction * ModelChecker::get_last_action(thread_id_t tid) const
2453 int threadid = id_to_int(tid);
2454 if (threadid < (int)thrd_last_action->size())
2455 return (*thrd_last_action)[id_to_int(tid)];
2461 * @brief Get the last fence release performed by a particular Thread
2462 * @param tid The thread ID of the Thread in question
2463 * @return The last fence release in the thread, if one exists; NULL otherwise
2465 ModelAction * ModelChecker::get_last_fence_release(thread_id_t tid) const
2467 int threadid = id_to_int(tid);
2468 if (threadid < (int)thrd_last_fence_release->size())
2469 return (*thrd_last_fence_release)[id_to_int(tid)];
2475  * Gets the last memory_order_seq_cst write (in the total global sequence)
2476  * performed on a particular object (i.e., memory location), not including the
2478  * @param curr The current ModelAction; also denotes the object location to
2480  * @return The last seq_cst write
2482 ModelAction * ModelChecker::get_last_seq_cst_write(ModelAction *curr) const
2484 void *location = curr->get_location();
2485 action_list_t *list = get_safe_ptr_action(obj_map, location);
2486 /* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
2487 action_list_t::reverse_iterator rit;
/* First, walk backwards to position the iterator at curr itself */
2488 for (rit = list->rbegin(); (*rit) != curr; rit++)
2490 rit++; /* Skip past curr */
/* Then continue backwards to the most recent seq_cst write before curr */
2491 for ( ; rit != list->rend(); rit++)
2492 if ((*rit)->is_write() && (*rit)->is_seqcst())
2498  * Gets the last memory_order_seq_cst fence (in the total global sequence)
2499  * performed in a particular thread, prior to a particular fence.
2500  * @param tid The ID of the thread to check
2501  * @param before_fence The fence from which to begin the search; if NULL, then
2502  * search for the most recent fence in the thread.
2503  * @return The last prior seq_cst fence in the thread, if exists; otherwise, NULL
2505 ModelAction * ModelChecker::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
2507 /* All fences should have NULL location */
2508 action_list_t *list = get_safe_ptr_action(obj_map, NULL);
2509 action_list_t::reverse_iterator rit = list->rbegin();
/* When a starting fence is given, first rewind the iterator to it so the
 * search below only considers strictly-earlier actions */
2512 for (; rit != list->rend(); rit++)
2513 if (*rit == before_fence)
2516 ASSERT(*rit == before_fence);
/* Scan backwards for the most recent seq_cst fence in thread 'tid' */
2520 for (; rit != list->rend(); rit++)
2521 if ((*rit)->is_fence() && (tid == (*rit)->get_tid()) && (*rit)->is_seqcst())
2527 * Gets the last unlock operation performed on a particular mutex (i.e., memory
2528 * location). This function identifies the mutex according to the current
2529 * action, which is presumed to perform on the same mutex.
2530 * @param curr The current ModelAction; also denotes the object location to
2532 * @return The last unlock operation
2534 ModelAction * ModelChecker::get_last_unlock(ModelAction *curr) const
2536 void *location = curr->get_location();
2537 action_list_t *list = get_safe_ptr_action(obj_map, location);
2538 /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
2539 action_list_t::reverse_iterator rit;
2540 for (rit = list->rbegin(); rit != list->rend(); rit++)
2541 if ((*rit)->is_unlock() || (*rit)->is_wait())
2546 ModelAction * ModelChecker::get_parent_action(thread_id_t tid) const
2548 ModelAction *parent = get_last_action(tid);
2550 parent = get_thread(tid)->get_creation();
2555 * Returns the clock vector for a given thread.
2556 * @param tid The thread whose clock vector we want
2557 * @return Desired clock vector
2559 ClockVector * ModelChecker::get_cv(thread_id_t tid) const
2561 return get_parent_action(tid)->get_cv();
2565 * @brief Find the promise (if any) to resolve for the current action and
2566 * remove it from the pending promise vector
2567 * @param curr The current ModelAction. Should be a write.
2568 * @return The Promise to resolve, if any; otherwise NULL
2570 Promise * ModelChecker::pop_promise_to_resolve(const ModelAction *curr)
2572 for (unsigned int i = 0; i < promises->size(); i++)
2573 if (curr->get_node()->get_promise(i)) {
2574 Promise *ret = (*promises)[i];
2575 promises->erase(promises->begin() + i);
2582  * Resolve a Promise with a current write.
2583  * @param write The ModelAction that is fulfilling Promises
2584  * @param promise The Promise to resolve
2585  * @return True if the Promise was successfully resolved; false otherwise
2587 bool ModelChecker::resolve_promise(ModelAction *write, Promise *promise)
2589 ModelVector<ModelAction *> actions_to_check;
/* Point every reader of this promise at the fulfilling write */
2591 for (unsigned int i = 0; i < promise->get_num_readers(); i++) {
2592 ModelAction *read = promise->get_reader(i);
2593 read_from(read, write);
2594 actions_to_check.push_back(read);
2596 /* Make sure the promise's value matches the write's value */
2597 ASSERT(promise->is_compatible(write) && promise->same_value(write));
/* A failed mo_graph resolution dooms this execution */
2598 if (!mo_graph->resolvePromise(promise, write))
2599 priv->failed_promise = true;
2602 * @todo It is possible to end up in an inconsistent state, where a
2603 * "resolved" promise may still be referenced if
2604 * CycleGraph::resolvePromise() failed, so don't delete 'promise'.
2606 * Note that the inconsistency only matters when dumping mo_graph to
2612 //Check whether reading these writes has made threads unable to
2614 for (unsigned int i = 0; i < actions_to_check.size(); i++) {
2615 ModelAction *read = actions_to_check[i];
2616 mo_check_promises(read, true);
2623  * Compute the set of promises that could potentially be satisfied by this
2624  * action. Note that the set computation actually appears in the Node, not in
2626  * @param curr The ModelAction that may satisfy promises
2628 void ModelChecker::compute_promises(ModelAction *curr)
2630 for (unsigned int i = 0; i < promises->size(); i++) {
2631 Promise *promise = (*promises)[i];
/* Skip promises whose location/value do not match this action */
2632 if (!promise->is_compatible(curr) || !promise->same_value(curr))
2635 bool satisfy = true;
/* A reader that already ordered itself w.r.t. curr cannot read from it
 * as a future value */
2636 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2637 const ModelAction *act = promise->get_reader(j);
2638 if (act->happens_before(curr) ||
2639 act->could_synchronize_with(curr)) {
/* Record the satisfiable promise index on curr's Node */
2645 curr->get_node()->set_promise(i);
2649 /** Checks promises in response to change in ClockVector Threads. */
2650 void ModelChecker::check_promises(thread_id_t tid, ClockVector *old_cv, ClockVector *merge_cv)
2652 for (unsigned int i = 0; i < promises->size(); i++) {
2653 Promise *promise = (*promises)[i];
2654 if (!promise->thread_is_available(tid))
2656 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2657 const ModelAction *act = promise->get_reader(j);
/* Thread 'tid' newly synchronized past this promised read (it was
 * not synchronized under old_cv but is under merge_cv), so it can
 * no longer satisfy the promise */
2658 if ((!old_cv || !old_cv->synchronized_since(act)) &&
2659 merge_cv->synchronized_since(act)) {
2660 if (promise->eliminate_thread(tid)) {
2661 /* Promise has failed */
2662 priv->failed_promise = true;
2670 void ModelChecker::check_promises_thread_disabled()
2672 for (unsigned int i = 0; i < promises->size(); i++) {
2673 Promise *promise = (*promises)[i];
2674 if (promise->has_failed()) {
2675 priv->failed_promise = true;
2682  * @brief Checks promises in response to addition to modification order for
2685  * We test whether threads are still available for satisfying promises after an
2686  * addition to our modification order constraints. Those that are unavailable
2687  * are "eliminated". Once all threads are eliminated from satisfying a promise,
2688  * that promise has failed.
2690  * @param act The ModelAction which updated the modification order
2691  * @param is_read_check Should be true if act is a read and we must check for
2692  * updates to the store from which it read (there is a distinction here for
2693  * RMW's, which are both a load and a store)
2695 void ModelChecker::mo_check_promises(const ModelAction *act, bool is_read_check)
/* The store whose mo-placement we must test: either act itself, or (for a
 * read check) the store act read from */
2697 const ModelAction *write = is_read_check ? act->get_reads_from() : act;
2699 for (unsigned int i = 0; i < promises->size(); i++) {
2700 Promise *promise = (*promises)[i];
2702 // Is this promise on the same location?
2703 if (!promise->same_location(write))
2706 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2707 const ModelAction *pread = promise->get_reader(j);
2708 if (!pread->happens_before(act))
/* checkPromise() returning true means the mo constraints have made
 * the promise unsatisfiable */
2710 if (mo_graph->checkPromise(write, promise)) {
2711 priv->failed_promise = true;
2717 // Don't do any lookups twice for the same thread
2718 if (!promise->thread_is_available(act->get_tid()))
/* The promised future value is mo-ordered before this write; test
 * whether the promise can still be satisfied */
2721 if (mo_graph->checkReachable(promise, write)) {
2722 if (mo_graph->checkPromise(write, promise)) {
2723 priv->failed_promise = true;
2731 * Compute the set of writes that may break the current pending release
2732 * sequence. This information is extracted from previou release sequence
2735 * @param curr The current ModelAction. Must be a release sequence fixup
2738 void ModelChecker::compute_relseq_breakwrites(ModelAction *curr)
2740 if (pending_rel_seqs->empty())
2743 struct release_seq *pending = pending_rel_seqs->back();
2744 for (unsigned int i = 0; i < pending->writes.size(); i++) {
2745 const ModelAction *write = pending->writes[i];
2746 curr->get_node()->add_relseq_break(write);
2749 /* NULL means don't break the sequence; just synchronize */
2750 curr->get_node()->add_relseq_break(NULL);
2754  * Build up an initial set of all past writes that this 'read' action may read
2755  * from, as well as any previously-observed future values that must still be valid.
2757  * @param curr is the current ModelAction that we are exploring; it must be a
2760 void ModelChecker::build_may_read_from(ModelAction *curr)
2762 SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
2764 ASSERT(curr->is_read());
2766 ModelAction *last_sc_write = NULL;
/* A seq_cst read is constrained by the most recent seq_cst write */
2768 if (curr->is_seqcst())
2769 last_sc_write = get_last_seq_cst_write(curr);
2771 /* Iterate over all threads */
2772 for (i = 0; i < thrd_lists->size(); i++) {
2773 /* Iterate over actions in thread, starting from most recent */
2774 action_list_t *list = &(*thrd_lists)[i];
2775 action_list_t::reverse_iterator rit;
2776 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2777 ModelAction *act = *rit;
2779 /* Only consider 'write' actions */
2780 if (!act->is_write() || act == curr)
2783 /* Don't consider more than one seq_cst write if we are a seq_cst read. */
2784 bool allow_read = true;
2786 if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
2788 else if (curr->get_sleep_flag() && !curr->is_seqcst() && !sleep_can_read_from(curr, act))
2792 /* Only add feasible reads */
/* Trial run: tentatively add the mo edges this read would imply,
 * test feasibility, then roll the graph back */
2793 mo_graph->startChanges();
2794 r_modification_order(curr, act);
2795 if (!is_infeasible())
2796 curr->get_node()->add_read_from_past(act);
2797 mo_graph->rollbackChanges();
2800 /* Include at most one act per-thread that "happens before" curr */
2801 if (act->happens_before(curr))
2806 /* Inherit existing, promised future values */
2807 for (i = 0; i < promises->size(); i++) {
2808 const Promise *promise = (*promises)[i];
2809 const ModelAction *promise_read = promise->get_reader(0);
2810 if (promise_read->same_var(curr)) {
2811 /* Only add feasible future-values */
2812 mo_graph->startChanges();
2813 r_modification_order(curr, promise);
2814 if (!is_infeasible())
2815 curr->get_node()->add_read_from_promise(promise_read);
2816 mo_graph->rollbackChanges();
2820 /* We may find no valid may-read-from only if the execution is doomed */
2821 if (!curr->get_node()->read_from_size()) {
2822 priv->no_valid_reads = true;
2826 if (DBG_ENABLED()) {
2827 model_print("Reached read action:\n");
2829 model_print("Printing read_from_past\n");
2830 curr->get_node()->print_read_from_past();
2831 model_print("End printing read_from_past\n");
/* Determine whether a thread in the sleep set may read from 'write'.
 * Walks the reads-from chain backwards through RMW operations, checking the
 * sleep status recorded in each write's parent Node. */
2835 bool ModelChecker::sleep_can_read_from(ModelAction *curr, const ModelAction *write)
2837 for ( ; write != NULL; write = write->get_reads_from()) {
2838 /* UNINIT actions don't have a Node, and they never sleep */
2839 if (write->is_uninitialized())
2841 Node *prevnode = write->get_node()->get_parent();
/* Was curr's thread in the sleep set when this write executed? */
2843 bool thread_sleep = prevnode->enabled_status(curr->get_tid()) == THREAD_SLEEP_SET;
2844 if (write->is_release() && thread_sleep)
/* Non-RMW writes terminate the backwards chain */
2846 if (!write->is_rmw())
2853 * @brief Get an action representing an uninitialized atomic
2855 * This function may create a new one or try to retrieve one from the NodeStack
2857 * @param curr The current action, which prompts the creation of an UNINIT action
2858 * @return A pointer to the UNINIT ModelAction
2860 ModelAction * ModelChecker::get_uninitialized_action(const ModelAction *curr) const
2862 Node *node = curr->get_node();
2863 ModelAction *act = node->get_uninit_action();
2865 act = new ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, curr->get_location(), model->params.uninitvalue, model_thread);
2866 node->set_uninit_action(act);
2868 act->create_cv(NULL);
/* Print a list of actions and a rolling hash that fingerprints the trace, so
 * identical executions can be recognized by their HASH line. */
2872 static void print_list(action_list_t *list)
2874 action_list_t::iterator it;
2876 model_print("---------------------------------------------------------------------\n");
2878 unsigned int hash = 0;
2880 for (it = list->begin(); it != list->end(); it++) {
2881 const ModelAction *act = *it;
/* Only actions with a real sequence number are part of the trace */
2882 if (act->get_seq_number() > 0)
/* Fold each action's hash into the running fingerprint */
2884 hash = hash^(hash<<3)^((*it)->hash());
2886 model_print("HASH %u\n", hash);
2887 model_print("---------------------------------------------------------------------\n");
2890 #if SUPPORT_MOD_ORDER_DUMP
/* Write the modification order graph plus rf/sb edges to '<filename>.dot'
 * for visualization with Graphviz. */
2891 void ModelChecker::dumpGraph(char *filename) const
2894 sprintf(buffer, "%s.dot", filename);
2895 FILE *file = fopen(buffer, "w");
2896 fprintf(file, "digraph %s {\n", filename);
2897 mo_graph->dumpNodes(file);
/* thread_array[tid] tracks the previous action per thread so consecutive
 * actions can be linked with sequenced-before edges */
2898 ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());
2900 for (action_list_t::iterator it = action_trace->begin(); it != action_trace->end(); it++) {
2901 ModelAction *act = *it;
2902 if (act->is_read()) {
2903 mo_graph->dot_print_node(file, act);
/* reads-from edges drawn in red */
2904 if (act->get_reads_from())
2905 mo_graph->dot_print_edge(file,
2906 act->get_reads_from(),
2908 "label=\"rf\", color=red, weight=2");
2910 mo_graph->dot_print_edge(file,
2911 act->get_reads_from_promise(),
2913 "label=\"rf\", color=red");
/* sequenced-before edges (blue) from this thread's previous action */
2915 if (thread_array[act->get_tid()]) {
2916 mo_graph->dot_print_edge(file,
2917 thread_array[id_to_int(act->get_tid())],
2919 "label=\"sb\", color=blue, weight=400");
2922 thread_array[act->get_tid()] = act;
2924 fprintf(file, "}\n");
2925 model_free(thread_array);
2930 /** @brief Prints an execution trace summary. */
2931 void ModelChecker::print_summary() const
2933 #if SUPPORT_MOD_ORDER_DUMP
/* Optionally dump the modification order graph for this execution */
2934 char buffername[100];
2935 sprintf(buffername, "exec%04u", stats.num_total);
2936 mo_graph->dumpGraphToFile(buffername);
2937 sprintf(buffername, "graph%04u", stats.num_total);
2938 dumpGraph(buffername);
2941 model_print("Execution %d:", stats.num_total);
2942 if (isfeasibleprefix()) {
2943 if (scheduler->all_threads_sleeping())
2944 model_print(" SLEEP-SET REDUNDANT");
2947 print_infeasibility(" INFEASIBLE");
2948 print_list(action_trace);
/* List any promises that were never resolved during this execution */
2950 if (!promises->empty()) {
2951 model_print("Pending promises:\n");
2952 for (unsigned int i = 0; i < promises->size(); i++) {
2953 model_print(" [P%u] ", i);
2954 (*promises)[i]->print();
2961 * Add a Thread to the system for the first time. Should only be called once
2963 * @param t The Thread to add
2965 void ModelChecker::add_thread(Thread *t)
2967 thread_map->put(id_to_int(t->get_id()), t);
2968 scheduler->add_thread(t);
2972 * @brief Get a Thread reference by its ID
2973 * @param tid The Thread's ID
2974 * @return A Thread reference
2976 Thread * ModelChecker::get_thread(thread_id_t tid) const
2978 return thread_map->get(id_to_int(tid));
2982 * @brief Get a reference to the Thread in which a ModelAction was executed
2983 * @param act The ModelAction
2984 * @return A Thread reference
2986 Thread * ModelChecker::get_thread(const ModelAction *act) const
2988 return get_thread(act->get_tid());
2992 * @brief Get a Promise's "promise number"
2994 * A "promise number" is an index number that is unique to a promise, valid
2995 * only for a specific snapshot of an execution trace. Promises may come and go
2996 * as they are generated an resolved, so an index only retains meaning for the
2999 * @param promise The Promise to check
3000 * @return The promise index, if the promise still is valid; otherwise -1
3002 int ModelChecker::get_promise_number(const Promise *promise) const
3004 for (unsigned int i = 0; i < promises->size(); i++)
3005 if ((*promises)[i] == promise)
3012 * @brief Check if a Thread is currently enabled
3013 * @param t The Thread to check
3014 * @return True if the Thread is currently enabled
3016 bool ModelChecker::is_enabled(Thread *t) const
3018 return scheduler->is_enabled(t);
3022 * @brief Check if a Thread is currently enabled
3023 * @param tid The ID of the Thread to check
3024 * @return True if the Thread is currently enabled
3026 bool ModelChecker::is_enabled(thread_id_t tid) const
3028 return scheduler->is_enabled(tid);
3032 * Switch from a model-checker context to a user-thread context. This is the
3033 * complement of ModelChecker::switch_to_master and must be called from the
3034 * model-checker context
3036 * @param thread The user-thread to switch to
3038 void ModelChecker::switch_from_master(Thread *thread)
3040 scheduler->set_current_thread(thread);
3041 Thread::swap(&system_context, thread);
3045  * Switch from a user-context to the "master thread" context (a.k.a. system
3046  * context). This switch is made with the intention of exploring a particular
3047  * model-checking action (described by a ModelAction object). Must be called
3048  * from a user-thread context.
3050  * @param act The current action that will be explored. May be NULL only if
3051  * trace is exiting via an assertion (see ModelChecker::set_assert and
3052  * ModelChecker::has_asserted).
3053  * @return Return the value returned by the current action
3055 uint64_t ModelChecker::switch_to_master(ModelAction *act)
3058 Thread *old = thread_current();
3059 scheduler->set_current_thread(NULL);
3060 ASSERT(!old->get_pending());
/* Stash the action for the master loop to consume (see run()) */
3061 old->set_pending(act);
3062 if (Thread::swap(old, &system_context) < 0) {
3063 perror("swap threads");
/* Control returns here when the master re-schedules this thread;
 * presumably the return value was set by the model checker before the
 * switch back — TODO confirm against Thread/master-loop code */
3066 return old->get_return_value();
3070  * Takes the next step in the execution, if possible.
3071  * @param curr The current step to take
3072  * @return Returns the next Thread to run, if any; NULL if this execution
3075 Thread * ModelChecker::take_step(ModelAction *curr)
3077 Thread *curr_thrd = get_thread(curr);
3078 ASSERT(curr_thrd->get_state() == THREAD_READY);
/* May swap curr for an equivalent action from the NodeStack */
3080 curr = check_current_action(curr);
3082 /* Infeasible -> don't take any more steps */
3083 if (is_infeasible())
3085 else if (isfeasibleprefix() && have_bug_reports()) {
/* Enforce the (optional) bound on execution length */
3090 if (params.bound != 0 && priv->used_sequence_numbers > params.bound)
/* Blocked/finished threads leave the scheduler's pool */
3093 if (curr_thrd->is_blocked() || curr_thrd->is_complete())
3094 scheduler->remove_thread(curr_thrd);
3096 Thread *next_thrd = NULL;
3098 next_thrd = action_select_next_thread(curr);
3100 next_thrd = get_next_thread();
3102 DEBUG("(%d, %d)\n", curr_thrd ? id_to_int(curr_thrd->get_id()) : -1,
3103 next_thrd ? id_to_int(next_thrd->get_id()) : -1);
3108 /** Wrapper to run the user's main function, with appropriate arguments */
3109 void user_main_wrapper(void *)
3111 user_main(model->params.argc, model->params.argv);
3114 /** @brief Run ModelChecker for the user program */
3115 void ModelChecker::run()
3119 Thread *t = new Thread(&user_thread, &user_main_wrapper, NULL, NULL);
3124 * Stash next pending action(s) for thread(s). There
3125 * should only need to stash one thread's action--the
3126 * thread which just took a step--plus the first step
3127 * for any newly-created thread
3129 for (unsigned int i = 0; i < get_num_threads(); i++) {
3130 thread_id_t tid = int_to_id(i);
3131 Thread *thr = get_thread(tid);
3132 if (!thr->is_model_thread() && !thr->is_complete() && !thr->get_pending()) {
3133 switch_from_master(thr);
3134 if (thr->is_waiting_on(thr))
3135 assert_bug("Deadlock detected");
3139 /* Catch assertions from prior take_step or from
3140 * between-ModelAction bugs (e.g., data races) */
3144 /* Consume the next action for a Thread */
3145 ModelAction *curr = t->get_pending();
3146 t->set_pending(NULL);
3147 t = take_step(curr);
3148 } while (t && !t->is_model_thread());
3151 * Launch end-of-execution release sequence fixups only when
3152 * the execution is otherwise feasible AND there are:
3154 * (1) pending release sequences
3155 * (2) pending assertions that could be invalidated by a change
3156 * in clock vectors (i.e., data races)
3157 * (3) no pending promises
3159 while (!pending_rel_seqs->empty() &&
3160 is_feasible_prefix_ignore_relseq() &&
3161 !unrealizedraces.empty()) {
3162 model_print("*** WARNING: release sequence fixup action "
3163 "(%zu pending release seuqence(s)) ***\n",
3164 pending_rel_seqs->size());
3165 ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
3166 std::memory_order_seq_cst, NULL, VALUE_NONE,
3170 } while (next_execution());
3172 model_print("******* Model-checking complete: *******\n");