11 #include "snapshot-interface.h"
13 #include "clockvector.h"
14 #include "cyclegraph.h"
17 #include "threads-model.h"
19 #include "traceanalysis.h"
21 #define INITIAL_THREAD_ID 0
26 bug_message(const char *str) {
27 const char *fmt = " [BUG] %s\n";
28 msg = (char *)snapshot_malloc(strlen(fmt) + strlen(str));
29 sprintf(msg, fmt, str);
31 ~bug_message() { if (msg) snapshot_free(msg); }
34 void print() { model_print("%s", msg); }
40 * Structure for holding small ModelChecker members that should be snapshotted
42 struct model_snapshot_members {
43 model_snapshot_members() :
44 /* First thread created will have id INITIAL_THREAD_ID */
45 next_thread_id(INITIAL_THREAD_ID),
46 used_sequence_numbers(0),
50 failed_promise(false),
51 too_many_reads(false),
52 no_valid_reads(false),
53 bad_synchronization(false),
57 ~model_snapshot_members() {
58 for (unsigned int i = 0; i < bugs.size(); i++)
63 unsigned int next_thread_id;
64 modelclock_t used_sequence_numbers;
65 ModelAction *next_backtrack;
66 SnapVector<bug_message *> bugs;
67 struct execution_stats stats;
71 /** @brief Incorrectly-ordered synchronization was made */
72 bool bad_synchronization;
78 /** @brief Constructor */
79 ModelChecker::ModelChecker(struct model_params params) :
80 /* Initialize default scheduler */
82 scheduler(new Scheduler()),
84 earliest_diverge(NULL),
85 action_trace(new action_list_t()),
86 thread_map(new HashTable<int, Thread *, int>()),
87 obj_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
88 condvar_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
89 obj_thrd_map(new HashTable<void *, SnapVector<action_list_t> *, uintptr_t, 4 >()),
90 promises(new SnapVector<Promise *>()),
91 futurevalues(new SnapVector<struct PendingFutureValue>()),
92 pending_rel_seqs(new SnapVector<struct release_seq *>()),
93 thrd_last_action(new SnapVector<ModelAction *>(1)),
94 thrd_last_fence_release(new SnapVector<ModelAction *>()),
95 node_stack(new NodeStack()),
96 trace_analyses(new ModelVector<Trace_Analysis *>()),
97 priv(new struct model_snapshot_members()),
98 mo_graph(new CycleGraph())
100 /* Initialize a model-checker thread, for special ModelActions */
101 model_thread = new Thread(get_next_id());
102 thread_map->put(id_to_int(model_thread->get_id()), model_thread);
105 /** @brief Destructor */
106 ModelChecker::~ModelChecker()
108 for (unsigned int i = 0; i < get_num_threads(); i++)
109 delete thread_map->get(i);
114 delete condvar_waiters_map;
117 for (unsigned int i = 0; i < promises->size(); i++)
118 delete (*promises)[i];
121 delete pending_rel_seqs;
123 delete thrd_last_action;
124 delete thrd_last_fence_release;
126 for (unsigned int i = 0; i < trace_analyses->size(); i++)
127 delete (*trace_analyses)[i];
128 delete trace_analyses;
134 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr)
136 action_list_t *tmp = hash->get(ptr);
138 tmp = new action_list_t();
144 static SnapVector<action_list_t> * get_safe_ptr_vect_action(HashTable<void *, SnapVector<action_list_t> *, uintptr_t, 4> * hash, void * ptr)
146 SnapVector<action_list_t> *tmp = hash->get(ptr);
148 tmp = new SnapVector<action_list_t>();
154 action_list_t * ModelChecker::get_actions_on_obj(void * obj, thread_id_t tid) {
155 SnapVector<action_list_t> *wrv = obj_thrd_map->get(obj);
158 unsigned int thread = id_to_int(tid);
159 if (thread < wrv->size())
160 return &(*wrv)[thread];
167 * Restores user program to initial state and resets all model-checker data
170 void ModelChecker::reset_to_initial_state()
172 DEBUG("+++ Resetting to initial state +++\n");
173 node_stack->reset_execution();
176 * FIXME: if we utilize partial rollback, we will need to free only
177 * those pending actions which were NOT pending before the rollback
180 for (unsigned int i = 0; i < get_num_threads(); i++)
181 delete get_thread(int_to_id(i))->get_pending();
183 snapshot_backtrack_before(0);
186 /** @return a thread ID for a new Thread */
187 thread_id_t ModelChecker::get_next_id()
189 return priv->next_thread_id++;
192 /** @return the number of user threads created during this execution */
193 unsigned int ModelChecker::get_num_threads() const
195 return priv->next_thread_id;
199 * Must be called from user-thread context (e.g., through the global
200 * thread_current() interface)
202 * @return The currently executing Thread.
204 Thread * ModelChecker::get_current_thread() const
206 return scheduler->get_current_thread();
209 /** @return a sequence number for a new ModelAction */
210 modelclock_t ModelChecker::get_next_seq_num()
212 return ++priv->used_sequence_numbers;
215 Node * ModelChecker::get_curr_node() const
217 return node_stack->get_head();
221 * @brief Select the next thread to execute based on the current action
223 * RMW actions occur in two parts, and we cannot split them. And THREAD_CREATE
224 * actions should be followed by the execution of their child thread. In either
225 * case, the current action should determine the next thread to schedule.
227 * @param curr The current action
228 * @return The next thread to run, if the current action will determine this
229 * selection; otherwise NULL
231 Thread * ModelChecker::action_select_next_thread(const ModelAction *curr) const
233 /* Do not split atomic RMW */
235 return get_thread(curr);
236 /* Follow CREATE with the created thread */
237 if (curr->get_type() == THREAD_CREATE)
238 return curr->get_thread_operand();
243 * @brief Choose the next thread to execute.
245 * This function chooses the next thread that should execute. It can enforce
246 * execution replay/backtracking or, if the model-checker has no preference
247 * regarding the next thread (i.e., when exploring a new execution ordering),
248 * defer to the scheduler.
250 * @return The next chosen thread to run, if any exists; otherwise NULL, if the
251 * current execution should terminate.
253 Thread * ModelChecker::get_next_thread()
258 * Have we completed exploring the preselected path? Then let the
262 return scheduler->select_next_thread();
264 /* Else, we are trying to replay an execution */
265 ModelAction *next = node_stack->get_next()->get_action();
267 if (next == diverge) {
268 if (earliest_diverge == NULL || *diverge < *earliest_diverge)
269 earliest_diverge = diverge;
271 Node *nextnode = next->get_node();
272 Node *prevnode = nextnode->get_parent();
273 scheduler->update_sleep_set(prevnode);
275 /* Reached divergence point */
276 if (nextnode->increment_behaviors()) {
277 /* Execute the same thread with a new behavior */
278 tid = next->get_tid();
279 node_stack->pop_restofstack(2);
282 /* Make a different thread execute for next step */
283 scheduler->add_sleep(get_thread(next->get_tid()));
284 tid = prevnode->get_next_backtrack();
285 /* Make sure the backtracked thread isn't sleeping. */
286 node_stack->pop_restofstack(1);
287 if (diverge == earliest_diverge) {
288 earliest_diverge = prevnode->get_action();
291 /* Start the round robin scheduler from this thread id */
292 scheduler->set_scheduler_thread(tid);
293 /* The correct sleep set is in the parent node. */
296 DEBUG("*** Divergence point ***\n");
300 tid = next->get_tid();
302 DEBUG("*** ModelChecker chose next thread = %d ***\n", id_to_int(tid));
303 ASSERT(tid != THREAD_ID_T_NONE);
304 return get_thread(id_to_int(tid));
308 * We need to know what the next actions of all threads in the sleep
309 * set will be. This method computes them and stores each such action as
310 * the corresponding thread's pending action.
313 void ModelChecker::execute_sleep_set()
315 for (unsigned int i = 0; i < get_num_threads(); i++) {
316 thread_id_t tid = int_to_id(i);
317 Thread *thr = get_thread(tid);
318 if (scheduler->is_sleep_set(thr) && thr->get_pending()) {
319 thr->get_pending()->set_sleep_flag();
325 * @brief Should the current action wake up a given thread?
327 * @param curr The current action
328 * @param thread The thread that we might wake up
329 * @return True, if we should wake up the sleeping thread; false otherwise
331 bool ModelChecker::should_wake_up(const ModelAction *curr, const Thread *thread) const
333 const ModelAction *asleep = thread->get_pending();
334 /* Don't allow partial RMW to wake anyone up */
337 /* Synchronizing actions may have been backtracked */
338 if (asleep->could_synchronize_with(curr))
340 /* All acquire/release fences and fence-acquire/store-release */
341 if (asleep->is_fence() && asleep->is_acquire() && curr->is_release())
343 /* Fence-release + store can awake load-acquire on the same location */
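/* Illustrative sketch (an assumption about the targeted pattern, not checker
 * code): per C++11 fence semantics, a release fence followed by a relaxed
 * store can synchronize with a load-acquire on the same location:
 *
 *   // curr's thread                                // sleeping thread
 *   atomic_thread_fence(memory_order_release);
 *   x.store(1, memory_order_relaxed);               r = x.load(memory_order_acquire);
 *
 * so a sleeping load-acquire may need to wake up to consider reading from
 * this store. */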
344 if (asleep->is_read() && asleep->is_acquire() && curr->same_var(asleep) && curr->is_write()) {
345 ModelAction *fence_release = get_last_fence_release(curr->get_tid());
346 if (fence_release && *(get_last_action(thread->get_id())) < *fence_release)
352 void ModelChecker::wake_up_sleeping_actions(ModelAction *curr)
354 for (unsigned int i = 0; i < get_num_threads(); i++) {
355 Thread *thr = get_thread(int_to_id(i));
356 if (scheduler->is_sleep_set(thr)) {
357 if (should_wake_up(curr, thr))
358 /* Remove this thread from sleep set */
359 scheduler->remove_sleep(thr);
364 /** @brief Alert the model-checker that an incorrectly-ordered
365 * synchronization was made */
366 void ModelChecker::set_bad_synchronization()
368 priv->bad_synchronization = true;
372 * Check whether the current trace has triggered an assertion which should halt
375 * @return True, if the execution should be aborted; false otherwise
377 bool ModelChecker::has_asserted() const
379 return priv->asserted;
383 * Trigger a trace assertion which should cause this execution to be halted.
384 * This can be due to a detected bug or due to an infeasibility that should
387 void ModelChecker::set_assert()
389 priv->asserted = true;
393 * Check if we are in a deadlock. Should only be called at the end of an
394 * execution, although it should not give false positives in the middle of an
395 * execution (there should be some ENABLED thread).
397 * @return True if program is in a deadlock; false otherwise
399 bool ModelChecker::is_deadlocked() const
401 bool blocking_threads = false;
402 for (unsigned int i = 0; i < get_num_threads(); i++) {
403 thread_id_t tid = int_to_id(i);
406 Thread *t = get_thread(tid);
407 if (!t->is_model_thread() && t->get_pending())
408 blocking_threads = true;
410 return blocking_threads;
414 * Check if this is a complete execution. That is, have all threads completed
415 * execution (rather than exiting because sleep sets have forced a redundant
418 * @return True if the execution is complete.
420 bool ModelChecker::is_complete_execution() const
422 for (unsigned int i = 0; i < get_num_threads(); i++)
423 if (is_enabled(int_to_id(i)))
429 * @brief Assert a bug in the executing program.
431 * Use this function to assert any sort of bug in the user program. If the
432 * current trace is feasible (actually, a prefix of some feasible execution),
433 * then this execution will be aborted, printing the appropriate message. If
434 * the current trace is not yet feasible, the error message will be stashed and
435 * printed if the execution ever becomes feasible.
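*
* A minimal usage sketch (hypothetical call site; printf-style formatting is
* assumed from the vsnprintf() call in the implementation):
*
*   model->assert_bug("unexpected value %d at %p", value, location);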
437 * @param msg Descriptive message for the bug (do not include newline char)
438 * @return True if bug is immediately-feasible
440 bool ModelChecker::assert_bug(const char *msg, ...)
446 vsnprintf(str, sizeof(str), msg, ap);
449 priv->bugs.push_back(new bug_message(str));
451 if (isfeasibleprefix()) {
459 * @brief Assert a bug in the executing program, asserted by a user thread
460 * @see ModelChecker::assert_bug
461 * @param msg Descriptive message for the bug (do not include newline char)
463 void ModelChecker::assert_user_bug(const char *msg)
465 /* If feasible bug, bail out now */
467 switch_to_master(NULL);
470 /** @return True, if any bugs have been reported for this execution */
471 bool ModelChecker::have_bug_reports() const
473 return priv->bugs.size() != 0;
476 /** @brief Print bug report listing for this execution (if any bugs exist) */
477 void ModelChecker::print_bugs() const
479 if (have_bug_reports()) {
480 model_print("Bug report: %zu bug%s detected\n",
482 priv->bugs.size() > 1 ? "s" : "");
483 for (unsigned int i = 0; i < priv->bugs.size(); i++)
484 priv->bugs[i]->print();
489 * @brief Record end-of-execution stats
491 * Must be run when exiting an execution. Records various stats.
492 * @see struct execution_stats
494 void ModelChecker::record_stats()
497 if (!isfeasibleprefix())
498 stats.num_infeasible++;
499 else if (have_bug_reports())
500 stats.num_buggy_executions++;
501 else if (is_complete_execution())
502 stats.num_complete++;
504 stats.num_redundant++;
507 * @todo We can violate this ASSERT() when fairness/sleep sets
508 * conflict to cause an execution to terminate, e.g. with:
509 * Scheduler: [0: disabled][1: disabled][2: sleep][3: current, enabled]
511 //ASSERT(scheduler->all_threads_sleeping());
515 /** @brief Print execution stats */
516 void ModelChecker::print_stats() const
518 model_print("Number of complete, bug-free executions: %d\n", stats.num_complete);
519 model_print("Number of redundant executions: %d\n", stats.num_redundant);
520 model_print("Number of buggy executions: %d\n", stats.num_buggy_executions);
521 model_print("Number of infeasible executions: %d\n", stats.num_infeasible);
522 model_print("Total executions: %d\n", stats.num_total);
523 model_print("Total nodes created: %d\n", node_stack->get_total_nodes());
527 * @brief End-of-execution print
528 * @param printbugs Should any existing bugs be printed?
530 void ModelChecker::print_execution(bool printbugs) const
532 print_program_output();
534 if (params.verbose) {
535 model_print("Earliest divergence point since last feasible execution:\n");
536 if (earliest_diverge)
537 earliest_diverge->print();
539 model_print("(Not set)\n");
545 /* Don't print invalid bugs */
554 * Queries the model-checker for more executions to explore and, if one
555 * exists, resets the model-checker state to execute a new execution.
557 * @return If there are more executions to explore, return true. Otherwise,
560 bool ModelChecker::next_execution()
563 /* Is this execution a feasible execution that's worth bug-checking? */
564 bool complete = isfeasibleprefix() && (is_complete_execution() ||
567 /* End-of-execution bug checks */
570 assert_bug("Deadlock detected");
573 run_trace_analyses();
579 if (params.verbose || (complete && have_bug_reports()))
580 print_execution(complete);
582 clear_program_output();
585 earliest_diverge = NULL;
587 if ((diverge = get_next_backtrack()) == NULL)
591 model_print("Next execution will diverge at:\n");
595 reset_to_initial_state();
600 * @brief Run trace analyses on complete trace. */
602 void ModelChecker::run_trace_analyses() {
603 for (unsigned int i = 0; i < trace_analyses->size(); i++) {
604 (*trace_analyses)[i]->analyze(action_trace);
609 * @brief Find the last fence-related backtracking conflict for a ModelAction
611 * This function performs the search for the most recent conflicting action
612 * against which we should perform backtracking, as affected by fence
613 * operations. This includes pairs of potentially-synchronizing actions which
614 * occur due to fence-acquire or fence-release, and hence should be explored in
615 * the opposite execution order.
617 * @param act The current action
618 * @return The most recent action which conflicts with act due to fences
620 ModelAction * ModelChecker::get_last_fence_conflict(ModelAction *act) const
622 /* Only perform release/acquire fence backtracking for stores */
623 if (!act->is_write())
626 /* Find a fence-release (or, act is a release) */
627 ModelAction *last_release;
628 if (act->is_release())
631 last_release = get_last_fence_release(act->get_tid());
635 /* Skip past the release */
636 action_list_t *list = action_trace;
637 action_list_t::reverse_iterator rit;
638 for (rit = list->rbegin(); rit != list->rend(); rit++)
639 if (*rit == last_release)
641 ASSERT(rit != list->rend());
646 * load --sb-> fence-acquire */
647 ModelVector<ModelAction *> acquire_fences(get_num_threads(), NULL);
648 ModelVector<ModelAction *> prior_loads(get_num_threads(), NULL);
649 bool found_acquire_fences = false;
650 for ( ; rit != list->rend(); rit++) {
651 ModelAction *prev = *rit;
652 if (act->same_thread(prev))
655 int tid = id_to_int(prev->get_tid());
657 if (prev->is_read() && act->same_var(prev)) {
658 if (prev->is_acquire()) {
659 /* Found most recent load-acquire, don't need
660 * to search for more fences */
661 if (!found_acquire_fences)
664 prior_loads[tid] = prev;
667 if (prev->is_acquire() && prev->is_fence() && !acquire_fences[tid]) {
668 found_acquire_fences = true;
669 acquire_fences[tid] = prev;
673 ModelAction *latest_backtrack = NULL;
674 for (unsigned int i = 0; i < acquire_fences.size(); i++)
675 if (acquire_fences[i] && prior_loads[i])
676 if (!latest_backtrack || *latest_backtrack < *acquire_fences[i])
677 latest_backtrack = acquire_fences[i];
678 return latest_backtrack;
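/* Illustrative sketch (an assumption about the targeted pattern, not checker
 * code): the fence-acquire/prior-load pairs collected above correspond to
 * user programs such as
 *
 *   // act's thread (store)                 // other thread
 *   x.store(1, memory_order_release);       r = x.load(memory_order_relaxed);
 *                                           atomic_thread_fence(memory_order_acquire);
 *
 * where the relaxed load sequenced before an acquire fence may synchronize
 * with the store (directly or via a fence-release), so the two sides should
 * also be explored in the opposite order. */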
682 * @brief Find the last backtracking conflict for a ModelAction
684 * This function performs the search for the most recent conflicting action
685 * against which we should perform backtracking. This primarily includes pairs of
686 * synchronizing actions which should be explored in the opposite execution
689 * @param act The current action
690 * @return The most recent action which conflicts with act
692 ModelAction * ModelChecker::get_last_conflict(ModelAction *act) const
694 switch (act->get_type()) {
695 /* case ATOMIC_FENCE: fences don't directly cause backtracking */
699 ModelAction *ret = NULL;
701 /* linear search: from most recent to oldest */
702 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
703 action_list_t::reverse_iterator rit;
704 for (rit = list->rbegin(); rit != list->rend(); rit++) {
705 ModelAction *prev = *rit;
706 if (prev->could_synchronize_with(act)) {
712 ModelAction *ret2 = get_last_fence_conflict(act);
722 case ATOMIC_TRYLOCK: {
723 /* linear search: from most recent to oldest */
724 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
725 action_list_t::reverse_iterator rit;
726 for (rit = list->rbegin(); rit != list->rend(); rit++) {
727 ModelAction *prev = *rit;
728 if (act->is_conflicting_lock(prev))
733 case ATOMIC_UNLOCK: {
734 /* linear search: from most recent to oldest */
735 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
736 action_list_t::reverse_iterator rit;
737 for (rit = list->rbegin(); rit != list->rend(); rit++) {
738 ModelAction *prev = *rit;
739 if (!act->same_thread(prev) && prev->is_failed_trylock())
745 /* linear search: from most recent to oldest */
746 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
747 action_list_t::reverse_iterator rit;
748 for (rit = list->rbegin(); rit != list->rend(); rit++) {
749 ModelAction *prev = *rit;
750 if (!act->same_thread(prev) && prev->is_failed_trylock())
752 if (!act->same_thread(prev) && prev->is_notify())
758 case ATOMIC_NOTIFY_ALL:
759 case ATOMIC_NOTIFY_ONE: {
760 /* linear search: from most recent to oldest */
761 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
762 action_list_t::reverse_iterator rit;
763 for (rit = list->rbegin(); rit != list->rend(); rit++) {
764 ModelAction *prev = *rit;
765 if (!act->same_thread(prev) && prev->is_wait())
776 /** This method finds backtracking points against which we should try to
777 * reorder the parameter ModelAction.
779 * @param act The ModelAction to find backtracking points for.
781 void ModelChecker::set_backtracking(ModelAction *act)
783 Thread *t = get_thread(act);
784 ModelAction *prev = get_last_conflict(act);
788 Node *node = prev->get_node()->get_parent();
790 /* See Dynamic Partial Order Reduction (addendum), POPL '05 */
791 int low_tid, high_tid;
792 if (node->enabled_status(t->get_id()) == THREAD_ENABLED) {
793 low_tid = id_to_int(act->get_tid());
794 high_tid = low_tid + 1;
797 high_tid = get_num_threads();
800 for (int i = low_tid; i < high_tid; i++) {
801 thread_id_t tid = int_to_id(i);
803 /* Make sure this thread can be enabled here. */
804 if (i >= node->get_num_threads())
807 /* See Dynamic Partial Order Reduction (addendum), POPL '05 */
808 /* Don't backtrack into a point where the thread is disabled or sleeping. */
809 if (node->enabled_status(tid) != THREAD_ENABLED)
812 /* Check if this has been explored already */
813 if (node->has_been_explored(tid))
816 /* See if fairness allows */
817 if (model->params.fairwindow != 0 && !node->has_priority(tid)) {
819 for (int t = 0; t < node->get_num_threads(); t++) {
820 thread_id_t tother = int_to_id(t);
821 if (node->is_enabled(tother) && node->has_priority(tother)) {
830 /* See if CHESS-like yield fairness allows */
831 if (model->params.yieldon) {
833 for (int t = 0; t < node->get_num_threads(); t++) {
834 thread_id_t tother = int_to_id(t);
835 if (node->is_enabled(tother) && node->has_priority_over(tid, tother)) {
844 /* Cache the latest backtracking point */
845 set_latest_backtrack(prev);
847 /* If this is a new backtracking point, mark the tree */
848 if (!node->set_backtrack(tid))
850 DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
851 id_to_int(prev->get_tid()),
852 id_to_int(t->get_id()));
861 * @brief Cache a backtracking point as the "most recent", if eligible
863 * Note that this does not prepare the NodeStack for this backtracking
864 * operation; it only caches the action on a per-execution basis
866 * @param act The operation at which we should explore a different next action
867 * (i.e., backtracking point)
868 * @return True, if this action is now the most recent backtracking point;
871 bool ModelChecker::set_latest_backtrack(ModelAction *act)
873 if (!priv->next_backtrack || *act > *priv->next_backtrack) {
874 priv->next_backtrack = act;
881 * Returns last backtracking point. The model checker will explore a different
882 * path for this point in the next execution.
883 * @return The ModelAction at which the next execution should diverge.
885 ModelAction * ModelChecker::get_next_backtrack()
887 ModelAction *next = priv->next_backtrack;
888 priv->next_backtrack = NULL;
893 * Processes a read model action.
894 * @param curr is the read model action to process.
895 * @return True if processing this read updates the mo_graph.
897 bool ModelChecker::process_read(ModelAction *curr)
899 Node *node = curr->get_node();
901 bool updated = false;
902 switch (node->get_read_from_status()) {
903 case READ_FROM_PAST: {
904 const ModelAction *rf = node->get_read_from_past();
907 mo_graph->startChanges();
909 ASSERT(!is_infeasible());
910 if (!check_recency(curr, rf)) {
911 if (node->increment_read_from()) {
912 mo_graph->rollbackChanges();
915 priv->too_many_reads = true;
919 updated = r_modification_order(curr, rf);
921 mo_graph->commitChanges();
922 mo_check_promises(curr, true);
925 case READ_FROM_PROMISE: {
926 Promise *promise = curr->get_node()->get_read_from_promise();
927 if (promise->add_reader(curr))
928 priv->failed_promise = true;
929 curr->set_read_from_promise(promise);
930 mo_graph->startChanges();
931 if (!check_recency(curr, promise))
932 priv->too_many_reads = true;
933 updated = r_modification_order(curr, promise);
934 mo_graph->commitChanges();
937 case READ_FROM_FUTURE: {
938 /* Read from future value */
939 struct future_value fv = node->get_future_value();
940 Promise *promise = new Promise(curr, fv);
941 curr->set_read_from_promise(promise);
942 promises->push_back(promise);
943 mo_graph->startChanges();
944 updated = r_modification_order(curr, promise);
945 mo_graph->commitChanges();
951 get_thread(curr)->set_return_value(curr->get_return_value());
957 * Processes a lock, trylock, or unlock model action. @param curr is
958 * the mutex model action to process.
960 * The try lock operation checks whether the lock is taken. If not,
961 * it falls through to the normal lock operation case. If so, it returns
964 * The lock operation has already been checked that it is enabled, so
965 * it just grabs the lock and synchronizes with the previous unlock.
967 * The unlock operation has to re-enable all of the threads that are
968 * waiting on the lock.
970 * @return True if synchronization was updated; false otherwise
972 bool ModelChecker::process_mutex(ModelAction *curr)
974 std::mutex *mutex = curr->get_mutex();
975 struct std::mutex_state *state = NULL;
978 state = mutex->get_state();
980 switch (curr->get_type()) {
981 case ATOMIC_TRYLOCK: {
982 bool success = !state->locked;
983 curr->set_try_lock(success);
985 get_thread(curr)->set_return_value(0);
988 get_thread(curr)->set_return_value(1);
990 //otherwise fall into the lock case
992 if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
993 assert_bug("Lock access before initialization");
994 state->locked = get_thread(curr);
995 ModelAction *unlock = get_last_unlock(curr);
996 //synchronize with the previous unlock statement
997 if (unlock != NULL) {
998 curr->synchronize_with(unlock);
1004 case ATOMIC_UNLOCK: {
1005 /* wake up the other threads */
1006 for (unsigned int i = 0; i < get_num_threads(); i++) {
1007 Thread *t = get_thread(int_to_id(i));
1008 Thread *curr_thrd = get_thread(curr);
1009 if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
1013 /* unlock the lock - after checking who was waiting on it */
1014 state->locked = NULL;
1016 if (!curr->is_wait())
1017 break; /* The rest is only for ATOMIC_WAIT */
1019 /* Should we go to sleep? (simulate spurious failures) */
1020 if (curr->get_node()->get_misc() == 0) {
1021 get_safe_ptr_action(condvar_waiters_map, curr->get_location())->push_back(curr);
1023 scheduler->sleep(get_thread(curr));
1027 case ATOMIC_NOTIFY_ALL: {
1028 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
1029 //activate all the waiting threads
1030 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
1031 scheduler->wake(get_thread(*rit));
1036 case ATOMIC_NOTIFY_ONE: {
1037 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
1038 int wakeupthread = curr->get_node()->get_misc();
1039 action_list_t::iterator it = waiters->begin();
1040 advance(it, wakeupthread);
1041 scheduler->wake(get_thread(*it));
1053 * @brief Check if the current pending promises allow a future value to be sent
1055 * If one of the following is true:
1056 * (a) there are no pending promises
1057 * (b) the reader and writer do not cross any promises
1058 * Then, it is safe to pass a future value back now.
1060 * Otherwise, we must save the pending future value until (a) or (b) is true
1062 * @param writer The operation which sends the future value. Must be a write.
1063 * @param reader The operation which will observe the value. Must be a read.
1064 * @return True if the future value can be sent now; false if it must wait.
1066 bool ModelChecker::promises_may_allow(const ModelAction *writer,
1067 const ModelAction *reader) const
1069 if (promises->empty())
1071 for (int i = promises->size() - 1; i >= 0; i--) {
1072 ModelAction *pr = (*promises)[i]->get_reader(0);
1073 //reader is after promise...doesn't cross any promise
1076 //writer is after promise, reader before...bad...
1084 * @brief Add a future value to a reader
1086 * This function performs a few additional checks to ensure that the future
1087 * value can be feasibly observed by the reader
1089 * @param writer The operation whose value is sent. Must be a write.
1090 * @param reader The read operation which may read the future value. Must be a read.
1092 void ModelChecker::add_future_value(const ModelAction *writer, ModelAction *reader)
1094 /* Do more ambitious checks now that mo is more complete */
1095 if (!mo_may_allow(writer, reader))
1098 Node *node = reader->get_node();
1100 /* Find an ancestor thread which exists at the time of the reader */
1101 Thread *write_thread = get_thread(writer);
1102 while (id_to_int(write_thread->get_id()) >= node->get_num_threads())
1103 write_thread = write_thread->get_parent();
1105 struct future_value fv = {
1106 writer->get_write_value(),
1107 writer->get_seq_number() + params.maxfuturedelay,
1108 write_thread->get_id(),
1110 if (node->add_future_value(fv))
1111 set_latest_backtrack(reader);
1115 * Process a write ModelAction
1116 * @param curr The ModelAction to process
1117 * @return True if the mo_graph was updated or promises were resolved
1119 bool ModelChecker::process_write(ModelAction *curr)
1121 /* Readers to which we may send our future value */
1122 ModelVector<ModelAction *> send_fv;
1124 const ModelAction *earliest_promise_reader;
1125 bool updated_promises = false;
1127 bool updated_mod_order = w_modification_order(curr, &send_fv);
1128 Promise *promise = pop_promise_to_resolve(curr);
1131 earliest_promise_reader = promise->get_reader(0);
1132 updated_promises = resolve_promise(curr, promise);
1134 earliest_promise_reader = NULL;
1136 for (unsigned int i = 0; i < send_fv.size(); i++) {
1137 ModelAction *read = send_fv[i];
1139 /* Don't send future values to reads after the Promise we resolve */
1140 if (!earliest_promise_reader || *read < *earliest_promise_reader) {
1141 /* Check if future value can be sent immediately */
1142 if (promises_may_allow(curr, read)) {
1143 add_future_value(curr, read);
1145 futurevalues->push_back(PendingFutureValue(curr, read));
1150 /* Check the pending future values */
1151 for (int i = (int)futurevalues->size() - 1; i >= 0; i--) {
1152 struct PendingFutureValue pfv = (*futurevalues)[i];
1153 if (promises_may_allow(pfv.writer, pfv.reader)) {
1154 add_future_value(pfv.writer, pfv.reader);
1155 futurevalues->erase(futurevalues->begin() + i);
1159 mo_graph->commitChanges();
1160 mo_check_promises(curr, false);
1162 get_thread(curr)->set_return_value(VALUE_NONE);
1163 return updated_mod_order || updated_promises;
1167 * Process a fence ModelAction
1168 * @param curr The ModelAction to process
1169 * @return True if synchronization was updated
1171 bool ModelChecker::process_fence(ModelAction *curr)
1174 * fence-relaxed: no-op
1175 * fence-release: only log the occurrence (not in this function), for
1176 * use in later synchronization
1177 * fence-acquire (this function): search for hypothetical release
1179 * fence-seq-cst: MO constraints formed in {r,w}_modification_order
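*
* Illustrative sketch (assumption, not checker code) of the fence-acquire
* case:
*
*   // other thread                         // curr's thread
*   x.store(1, memory_order_release);       r = x.load(memory_order_relaxed);
*                                           atomic_thread_fence(memory_order_acquire);  // curr
*
* The relaxed load sequenced before the acquire fence reads from the
* store-release, so the fence must synchronize with that release (sequence).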
1181 bool updated = false;
1182 if (curr->is_acquire()) {
1183 action_list_t *list = action_trace;
1184 action_list_t::reverse_iterator rit;
1185 /* Find X : is_read(X) && X --sb-> curr */
1186 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1187 ModelAction *act = *rit;
1190 if (act->get_tid() != curr->get_tid())
1192 /* Stop at the beginning of the thread */
1193 if (act->is_thread_start())
1195 /* Stop once we reach a prior fence-acquire */
1196 if (act->is_fence() && act->is_acquire())
1198 if (!act->is_read())
1200 /* read-acquire will find its own release sequences */
1201 if (act->is_acquire())
1204 /* Establish hypothetical release sequences */
1205 rel_heads_list_t release_heads;
1206 get_release_seq_heads(curr, act, &release_heads);
1207 for (unsigned int i = 0; i < release_heads.size(); i++)
1208 if (!curr->synchronize_with(release_heads[i]))
1209 set_bad_synchronization();
1210 if (release_heads.size() != 0)
1218 * @brief Process the current action for thread-related activity
1220 * Performs current-action processing for a THREAD_* ModelAction. Processing
1221 * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
1222 * synchronization, etc. This function is a no-op for non-THREAD actions
1223 * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
1225 * @param curr The current action
1226 * @return True if synchronization was updated or a thread completed
1228 bool ModelChecker::process_thread_action(ModelAction *curr)
1230 bool updated = false;
1232 switch (curr->get_type()) {
1233 case THREAD_CREATE: {
1234 thrd_t *thrd = (thrd_t *)curr->get_location();
1235 struct thread_params *params = (struct thread_params *)curr->get_value();
1236 Thread *th = new Thread(thrd, params->func, params->arg, get_thread(curr));
1238 th->set_creation(curr);
1239 /* Promises can be satisfied by children */
1240 for (unsigned int i = 0; i < promises->size(); i++) {
1241 Promise *promise = (*promises)[i];
1242 if (promise->thread_is_available(curr->get_tid()))
1243 promise->add_thread(th->get_id());
1248 Thread *blocking = curr->get_thread_operand();
1249 ModelAction *act = get_last_action(blocking->get_id());
1250 curr->synchronize_with(act);
1251 updated = true; /* trigger rel-seq checks */
1254 case THREAD_FINISH: {
1255 Thread *th = get_thread(curr);
1256 /* Wake up any joining threads */
1257 for (unsigned int i = 0; i < get_num_threads(); i++) {
1258 Thread *waiting = get_thread(int_to_id(i));
1259 if (waiting->waiting_on() == th &&
1260 waiting->get_pending()->is_thread_join())
1261 scheduler->wake(waiting);
1264 /* Completed thread can't satisfy promises */
1265 for (unsigned int i = 0; i < promises->size(); i++) {
1266 Promise *promise = (*promises)[i];
1267 if (promise->thread_is_available(th->get_id()))
1268 if (promise->eliminate_thread(th->get_id()))
1269 priv->failed_promise = true;
1271 updated = true; /* trigger rel-seq checks */
1274 case THREAD_START: {
1275 check_promises(curr->get_tid(), NULL, curr->get_cv());
1286 * @brief Process the current action for release sequence fixup activity
1288 * Performs model-checker release sequence fixups for the current action,
1289 * forcing a single pending release sequence to break (with a given, potential
1290 * "loose" write) or to complete (i.e., synchronize). If a pending release
1291 * sequence forms a complete release sequence, then we must perform the fixup
1292 * synchronization, mo_graph additions, etc.
1294 * @param curr The current action; must be a release sequence fixup action
1295 * @param work_queue The work queue to which to add work items as they are
1298 void ModelChecker::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
1300 const ModelAction *write = curr->get_node()->get_relseq_break();
1301 struct release_seq *sequence = pending_rel_seqs->back();
1302 pending_rel_seqs->pop_back();
1304 ModelAction *acquire = sequence->acquire;
1305 const ModelAction *rf = sequence->rf;
1306 const ModelAction *release = sequence->release;
1310 ASSERT(release->same_thread(rf));
1312 if (write == NULL) {
1314 * @todo Forcing a synchronization requires that we set
1315 * modification order constraints. For instance, we can't allow
1316 * a fixup sequence in which two separate read-acquire
1317 * operations read from the same sequence, where the first one
1318 * synchronizes and the other doesn't. Essentially, we can't
1319 * allow any writes to insert themselves between 'release' and
1323 /* Must synchronize */
1324 if (!acquire->synchronize_with(release)) {
1325 set_bad_synchronization();
1328 /* Re-check all pending release sequences */
1329 work_queue->push_back(CheckRelSeqWorkEntry(NULL));
1330 /* Re-check act for mo_graph edges */
1331 work_queue->push_back(MOEdgeWorkEntry(acquire));
1333 /* propagate synchronization to later actions */
1334 action_list_t::reverse_iterator rit = action_trace->rbegin();
1335 for (; (*rit) != acquire; rit++) {
1336 ModelAction *propagate = *rit;
1337 if (acquire->happens_before(propagate)) {
1338 propagate->synchronize_with(acquire);
1339 /* Re-check 'propagate' for mo_graph edges */
1340 work_queue->push_back(MOEdgeWorkEntry(propagate));
1344 /* Break release sequence with new edges:
1345 * release --mo--> write --mo--> rf */
1346 mo_graph->addEdge(release, write);
1347 mo_graph->addEdge(write, rf);
1350 /* See if we have realized a data race */
1355 * Initialize the current action by performing one or more of the following
1356 * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
1357 * in the NodeStack, manipulating backtracking sets, allocating and
1358 * initializing clock vectors, and computing the promises to fulfill.
1360 * @param curr The current action, as passed from the user context; may be
1361 * freed/invalidated after the execution of this function, with a different
1362 * action "returned" in its place (pass-by-reference)
1363 * @return True if curr is a newly-explored action; false otherwise
1365 bool ModelChecker::initialize_curr_action(ModelAction **curr)
1367 ModelAction *newcurr;
1369 if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
1370 newcurr = process_rmw(*curr);
1373 if (newcurr->is_rmw())
1374 compute_promises(newcurr);
1380 (*curr)->set_seq_number(get_next_seq_num());
1382 newcurr = node_stack->explore_action(*curr, scheduler->get_enabled_array());
1384 /* First restore type and order in case of RMW operation */
1385 if ((*curr)->is_rmwr())
1386 newcurr->copy_typeandorder(*curr);
1388 ASSERT((*curr)->get_location() == newcurr->get_location());
1389 newcurr->copy_from_new(*curr);
1391 /* Discard duplicate ModelAction; use action from NodeStack */
1394 /* Always compute new clock vector */
1395 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1398 return false; /* Action was explored previously */
1402 /* Always compute new clock vector */
1403 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1405 /* Assign most recent release fence */
1406 newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
1409 * Perform one-time actions when pushing new ModelAction onto
1412 if (newcurr->is_write())
1413 compute_promises(newcurr);
1414 else if (newcurr->is_relseq_fixup())
1415 compute_relseq_breakwrites(newcurr);
1416 else if (newcurr->is_wait())
1417 newcurr->get_node()->set_misc_max(2);
1418 else if (newcurr->is_notify_one()) {
1419 newcurr->get_node()->set_misc_max(get_safe_ptr_action(condvar_waiters_map, newcurr->get_location())->size());
1421 return true; /* This was a new ModelAction */
1426 * @brief Establish reads-from relation between two actions
1428 * Perform basic operations involved with establishing a concrete rf relation,
1429 * including setting the ModelAction data and checking for release sequences.
1431 * @param act The action that is reading (must be a read)
1432 * @param rf The action from which we are reading (must be a write)
1434 * @return True if this read established synchronization
1436 bool ModelChecker::read_from(ModelAction *act, const ModelAction *rf)
1439 ASSERT(rf->is_write());
1441 act->set_read_from(rf);
1442 if (act->is_acquire()) {
1443 rel_heads_list_t release_heads;
1444 get_release_seq_heads(act, act, &release_heads);
1445 int num_heads = release_heads.size();
1446 for (unsigned int i = 0; i < release_heads.size(); i++)
1447 if (!act->synchronize_with(release_heads[i])) {
1448 set_bad_synchronization();
1451 return num_heads > 0;
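/* Illustrative sketch (assumption, not checker code): release sequences let
 * an acquire load synchronize with an earlier store-release even when it
 * reads from a later same-thread store:
 *
 *   // writer thread                        // reader thread
 *   x.store(1, memory_order_release);       r = x.load(memory_order_acquire);
 *   x.store(2, memory_order_relaxed);
 *
 * If the load reads 2, the store-release of 1 heads the release sequence, is
 * among the heads returned above, and therefore still synchronizes with the
 * load. */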
1457 * Check promises and eliminate potentially-satisfying threads when a thread is
1458 * blocked (e.g., join, lock). A thread which is waiting on another thread can
1459 * no longer satisfy a promise generated from that thread.
1461 * @param blocker The thread on which a thread is waiting
1462 * @param waiting The waiting thread
1464 void ModelChecker::thread_blocking_check_promises(Thread *blocker, Thread *waiting)
1466 for (unsigned int i = 0; i < promises->size(); i++) {
1467 Promise *promise = (*promises)[i];
1468 if (!promise->thread_is_available(waiting->get_id()))
1470 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
1471 ModelAction *reader = promise->get_reader(j);
1472 if (reader->get_tid() != blocker->get_id())
1474 if (promise->eliminate_thread(waiting->get_id())) {
1475 /* Promise has failed */
1476 priv->failed_promise = true;
1478 /* Only eliminate the 'waiting' thread once */
1486 * @brief Check whether a model action is enabled.
1488 * Checks whether a lock or join operation would be successful (i.e., is the
1489 * lock already locked, or is the joined thread already complete). If not, put
1490 * the action in a waiter list.
1492 * @param curr is the ModelAction to check whether it is enabled.
1493 * @return a bool that indicates whether the action is enabled.
1495 bool ModelChecker::check_action_enabled(ModelAction *curr) {
1496 if (curr->is_lock()) {
1497 std::mutex *lock = curr->get_mutex();
1498 struct std::mutex_state *state = lock->get_state();
1501 } else if (curr->is_thread_join()) {
1502 Thread *blocking = curr->get_thread_operand();
1503 if (!blocking->is_complete()) {
1504 thread_blocking_check_promises(blocking, get_thread(curr));
1513 * This is the heart of the model checker routine. It performs model-checking
1514 * actions corresponding to a given "current action." Among other processes, it
1515 * calculates reads-from relationships, updates synchronization clock vectors,
1516 * forms a memory_order constraints graph, and handles replay/backtrack
1517 * execution when running permutations of previously-observed executions.
1519 * @param curr The current action to process
1520 * @return The ModelAction that is actually executed; may be different than
1521 * curr; may be NULL, if the current action is not enabled to run
1523 ModelAction * ModelChecker::check_current_action(ModelAction *curr)
1526 bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
1527 bool newly_explored = initialize_curr_action(&curr);
1531 wake_up_sleeping_actions(curr);
1533 /* Compute fairness information for CHESS yield algorithm */
1534 if (model->params.yieldon) {
1535 curr->get_node()->update_yield(scheduler);
1538 /* Add the action to lists before any other model-checking tasks */
1539 if (!second_part_of_rmw)
1540 add_action_to_lists(curr);
1542 /* Build may_read_from set for newly-created actions */
1543 if (newly_explored && curr->is_read())
1544 build_may_read_from(curr);
1546 /* Initialize work_queue with the "current action" work */
1547 work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
1548 while (!work_queue.empty() && !has_asserted()) {
1549 WorkQueueEntry work = work_queue.front();
1550 work_queue.pop_front();
1552 switch (work.type) {
1553 case WORK_CHECK_CURR_ACTION: {
1554 ModelAction *act = work.action;
1555 bool update = false; /* update this location's release seq's */
1556 bool update_all = false; /* update all release seq's */
1558 if (process_thread_action(curr))
1561 if (act->is_read() && !second_part_of_rmw && process_read(act))
1564 if (act->is_write() && process_write(act))
1567 if (act->is_fence() && process_fence(act))
1570 if (act->is_mutex_op() && process_mutex(act))
1573 if (act->is_relseq_fixup())
1574 process_relseq_fixup(curr, &work_queue);
1577 work_queue.push_back(CheckRelSeqWorkEntry(NULL));
1579 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1582 case WORK_CHECK_RELEASE_SEQ:
1583 resolve_release_sequences(work.location, &work_queue);
1585 case WORK_CHECK_MO_EDGES: {
1586 /** @todo Complete verification of work_queue */
1587 ModelAction *act = work.action;
1588 bool updated = false;
1590 if (act->is_read()) {
1591 const ModelAction *rf = act->get_reads_from();
1592 const Promise *promise = act->get_reads_from_promise();
1594 if (r_modification_order(act, rf))
1596 } else if (promise) {
1597 if (r_modification_order(act, promise))
1601 if (act->is_write()) {
1602 if (w_modification_order(act, NULL))
1605 mo_graph->commitChanges();
1608 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1617 check_curr_backtracking(curr);
1618 set_backtracking(curr);
1622 void ModelChecker::check_curr_backtracking(ModelAction *curr)
1624 Node *currnode = curr->get_node();
1625 Node *parnode = currnode->get_parent();
1627 if ((parnode && !parnode->backtrack_empty()) ||
1628 !currnode->misc_empty() ||
1629 !currnode->read_from_empty() ||
1630 !currnode->promise_empty() ||
1631 !currnode->relseq_break_empty()) {
1632 set_latest_backtrack(curr);
1636 bool ModelChecker::promises_expired() const
1638 for (unsigned int i = 0; i < promises->size(); i++) {
1639 Promise *promise = (*promises)[i];
1640 if (promise->get_expiration() < priv->used_sequence_numbers)
1647 * This is the strongest feasibility check available.
1648 * @return whether the current trace (partial or complete) must be a prefix of
1651 bool ModelChecker::isfeasibleprefix() const
1653 return pending_rel_seqs->size() == 0 && is_feasible_prefix_ignore_relseq();
1657 * Print diagnostic information about an infeasible execution
1658 * @param prefix A string to prefix the output with; if NULL, then a default
1659 * message prefix will be provided
1661 void ModelChecker::print_infeasibility(const char *prefix) const
1665 if (mo_graph->checkForCycles())
1666 ptr += sprintf(ptr, "[mo cycle]");
1667 if (priv->failed_promise)
1668 ptr += sprintf(ptr, "[failed promise]");
1669 if (priv->too_many_reads)
1670 ptr += sprintf(ptr, "[too many reads]");
1671 if (priv->no_valid_reads)
1672 ptr += sprintf(ptr, "[no valid reads-from]");
1673 if (priv->bad_synchronization)
1674 ptr += sprintf(ptr, "[bad sw ordering]");
1675 if (promises_expired())
1676 ptr += sprintf(ptr, "[promise expired]");
1677 if (promises->size() != 0)
1678 ptr += sprintf(ptr, "[unresolved promise]");
1680 model_print("%s: %s\n", prefix ? prefix : "Infeasible", buf);
1684 * Returns whether the current completed trace is feasible, except for pending
1685 * release sequences.
1687 bool ModelChecker::is_feasible_prefix_ignore_relseq() const
1689 return !is_infeasible() && promises->size() == 0;
1693 * Check if the current partial trace is infeasible. Does not check any
1694 * end-of-execution flags, which might rule out the execution. Thus, this is
1695 * useful only for ruling an execution as infeasible.
1696 * @return whether the current partial trace is infeasible.
1698 bool ModelChecker::is_infeasible() const
1700 return mo_graph->checkForCycles() ||
1701 priv->no_valid_reads ||
1702 priv->failed_promise ||
1703 priv->too_many_reads ||
1704 priv->bad_synchronization ||
1708 /** Close out an RMWR by converting the previous RMWR into a RMW or READ. */
1709 ModelAction * ModelChecker::process_rmw(ModelAction *act) {
1710 ModelAction *lastread = get_last_action(act->get_tid());
1711 lastread->process_rmw(act);
1712 if (act->is_rmw()) {
1713 if (lastread->get_reads_from())
1714 mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
1716 mo_graph->addRMWEdge(lastread->get_reads_from_promise(), lastread);
1717 mo_graph->commitChanges();
1723 * A helper function for ModelChecker::check_recency, to check if the current
1724 * thread is able to read from a different write/promise for 'params.maxreads'
1725 * number of steps and if that write/promise should become visible (i.e., is
1726 * ordered later in the modification order). This helps model memory liveness.
1728 * @param curr The current action. Must be a read.
1729 * @param rf The write/promise from which we plan to read
1730 * @param other_rf The write/promise from which we may read
1731 * @return True if we were able to read from other_rf for params.maxreads steps
1733 template <typename T, typename U>
1734 bool ModelChecker::should_read_instead(const ModelAction *curr, const T *rf, const U *other_rf) const
1736 /* Need a different write/promise */
1737 if (other_rf->equals(rf))
1740 /* Only look for "newer" writes/promises */
1741 if (!mo_graph->checkReachable(rf, other_rf))
1744 SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1745 action_list_t *list = &(*thrd_lists)[id_to_int(curr->get_tid())];
1746 action_list_t::reverse_iterator rit = list->rbegin();
1747 ASSERT((*rit) == curr);
1748 /* Skip past curr */
1751 /* Does this write/promise work for everyone? */
1752 for (int i = 0; i < params.maxreads; i++, rit++) {
1753 ModelAction *act = *rit;
1754 if (!act->may_read_from(other_rf))
1761 * Checks whether a thread has read from the same write or Promise too many
1762 * times without seeing the effects of a later write/Promise.
1765 * 1) there must be a different write/promise that we could read from,
1766 * 2) we must have read from the same write/promise in excess of maxreads times,
1767 * 3) that other write/promise must have been in the reads_from set for maxreads times, and
1768 * 4) that other write/promise must be mod-ordered after the write/promise we are reading.
1770 * If so, we decide that the execution is no longer feasible.
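*
* Illustrative sketch (assumption, not checker code): the typical target is a
* spin loop that keeps reading a stale value while a newer store is available:
*
*   // Thread 1                                  // Thread 2
*   while (!flag.load(memory_order_relaxed))     flag.store(true, memory_order_relaxed);
*           ;
*
* After params.maxreads reads that could instead have observed the newer
* store, the execution is declared infeasible rather than explored further.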
1772 * @param curr The current action. Must be a read.
1773 * @param rf The ModelAction/Promise from which we might read.
1774 * @return True if the read should succeed; false otherwise
1776 template <typename T>
1777 bool ModelChecker::check_recency(ModelAction *curr, const T *rf) const
1779 if (!params.maxreads)
1782 //NOTE: Next check is just an optimization, not really necessary....
1783 if (curr->get_node()->get_read_from_past_size() +
1784 curr->get_node()->get_read_from_promise_size() <= 1)
1787 SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1788 int tid = id_to_int(curr->get_tid());
1789 ASSERT(tid < (int)thrd_lists->size());
1790 action_list_t *list = &(*thrd_lists)[tid];
1791 action_list_t::reverse_iterator rit = list->rbegin();
1792 ASSERT((*rit) == curr);
1793 /* Skip past curr */
1796 action_list_t::reverse_iterator ritcopy = rit;
1797 /* See if we have enough reads from the same value */
1798 for (int count = 0; count < params.maxreads; ritcopy++, count++) {
1799 if (ritcopy == list->rend())
1801 ModelAction *act = *ritcopy;
1802 if (!act->is_read())
1804 if (act->get_reads_from_promise() && !act->get_reads_from_promise()->equals(rf))
1806 if (act->get_reads_from() && !act->get_reads_from()->equals(rf))
1808 if (act->get_node()->get_read_from_past_size() +
1809 act->get_node()->get_read_from_promise_size() <= 1)
1812 for (int i = 0; i < curr->get_node()->get_read_from_past_size(); i++) {
1813 const ModelAction *write = curr->get_node()->get_read_from_past(i);
1814 if (should_read_instead(curr, rf, write))
1815 return false; /* liveness failure */
1817 for (int i = 0; i < curr->get_node()->get_read_from_promise_size(); i++) {
1818 const Promise *promise = curr->get_node()->get_read_from_promise(i);
1819 if (should_read_instead(curr, rf, promise))
1820 return false; /* liveness failure */
1826 * @brief Updates the mo_graph with the constraints imposed from the current
1829 * Basic idea is the following: Go through each other thread and find
1830 * the last action that happened before our read. Two cases:
1832 * -# The action is a write: that write must either occur before
1833 * the write we read from or be the write we read from.
1834 * -# The action is a read: the write that that action read from
1835 * must occur before the write we read from or be the same write.
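*
* Illustrative sketch (assumption, not checker code) of the first case, using
* write-read coherence within a single thread:
*
*   x.store(1, memory_order_relaxed);   // W1: happens before the read below
*   r = x.load(memory_order_relaxed);   // curr: suppose it reads from W2,
*                                       //       a store in another thread
*
* Since W1 happens before curr and curr reads from W2, the edge
* W1 --mo--> W2 must be added (unless W1 is W2 itself).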
1837 * @param curr The current action. Must be a read.
1838 * @param rf The ModelAction or Promise that curr reads from. Must be a write.
1839 * @return True if modification order edges were added; false otherwise
1841 template <typename rf_type>
1842 bool ModelChecker::r_modification_order(ModelAction *curr, const rf_type *rf)
1844 SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1847 ASSERT(curr->is_read());
1849 /* Last SC fence in the current thread */
1850 ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1851 ModelAction *last_sc_write = NULL;
1852 if (curr->is_seqcst())
1853 last_sc_write = get_last_seq_cst_write(curr);
1855 /* Iterate over all threads */
1856 for (i = 0; i < thrd_lists->size(); i++) {
1857 /* Last SC fence in thread i */
1858 ModelAction *last_sc_fence_thread_local = NULL;
1859 if (int_to_id((int)i) != curr->get_tid())
1860 last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(i), NULL);
1862 /* Last SC fence in thread i, before last SC fence in current thread */
1863 ModelAction *last_sc_fence_thread_before = NULL;
1864 if (last_sc_fence_local)
1865 last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1867 /* Iterate over actions in thread, starting from most recent */
1868 action_list_t *list = &(*thrd_lists)[i];
1869 action_list_t::reverse_iterator rit;
1870 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1871 ModelAction *act = *rit;
1876 /* Don't want to add reflexive edges on 'rf' */
1877 if (act->equals(rf)) {
1878 if (act->happens_before(curr))
1884 if (act->is_write()) {
1885 /* C++, Section 29.3 statement 5 */
1886 if (curr->is_seqcst() && last_sc_fence_thread_local &&
1887 *act < *last_sc_fence_thread_local) {
1888 added = mo_graph->addEdge(act, rf) || added;
1891 /* C++, Section 29.3 statement 4 */
1892 else if (act->is_seqcst() && last_sc_fence_local &&
1893 *act < *last_sc_fence_local) {
1894 added = mo_graph->addEdge(act, rf) || added;
1897 /* C++, Section 29.3 statement 6 */
1898 else if (last_sc_fence_thread_before &&
1899 *act < *last_sc_fence_thread_before) {
1900 added = mo_graph->addEdge(act, rf) || added;
1905 /* C++, Section 29.3 statement 3 (second subpoint) */
1906 if (curr->is_seqcst() && last_sc_write && act == last_sc_write) {
1907 added = mo_graph->addEdge(act, rf) || added;
1912 * Include at most one act per-thread that "happens
1915 if (act->happens_before(curr)) {
1916 if (act->is_write()) {
1917 added = mo_graph->addEdge(act, rf) || added;
1919 const ModelAction *prevrf = act->get_reads_from();
1920 const Promise *prevrf_promise = act->get_reads_from_promise();
1922 if (!prevrf->equals(rf))
1923 added = mo_graph->addEdge(prevrf, rf) || added;
1924 } else if (!prevrf_promise->equals(rf)) {
1925 added = mo_graph->addEdge(prevrf_promise, rf) || added;
1934 * All compatible, thread-exclusive promises must be ordered after any
1935 * concrete loads from the same thread
1937 for (unsigned int i = 0; i < promises->size(); i++)
1938 if ((*promises)[i]->is_compatible_exclusive(curr))
1939 added = mo_graph->addEdge(rf, (*promises)[i]) || added;
1945 * Updates the mo_graph with the constraints imposed from the current write.
1947 * Basic idea is the following: Go through each other thread and find
1948 * the latest action that happened before our write. Two cases:
1950 * (1) The action is a write => that write must occur before
1953 * (2) The action is a read => the write that that action read from
1954 * must occur before the current write.
1956 * This method also handles two other issues:
1958 * (I) Sequential Consistency: Making sure that if the current write is
1959 * seq_cst, that it occurs after the previous seq_cst write.
1961 * (II) Sending the write back to non-synchronizing reads.
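*
* Illustrative sketch (assumption, not checker code) of case (2), using
* read-write coherence within a single thread:
*
*   r = x.load(memory_order_relaxed);   // reads from some write W1
*   x.store(2, memory_order_relaxed);   // curr
*
* Since the load happens before curr and reads from W1, the edge
* W1 --mo--> curr must be added.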
1963 * @param curr The current action. Must be a write.
1964 * @param send_fv A vector for stashing reads to which we may pass our future
1965 * value. If NULL, then don't record any future values.
1966 * @return True if modification order edges were added; false otherwise
1968 bool ModelChecker::w_modification_order(ModelAction *curr, ModelVector<ModelAction *> *send_fv)
1970 SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1973 ASSERT(curr->is_write());
1975 if (curr->is_seqcst()) {
1976 /* We have to at least see the last sequentially consistent write,
1977 so we are initialized. */
1978 ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
1979 if (last_seq_cst != NULL) {
1980 added = mo_graph->addEdge(last_seq_cst, curr) || added;
1984 /* Last SC fence in the current thread */
1985 ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1987 /* Iterate over all threads */
1988 for (i = 0; i < thrd_lists->size(); i++) {
1989 /* Last SC fence in thread i, before last SC fence in current thread */
1990 ModelAction *last_sc_fence_thread_before = NULL;
1991 if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
1992 last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1994 /* Iterate over actions in thread, starting from most recent */
1995 action_list_t *list = &(*thrd_lists)[i];
1996 action_list_t::reverse_iterator rit;
1997 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1998 ModelAction *act = *rit;
2001 * 1) If RMW and it actually read from something, then we
2002 * already have all relevant edges, so just skip to next
2005 * 2) If RMW and it didn't read from anything, we should add
2006 * whatever edge we can get to speed up convergence.
2008 * 3) If normal write, we need to look at earlier actions, so
2009 * continue processing list.
2011 if (curr->is_rmw()) {
2012 if (curr->get_reads_from() != NULL)
2020 /* C++, Section 29.3 statement 7 */
2021 if (last_sc_fence_thread_before && act->is_write() &&
2022 *act < *last_sc_fence_thread_before) {
2023 added = mo_graph->addEdge(act, curr) || added;
2028 * Include at most one act per-thread that "happens
2031 if (act->happens_before(curr)) {
2033 * Note: if act is RMW, just add edge:
2035 * The following edge should be handled elsewhere:
2036 * readfrom(act) --mo--> act
2038 if (act->is_write())
2039 added = mo_graph->addEdge(act, curr) || added;
2040 else if (act->is_read()) {
2041 //if previous read accessed a null, just keep going
2042 if (act->get_reads_from() == NULL)
2044 added = mo_graph->addEdge(act->get_reads_from(), curr) || added;
2047 } else if (act->is_read() && !act->could_synchronize_with(curr) &&
2048 !act->same_thread(curr)) {
2049 /* We have an action that:
2050 (1) did not happen before us
2051 (2) is a read and we are a write
2052 (3) cannot synchronize with us
2053 (4) is in a different thread
2055 that read could potentially read from our write. Note that
2056 these checks are overly conservative at this point; we'll
2057 do more checks before actually removing the promise.
2061 if (send_fv && thin_air_constraint_may_allow(curr, act)) {
2062 if (!is_infeasible())
2063 send_fv->push_back(act);
2064 else if (curr->is_rmw() && act->is_rmw() && curr->get_reads_from() && curr->get_reads_from() == act->get_reads_from())
2065 add_future_value(curr, act);
2072 * All compatible, thread-exclusive promises must be ordered after any
2073 * concrete stores to the same thread, or else they can be merged with this store later.
2076 for (unsigned int i = 0; i < promises->size(); i++)
2077 if ((*promises)[i]->is_compatible_exclusive(curr))
2078 added = mo_graph->addEdge(curr, (*promises)[i]) || added;
2083 /** Arbitrary reads from the future are not allowed. Section 29.3
2084 * part 9 places some constraints. This method checks one result of that
2085 * constraint. Others require compiler support. */
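/* Hypothetical illustration: with two RMWs on the same variable, if the
 * candidate writer's reads-from chain already passes through the reader,
 * letting the reader take its value from that writer would close a
 * reads-from cycle (an out-of-thin-air result), so this check rejects it. */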
2086 bool ModelChecker::thin_air_constraint_may_allow(const ModelAction *writer, const ModelAction *reader) const
2088 if (!writer->is_rmw())
2091 if (!reader->is_rmw())
2094 for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
2095 if (search == reader)
2097 if (search->get_tid() == reader->get_tid() &&
2098 search->happens_before(reader))
2106 * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
2107 * some constraints. This method checks the following constraint (others
2108 * require compiler support):
2110 * If X --hb-> Y --mo-> Z, then X should not read from Z.
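 *
 * For example (hypothetical): if reader X happens-before a later store Y to
 * the same location, and Y is modification-ordered before candidate writer Z,
 * then letting X read from Z would require a store that comes after the read
 * (in happens-before) to precede the read's source (in modification order),
 * which coherence forbids.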
2112 bool ModelChecker::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
2114 SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, reader->get_location());
2116 /* Iterate over all threads */
2117 for (i = 0; i < thrd_lists->size(); i++) {
2118 const ModelAction *write_after_read = NULL;
2120 /* Iterate over actions in thread, starting from most recent */
2121 action_list_t *list = &(*thrd_lists)[i];
2122 action_list_t::reverse_iterator rit;
2123 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2124 ModelAction *act = *rit;
2126 /* Don't disallow due to act == reader */
2127 if (!reader->happens_before(act) || reader == act)
2129 else if (act->is_write())
2130 write_after_read = act;
2131 else if (act->is_read() && act->get_reads_from() != NULL)
2132 write_after_read = act->get_reads_from();
2135 if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
2142 * Finds the head(s) of the release sequence(s) containing a given ModelAction.
2143 * The ModelAction under consideration is expected to be taking part in
2144 * release/acquire synchronization as an object of the "reads from" relation.
2145 * Note that this can only provide release sequence support for RMW chains
2146 * which do not read from the future, as those actions cannot be traced until
2147 * their "promise" is fulfilled. Similarly, we may not even establish the
2148 * presence of a release sequence with certainty, as some modification order
2149 * constraints may be decided further in the future. Thus, this function
2150 * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
2151 * and a boolean representing certainty.
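 *
 * Hypothetical sketch of a release sequence this function can recognize:
 *   T1: x.store(1, memory_order_release);        // head W
 *   T2: x.fetch_add(1, memory_order_relaxed);    // RMW, reads from W
 *   T3: r = x.load(memory_order_acquire);        // reads from the RMW
 * The RMW extends the release sequence headed by W, so the acquire load in
 * T3 synchronizes with W.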
2153 * @param rf The action that might be part of a release sequence. Must be a write.
2155 * @param release_heads A pass-by-reference style return parameter. After
2156 * execution of this function, release_heads will contain the heads of all the
2157 * relevant release sequences, if any exist with certainty
2158 * @param pending A pass-by-reference style return parameter which is only used
2159 * when returning false (i.e., uncertain). Returns most information regarding
2160 * an uncertain release sequence, including any write operations that might
2161 * break the sequence.
2162 * @return true if the ModelChecker is certain that release_heads is complete; false otherwise
2165 bool ModelChecker::release_seq_heads(const ModelAction *rf,
2166 rel_heads_list_t *release_heads,
2167 struct release_seq *pending) const
2169 /* Only check for release sequences if there are no cycles */
2170 if (mo_graph->checkForCycles())
2173 for ( ; rf != NULL; rf = rf->get_reads_from()) {
2174 ASSERT(rf->is_write());
2176 if (rf->is_release())
2177 release_heads->push_back(rf);
2178 else if (rf->get_last_fence_release())
2179 release_heads->push_back(rf->get_last_fence_release());
2181 break; /* End of RMW chain */
2183 /** @todo Need to be smarter here... In the linux lock
2184 * example, this will run to the beginning of the program for every acquire.
2186 /** @todo The way to be smarter here is to keep going until 1
2187 * thread has a release preceded by an acquire and you've seen both.
2190 /* acq_rel RMW is a sufficient stopping condition */
2191 if (rf->is_acquire() && rf->is_release())
2192 return true; /* complete */
2195 /* read from future: need to settle this later */
2197 return false; /* incomplete */
2200 if (rf->is_release())
2201 return true; /* complete */
2203 /* else relaxed write
2204 * - check for fence-release in the same thread (29.8, stmt. 3)
2205 * - check modification order for contiguous subsequence
2206 * -> rf must be same thread as release */
2208 const ModelAction *fence_release = rf->get_last_fence_release();
2209 /* Synchronize with a fence-release unconditionally; we don't need to
2210 * find any more "contiguous subsequence..." for it */
2212 release_heads->push_back(fence_release);
2214 int tid = id_to_int(rf->get_tid());
2215 SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, rf->get_location());
2216 action_list_t *list = &(*thrd_lists)[tid];
2217 action_list_t::const_reverse_iterator rit;
2219 /* Find rf in the thread list */
2220 rit = std::find(list->rbegin(), list->rend(), rf);
2221 ASSERT(rit != list->rend());
2223 /* Find the last {write,fence}-release */
2224 for (; rit != list->rend(); rit++) {
2225 if (fence_release && *(*rit) < *fence_release)
2227 if ((*rit)->is_release())
2230 if (rit == list->rend()) {
2231 /* No write-release in this thread */
2232 return true; /* complete */
2233 } else if (fence_release && *(*rit) < *fence_release) {
2234 /* The fence-release is more recent (and so, "stronger") than
2235 * the most recent write-release */
2236 return true; /* complete */
2237 } /* else, need to establish contiguous release sequence */
2238 ModelAction *release = *rit;
2240 ASSERT(rf->same_thread(release));
2242 pending->writes.clear();
2244 bool certain = true;
2245 for (unsigned int i = 0; i < thrd_lists->size(); i++) {
2246 if (id_to_int(rf->get_tid()) == (int)i)
2248 list = &(*thrd_lists)[i];
2250 /* Can we ensure no future writes from this thread may break
2251 * the release seq? */
2252 bool future_ordered = false;
2254 ModelAction *last = get_last_action(int_to_id(i));
2255 Thread *th = get_thread(int_to_id(i));
2256 if ((last && rf->happens_before(last)) ||
2259 future_ordered = true;
2261 ASSERT(!th->is_model_thread() || future_ordered);
2263 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2264 const ModelAction *act = *rit;
2265 /* Reach synchronization -> this thread is complete */
2266 if (act->happens_before(release))
2268 if (rf->happens_before(act)) {
2269 future_ordered = true;
2273 /* Only non-RMW writes can break release sequences */
2274 if (!act->is_write() || act->is_rmw())
2277 /* Check modification order */
2278 if (mo_graph->checkReachable(rf, act)) {
2279 /* rf --mo--> act */
2280 future_ordered = true;
2283 if (mo_graph->checkReachable(act, release))
2284 /* act --mo--> release */
2286 if (mo_graph->checkReachable(release, act) &&
2287 mo_graph->checkReachable(act, rf)) {
2288 /* release --mo-> act --mo--> rf */
2289 return true; /* complete */
2291 /* act may break release sequence */
2292 pending->writes.push_back(act);
2295 if (!future_ordered)
2296 certain = false; /* This thread is uncertain */
2300 release_heads->push_back(release);
2301 pending->writes.clear();
2303 pending->release = release;
2310 * An interface for getting the release sequence head(s) with which a
2311 * given ModelAction must synchronize. This function only returns a non-empty
2312 * result when it can locate a release sequence head with certainty. Otherwise,
2313 * it may mark the internal state of the ModelChecker so that it will handle
2314 * the release sequence at a later time, causing @a acquire to update its
2315 * synchronization at some later point in execution.
2317 * @param acquire The 'acquire' action that may synchronize with a release
2319 * @param read The read action that may read from a release sequence; this may
2320 * be the same as acquire, or else an earlier action in the same thread (i.e.,
2321 * when 'acquire' is a fence-acquire)
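 *
 * For example (hypothetical): a relaxed load followed by an acquire fence in
 * the same thread -- the fence is the 'acquire' parameter, while the earlier
 * relaxed load is the 'read' whose reads-from value anchors the search.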
2322 * @param release_heads A pass-by-reference return parameter. Will be filled
2323 * with the head(s) of the release sequence(s), if they exist with certainty.
2324 * @see ModelChecker::release_seq_heads
2326 void ModelChecker::get_release_seq_heads(ModelAction *acquire,
2327 ModelAction *read, rel_heads_list_t *release_heads)
2329 const ModelAction *rf = read->get_reads_from();
2330 struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
2331 sequence->acquire = acquire;
2332 sequence->read = read;
2334 if (!release_seq_heads(rf, release_heads, sequence)) {
2335 /* add act to 'lazy checking' list */
2336 pending_rel_seqs->push_back(sequence);
2338 snapshot_free(sequence);
2343 * Attempt to resolve all stashed operations that might synchronize with a
2344 * release sequence for a given location. This implements the "lazy" portion of
2345 * determining whether or not a release sequence was contiguous, since not all
2346 * modification order information is present at the time an action occurs.
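 *
 * For example (hypothetical): a release sequence may be left pending because
 * a relaxed store from another thread is not yet ordered with respect to it;
 * once later modification-order edges place that store outside the sequence,
 * the stashed acquire can finally synchronize with the release head.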
2348 * @param location The location/object that should be checked for release
2349 * sequence resolutions. A NULL value means to check all locations.
2350 * @param work_queue The work queue to which to add work items as they are generated
2352 * @return True if any updates occurred (new synchronization, new mo_graph edges)
2355 bool ModelChecker::resolve_release_sequences(void *location, work_queue_t *work_queue)
2357 bool updated = false;
2358 SnapVector<struct release_seq *>::iterator it = pending_rel_seqs->begin();
2359 while (it != pending_rel_seqs->end()) {
2360 struct release_seq *pending = *it;
2361 ModelAction *acquire = pending->acquire;
2362 const ModelAction *read = pending->read;
2364 /* Only resolve sequences on the given location, if provided */
2365 if (location && read->get_location() != location) {
2370 const ModelAction *rf = read->get_reads_from();
2371 rel_heads_list_t release_heads;
2373 bool complete = release_seq_heads(rf, &release_heads, pending);
2374 for (unsigned int i = 0; i < release_heads.size(); i++) {
2375 if (!acquire->has_synchronized_with(release_heads[i])) {
2376 if (acquire->synchronize_with(release_heads[i]))
2379 set_bad_synchronization();
2384 /* Re-check all pending release sequences */
2385 work_queue->push_back(CheckRelSeqWorkEntry(NULL));
2386 /* Re-check read-acquire for mo_graph edges */
2387 if (acquire->is_read())
2388 work_queue->push_back(MOEdgeWorkEntry(acquire));
2390 /* propagate synchronization to later actions */
2391 action_list_t::reverse_iterator rit = action_trace->rbegin();
2392 for (; (*rit) != acquire; rit++) {
2393 ModelAction *propagate = *rit;
2394 if (acquire->happens_before(propagate)) {
2395 propagate->synchronize_with(acquire);
2396 /* Re-check 'propagate' for mo_graph edges */
2397 work_queue->push_back(MOEdgeWorkEntry(propagate));
2402 it = pending_rel_seqs->erase(it);
2403 snapshot_free(pending);
2409 // If we resolved promises or data races, see if we have realized a data race.
2416 * Performs various bookkeeping operations for the current ModelAction. For
2417 * instance, adds the action to the per-object, per-thread action vector and to the
2418 * action trace list of all thread actions.
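 *
 * For example (hypothetical): an atomic store to location x by thread 2 is
 * appended to the per-object list for x, to the per-object/per-thread vector
 * entry for (x, thread 2), and to the global action trace.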
2420 * @param act is the ModelAction to add.
2422 void ModelChecker::add_action_to_lists(ModelAction *act)
2424 int tid = id_to_int(act->get_tid());
2425 ModelAction *uninit = NULL;
2427 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
2428 if (list->empty() && act->is_atomic_var()) {
2429 uninit = get_uninitialized_action(act);
2430 uninit_id = id_to_int(uninit->get_tid());
2431 list->push_front(uninit);
2433 list->push_back(act);
2435 action_trace->push_back(act);
2437 action_trace->push_front(uninit);
2439 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, act->get_location());
2440 if (tid >= (int)vec->size())
2441 vec->resize(priv->next_thread_id);
2442 (*vec)[tid].push_back(act);
2444 (*vec)[uninit_id].push_front(uninit);
2446 if ((int)thrd_last_action->size() <= tid)
2447 thrd_last_action->resize(get_num_threads());
2448 (*thrd_last_action)[tid] = act;
2450 (*thrd_last_action)[uninit_id] = uninit;
2452 if (act->is_fence() && act->is_release()) {
2453 if ((int)thrd_last_fence_release->size() <= tid)
2454 thrd_last_fence_release->resize(get_num_threads());
2455 (*thrd_last_fence_release)[tid] = act;
2458 if (act->is_wait()) {
2459 void *mutex_loc = (void *) act->get_value();
2460 get_safe_ptr_action(obj_map, mutex_loc)->push_back(act);
2462 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, mutex_loc);
2463 if (tid >= (int)vec->size())
2464 vec->resize(priv->next_thread_id);
2465 (*vec)[tid].push_back(act);
2470 * @brief Get the last action performed by a particular Thread
2471 * @param tid The thread ID of the Thread in question
2472 * @return The last action in the thread
2474 ModelAction * ModelChecker::get_last_action(thread_id_t tid) const
2476 int threadid = id_to_int(tid);
2477 if (threadid < (int)thrd_last_action->size())
2478 return (*thrd_last_action)[id_to_int(tid)];
2484 * @brief Get the last fence release performed by a particular Thread
2485 * @param tid The thread ID of the Thread in question
2486 * @return The last fence release in the thread, if one exists; NULL otherwise
2488 ModelAction * ModelChecker::get_last_fence_release(thread_id_t tid) const
2490 int threadid = id_to_int(tid);
2491 if (threadid < (int)thrd_last_fence_release->size())
2492 return (*thrd_last_fence_release)[id_to_int(tid)];
2498 * Gets the last memory_order_seq_cst write (in the total global sequence)
2499 * performed on a particular object (i.e., memory location), not including the current action.
2501 * @param curr The current ModelAction; also denotes the object location to check
2503 * @return The last seq_cst write
2505 ModelAction * ModelChecker::get_last_seq_cst_write(ModelAction *curr) const
2507 void *location = curr->get_location();
2508 action_list_t *list = get_safe_ptr_action(obj_map, location);
2509 /* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
2510 action_list_t::reverse_iterator rit;
2511 for (rit = list->rbegin(); (*rit) != curr; rit++)
		;
2513 rit++; /* Skip past curr */
2514 for ( ; rit != list->rend(); rit++)
2515 if ((*rit)->is_write() && (*rit)->is_seqcst())
2521 * Gets the last memory_order_seq_cst fence (in the total global sequence)
2522 * performed in a particular thread, prior to a particular fence.
2523 * @param tid The ID of the thread to check
2524 * @param before_fence The fence from which to begin the search; if NULL, then
2525 * search for the most recent fence in the thread.
2526 * @return The last prior seq_cst fence in the thread, if one exists; otherwise NULL
2528 ModelAction * ModelChecker::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
2530 /* All fences should have NULL location */
2531 action_list_t *list = get_safe_ptr_action(obj_map, NULL);
2532 action_list_t::reverse_iterator rit = list->rbegin();
2535 for (; rit != list->rend(); rit++)
2536 if (*rit == before_fence)
2539 ASSERT(*rit == before_fence);
2543 for (; rit != list->rend(); rit++)
2544 if ((*rit)->is_fence() && (tid == (*rit)->get_tid()) && (*rit)->is_seqcst())
2550 * Gets the last unlock operation performed on a particular mutex (i.e., memory
2551 * location). This function identifies the mutex according to the current
2552 * action, which is presumed to operate on the same mutex.
2553 * @param curr The current ModelAction; also denotes the object location to check
2555 * @return The last unlock operation
2557 ModelAction * ModelChecker::get_last_unlock(ModelAction *curr) const
2559 void *location = curr->get_location();
2560 action_list_t *list = get_safe_ptr_action(obj_map, location);
2561 /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
2562 action_list_t::reverse_iterator rit;
2563 for (rit = list->rbegin(); rit != list->rend(); rit++)
2564 if ((*rit)->is_unlock() || (*rit)->is_wait())
2569 ModelAction * ModelChecker::get_parent_action(thread_id_t tid) const
2571 ModelAction *parent = get_last_action(tid);
2573 parent = get_thread(tid)->get_creation();
2578 * Returns the clock vector for a given thread.
2579 * @param tid The thread whose clock vector we want
2580 * @return Desired clock vector
2582 ClockVector * ModelChecker::get_cv(thread_id_t tid) const
2584 return get_parent_action(tid)->get_cv();
2588 * @brief Find the promise (if any) to resolve for the current action and
2589 * remove it from the pending promise vector
2590 * @param curr The current ModelAction. Should be a write.
2591 * @return The Promise to resolve, if any; otherwise NULL
2593 Promise * ModelChecker::pop_promise_to_resolve(const ModelAction *curr)
2595 for (unsigned int i = 0; i < promises->size(); i++)
2596 if (curr->get_node()->get_promise(i)) {
2597 Promise *ret = (*promises)[i];
2598 promises->erase(promises->begin() + i);
2605 * Resolve a Promise with a current write.
2606 * @param write The ModelAction that is fulfilling Promises
2607 * @param promise The Promise to resolve
2608 * @return True if the Promise was successfully resolved; false otherwise
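 *
 * Roughly (hypothetical example): if a load in thread 2 speculatively took
 * the value 42 through this Promise, and thread 1 now performs a store of 42
 * to the same location, this routine points the promised readers at that
 * store and asks the CycleGraph to fold the promise into the store's node.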
2610 bool ModelChecker::resolve_promise(ModelAction *write, Promise *promise)
2612 ModelVector<ModelAction *> actions_to_check;
2614 for (unsigned int i = 0; i < promise->get_num_readers(); i++) {
2615 ModelAction *read = promise->get_reader(i);
2616 read_from(read, write);
2617 actions_to_check.push_back(read);
2619 /* Make sure the promise's value matches the write's value */
2620 ASSERT(promise->is_compatible(write) && promise->same_value(write));
2621 if (!mo_graph->resolvePromise(promise, write))
2622 priv->failed_promise = true;
2625 * @todo It is possible to end up in an inconsistent state, where a
2626 * "resolved" promise may still be referenced if
2627 * CycleGraph::resolvePromise() failed, so don't delete 'promise'.
2629 * Note that the inconsistency only matters when dumping mo_graph to file.
2635 //Check whether reading these writes has made threads unable to resolve promises
2637 for (unsigned int i = 0; i < actions_to_check.size(); i++) {
2638 ModelAction *read = actions_to_check[i];
2639 mo_check_promises(read, true);
2646 * Compute the set of promises that could potentially be satisfied by this
2647 * action. Note that the set computation actually appears in the Node, not in the ModelChecker itself.
2649 * @param curr The ModelAction that may satisfy promises
2651 void ModelChecker::compute_promises(ModelAction *curr)
2653 for (unsigned int i = 0; i < promises->size(); i++) {
2654 Promise *promise = (*promises)[i];
2655 if (!promise->is_compatible(curr) || !promise->same_value(curr))
2658 bool satisfy = true;
2659 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2660 const ModelAction *act = promise->get_reader(j);
2661 if (act->happens_before(curr) ||
2662 act->could_synchronize_with(curr)) {
2668 curr->get_node()->set_promise(i);
2672 /** Checks promises in response to a change in a thread's ClockVector. */
2673 void ModelChecker::check_promises(thread_id_t tid, ClockVector *old_cv, ClockVector *merge_cv)
2675 for (unsigned int i = 0; i < promises->size(); i++) {
2676 Promise *promise = (*promises)[i];
2677 if (!promise->thread_is_available(tid))
2679 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2680 const ModelAction *act = promise->get_reader(j);
2681 if ((!old_cv || !old_cv->synchronized_since(act)) &&
2682 merge_cv->synchronized_since(act)) {
2683 if (promise->eliminate_thread(tid)) {
2684 /* Promise has failed */
2685 priv->failed_promise = true;
2693 void ModelChecker::check_promises_thread_disabled()
2695 for (unsigned int i = 0; i < promises->size(); i++) {
2696 Promise *promise = (*promises)[i];
2697 if (promise->has_failed()) {
2698 priv->failed_promise = true;
2705 * @brief Checks promises in response to an addition to the modification order
2708 * We test whether threads are still available for satisfying promises after an
2709 * addition to our modification order constraints. Those that are unavailable
2710 * are "eliminated". Once all threads are eliminated from satisfying a promise,
2711 * that promise has failed.
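 *
 * For instance (roughly): if new modification-order edges force every store
 * that thread 3 could still perform to be ordered so the promised readers
 * could never observe it, thread 3 is eliminated; once no threads remain,
 * the promise has failed and the execution becomes infeasible.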
2713 * @param act The ModelAction which updated the modification order
2714 * @param is_read_check Should be true if act is a read and we must check for
2715 * updates to the store from which it read (there is a distinction here for
2716 * RMW's, which are both a load and a store)
2718 void ModelChecker::mo_check_promises(const ModelAction *act, bool is_read_check)
2720 const ModelAction *write = is_read_check ? act->get_reads_from() : act;
2722 for (unsigned int i = 0; i < promises->size(); i++) {
2723 Promise *promise = (*promises)[i];
2725 // Is this promise on the same location?
2726 if (!promise->same_location(write))
2729 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2730 const ModelAction *pread = promise->get_reader(j);
2731 if (!pread->happens_before(act))
2733 if (mo_graph->checkPromise(write, promise)) {
2734 priv->failed_promise = true;
2740 // Don't do any lookups twice for the same thread
2741 if (!promise->thread_is_available(act->get_tid()))
2744 if (mo_graph->checkReachable(promise, write)) {
2745 if (mo_graph->checkPromise(write, promise)) {
2746 priv->failed_promise = true;
2754 * Compute the set of writes that may break the current pending release
2755 * sequence. This information is extracted from previous release sequence calculations.
2758 * @param curr The current ModelAction. Must be a release sequence fixup action.
2761 void ModelChecker::compute_relseq_breakwrites(ModelAction *curr)
2763 if (pending_rel_seqs->empty())
2766 struct release_seq *pending = pending_rel_seqs->back();
2767 for (unsigned int i = 0; i < pending->writes.size(); i++) {
2768 const ModelAction *write = pending->writes[i];
2769 curr->get_node()->add_relseq_break(write);
2772 /* NULL means don't break the sequence; just synchronize */
2773 curr->get_node()->add_relseq_break(NULL);
2777 * Build up an initial set of all past writes that this 'read' action may read
2778 * from, as well as any previously-observed future values that must still be valid.
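 *
 * For example (hypothetical): for a relaxed load of x, the candidate set
 * contains every earlier store to x (from any thread) that is not ruled out
 * by the checks below, plus the values promised by any pending Promise on x.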
2780 * @param curr is the current ModelAction that we are exploring; it must be a read.
2783 void ModelChecker::build_may_read_from(ModelAction *curr)
2785 SnapVector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
2787 ASSERT(curr->is_read());
2789 ModelAction *last_sc_write = NULL;
2791 if (curr->is_seqcst())
2792 last_sc_write = get_last_seq_cst_write(curr);
2794 /* Iterate over all threads */
2795 for (i = 0; i < thrd_lists->size(); i++) {
2796 /* Iterate over actions in thread, starting from most recent */
2797 action_list_t *list = &(*thrd_lists)[i];
2798 action_list_t::reverse_iterator rit;
2799 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2800 ModelAction *act = *rit;
2802 /* Only consider 'write' actions */
2803 if (!act->is_write() || act == curr)
2806 /* Don't consider more than one seq_cst write if we are a seq_cst read. */
2807 bool allow_read = true;
2809 if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
2811 else if (curr->get_sleep_flag() && !curr->is_seqcst() && !sleep_can_read_from(curr, act))
2815 /* Only add feasible reads */
2816 mo_graph->startChanges();
2817 r_modification_order(curr, act);
2818 if (!is_infeasible())
2819 curr->get_node()->add_read_from_past(act);
2820 mo_graph->rollbackChanges();
2823 /* Include at most one act per-thread that "happens before" curr */
2824 if (act->happens_before(curr))
2829 /* Inherit existing, promised future values */
2830 for (i = 0; i < promises->size(); i++) {
2831 const Promise *promise = (*promises)[i];
2832 const ModelAction *promise_read = promise->get_reader(0);
2833 if (promise_read->same_var(curr)) {
2834 /* Only add feasible future-values */
2835 mo_graph->startChanges();
2836 r_modification_order(curr, promise);
2837 if (!is_infeasible())
2838 curr->get_node()->add_read_from_promise(promise_read);
2839 mo_graph->rollbackChanges();
2843 /* We may find no valid may-read-from only if the execution is doomed */
2844 if (!curr->get_node()->read_from_size()) {
2845 priv->no_valid_reads = true;
2849 if (DBG_ENABLED()) {
2850 model_print("Reached read action:\n");
2852 model_print("Printing read_from_past\n");
2853 curr->get_node()->print_read_from_past();
2854 model_print("End printing read_from_past\n");
2858 bool ModelChecker::sleep_can_read_from(ModelAction *curr, const ModelAction *write)
2860 for ( ; write != NULL; write = write->get_reads_from()) {
2861 /* UNINIT actions don't have a Node, and they never sleep */
2862 if (write->is_uninitialized())
2864 Node *prevnode = write->get_node()->get_parent();
2866 bool thread_sleep = prevnode->enabled_status(curr->get_tid()) == THREAD_SLEEP_SET;
2867 if (write->is_release() && thread_sleep)
2869 if (!write->is_rmw())
2876 * @brief Get an action representing an uninitialized atomic
2878 * This function may create a new one or try to retrieve one from the NodeStack
2880 * @param curr The current action, which prompts the creation of an UNINIT action
2881 * @return A pointer to the UNINIT ModelAction
2883 ModelAction * ModelChecker::get_uninitialized_action(const ModelAction *curr) const
2885 Node *node = curr->get_node();
2886 ModelAction *act = node->get_uninit_action();
2888 act = new ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, curr->get_location(), model->params.uninitvalue, model_thread);
2889 node->set_uninit_action(act);
2891 act->create_cv(NULL);
2895 static void print_list(action_list_t *list)
2897 action_list_t::iterator it;
2899 model_print("---------------------------------------------------------------------\n");
2901 unsigned int hash = 0;
2903 for (it = list->begin(); it != list->end(); it++) {
2904 const ModelAction *act = *it;
2905 if (act->get_seq_number() > 0)
2907 hash = hash^(hash<<3)^((*it)->hash());
2909 model_print("HASH %u\n", hash);
2910 model_print("---------------------------------------------------------------------\n");
2913 #if SUPPORT_MOD_ORDER_DUMP
2914 void ModelChecker::dumpGraph(char *filename) const
2917 sprintf(buffer, "%s.dot", filename);
2918 FILE *file = fopen(buffer, "w");
2919 fprintf(file, "digraph %s {\n", filename);
2920 mo_graph->dumpNodes(file);
2921 ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());
2923 for (action_list_t::iterator it = action_trace->begin(); it != action_trace->end(); it++) {
2924 ModelAction *act = *it;
2925 if (act->is_read()) {
2926 mo_graph->dot_print_node(file, act);
2927 if (act->get_reads_from())
2928 mo_graph->dot_print_edge(file,
2929 act->get_reads_from(),
2931 "label=\"rf\", color=red, weight=2");
2933 mo_graph->dot_print_edge(file,
2934 act->get_reads_from_promise(),
2936 "label=\"rf\", color=red");
2938 if (thread_array[act->get_tid()]) {
2939 mo_graph->dot_print_edge(file,
2940 thread_array[id_to_int(act->get_tid())],
2942 "label=\"sb\", color=blue, weight=400");
2945 thread_array[act->get_tid()] = act;
2947 fprintf(file, "}\n");
2948 model_free(thread_array);
2953 /** @brief Prints an execution trace summary. */
2954 void ModelChecker::print_summary() const
2956 #if SUPPORT_MOD_ORDER_DUMP
2957 char buffername[100];
2958 sprintf(buffername, "exec%04u", stats.num_total);
2959 mo_graph->dumpGraphToFile(buffername);
2960 sprintf(buffername, "graph%04u", stats.num_total);
2961 dumpGraph(buffername);
2964 model_print("Execution %d:", stats.num_total);
2965 if (isfeasibleprefix()) {
2966 if (scheduler->all_threads_sleeping())
2967 model_print(" SLEEP-SET REDUNDANT");
2970 print_infeasibility(" INFEASIBLE");
2971 print_list(action_trace);
2973 if (!promises->empty()) {
2974 model_print("Pending promises:\n");
2975 for (unsigned int i = 0; i < promises->size(); i++) {
2976 model_print(" [P%u] ", i);
2977 (*promises)[i]->print();
2984 * Add a Thread to the system for the first time. Should only be called once per thread.
2986 * @param t The Thread to add
2988 void ModelChecker::add_thread(Thread *t)
2990 thread_map->put(id_to_int(t->get_id()), t);
2991 scheduler->add_thread(t);
2995 * @brief Get a Thread reference by its ID
2996 * @param tid The Thread's ID
2997 * @return A Thread reference
2999 Thread * ModelChecker::get_thread(thread_id_t tid) const
3001 return thread_map->get(id_to_int(tid));
3005 * @brief Get a reference to the Thread in which a ModelAction was executed
3006 * @param act The ModelAction
3007 * @return A Thread reference
3009 Thread * ModelChecker::get_thread(const ModelAction *act) const
3011 return get_thread(act->get_tid());
3015 * @brief Get a Promise's "promise number"
3017 * A "promise number" is an index number that is unique to a promise, valid
3018 * only for a specific snapshot of an execution trace. Promises may come and go
3019 * as they are generated and resolved, so an index only retains meaning for the current snapshot.
3022 * @param promise The Promise to check
3023 * @return The promise index, if the promise is still valid; otherwise -1
3025 int ModelChecker::get_promise_number(const Promise *promise) const
3027 for (unsigned int i = 0; i < promises->size(); i++)
3028 if ((*promises)[i] == promise)
3035 * @brief Check if a Thread is currently enabled
3036 * @param t The Thread to check
3037 * @return True if the Thread is currently enabled
3039 bool ModelChecker::is_enabled(Thread *t) const
3041 return scheduler->is_enabled(t);
3045 * @brief Check if a Thread is currently enabled
3046 * @param tid The ID of the Thread to check
3047 * @return True if the Thread is currently enabled
3049 bool ModelChecker::is_enabled(thread_id_t tid) const
3051 return scheduler->is_enabled(tid);
3055 * Switch from a model-checker context to a user-thread context. This is the
3056 * complement of ModelChecker::switch_to_master and must be called from the
3057 * model-checker context
3059 * @param thread The user-thread to switch to
3061 void ModelChecker::switch_from_master(Thread *thread)
3063 scheduler->set_current_thread(thread);
3064 Thread::swap(&system_context, thread);
3068 * Switch from a user-context to the "master thread" context (a.k.a. system
3069 * context). This switch is made with the intention of exploring a particular
3070 * model-checking action (described by a ModelAction object). Must be called
3071 * from a user-thread context.
3073 * @param act The current action that will be explored. May be NULL only if
3074 * trace is exiting via an assertion (see ModelChecker::set_assert and
3075 * ModelChecker::has_asserted).
3076 * @return Return the value returned by the current action
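 *
 * Hypothetical caller sketch (user-thread context):
 *   uint64_t val = model->switch_to_master(
 *           new ModelAction(ATOMIC_READ, std::memory_order_relaxed, obj));
 * Here 'val' is the value the model checker selected for that load.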
3078 uint64_t ModelChecker::switch_to_master(ModelAction *act)
3081 Thread *old = thread_current();
3082 scheduler->set_current_thread(NULL);
3083 ASSERT(!old->get_pending());
3084 old->set_pending(act);
3085 if (Thread::swap(old, &system_context) < 0) {
3086 perror("swap threads");
3089 return old->get_return_value();
3093 * Takes the next step in the execution, if possible.
3094 * @param curr The current step to take
3095 * @return Returns the next Thread to run, if any; NULL if this execution should terminate
3098 Thread * ModelChecker::take_step(ModelAction *curr)
3100 Thread *curr_thrd = get_thread(curr);
3101 ASSERT(curr_thrd->get_state() == THREAD_READY);
3103 ASSERT(check_action_enabled(curr)); /* May have side effects? */
3104 curr = check_current_action(curr);
3107 if (curr_thrd->is_blocked() || curr_thrd->is_complete())
3108 scheduler->remove_thread(curr_thrd);
3110 return action_select_next_thread(curr);
3113 /** Wrapper to run the user's main function, with appropriate arguments */
3114 void user_main_wrapper(void *)
3116 user_main(model->params.argc, model->params.argv);
3119 bool ModelChecker::should_terminate_execution()
3121 /* Infeasible -> don't take any more steps */
3122 if (is_infeasible())
3124 else if (isfeasibleprefix() && have_bug_reports()) {
3129 if (params.bound != 0 && priv->used_sequence_numbers > params.bound)
3134 /** @brief Run ModelChecker for the user program */
3135 void ModelChecker::run()
3139 Thread *t = new Thread(&user_thread, &user_main_wrapper, NULL, NULL);
3144 * Stash next pending action(s) for thread(s). There
3145 * should only need to stash one thread's action--the
3146 * thread which just took a step--plus the first step
3147 * for any newly-created thread
3149 for (unsigned int i = 0; i < get_num_threads(); i++) {
3150 thread_id_t tid = int_to_id(i);
3151 Thread *thr = get_thread(tid);
3152 if (!thr->is_model_thread() && !thr->is_complete() && !thr->get_pending()) {
3153 switch_from_master(thr);
3154 if (thr->is_waiting_on(thr))
3155 assert_bug("Deadlock detected (thread %u)", i);
3159 /* Don't schedule threads which should be disabled */
3160 for (unsigned int i = 0; i < get_num_threads(); i++) {
3161 Thread *th = get_thread(int_to_id(i));
3162 ModelAction *act = th->get_pending();
3163 if (act && is_enabled(th) && !check_action_enabled(act)) {
3164 scheduler->sleep(th);
3168 /* Catch assertions from prior take_step or from
3169 * between-ModelAction bugs (e.g., data races) */
3174 t = get_next_thread();
3175 if (!t || t->is_model_thread())
3178 /* Consume the next action for a Thread */
3179 ModelAction *curr = t->get_pending();
3180 t->set_pending(NULL);
3181 t = take_step(curr);
3182 } while (!should_terminate_execution());
3185 * Launch end-of-execution release sequence fixups only when
3186 * the execution is otherwise feasible AND there are:
3188 * (1) pending release sequences
3189 * (2) pending assertions that could be invalidated by a change
3190 * in clock vectors (i.e., data races)
3191 * (3) no pending promises
3193 while (!pending_rel_seqs->empty() &&
3194 is_feasible_prefix_ignore_relseq() &&
3195 !unrealizedraces.empty()) {
3196 model_print("*** WARNING: release sequence fixup action "
3197 "(%zu pending release sequence(s)) ***\n",
3198 pending_rel_seqs->size());
3199 ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
3200 std::memory_order_seq_cst, NULL, VALUE_NONE,
3204 } while (next_execution());
3206 model_print("******* Model-checking complete: *******\n");