10 #include "snapshot-interface.h"
12 #include "clockvector.h"
13 #include "cyclegraph.h"
16 #include "threads-model.h"
19 #define INITIAL_THREAD_ID 0
/* bug_message: one bug-report string, stored on the snapshotting heap so it
 * survives/rolls back with execution snapshots.
 * NOTE(review): this extract is partial -- the struct header, the `msg`
 * member declaration, and closing brace are elided; lines keep their
 * original-file line-number prefixes from the extraction. */
24 bug_message(const char *str) {
25 const char *fmt = " [BUG] %s\n";
/* fmt's "%s" (2 bytes) is replaced by str, so strlen(fmt)+strlen(str)
 * bytes cover the formatted text plus the terminating NUL. */
26 msg = (char *)snapshot_malloc(strlen(fmt) + strlen(str));
27 sprintf(msg, fmt, str);
/* Release the snapshot-heap buffer with the matching snapshot_free. */
29 ~bug_message() { if (msg) snapshot_free(msg); }
32 void print() { model_print("%s", msg); }
12 * Structure for holding small ModelChecker members that should be snapshotted
/* Per-execution state that must roll back with each snapshot restore.
 * NOTE(review): extract is partial -- some initializers (e.g. for
 * current_action/next_backtrack/asserted), the destructor body that
 * deletes each bugs[i], and the closing brace are elided here. */
40 struct model_snapshot_members {
41 model_snapshot_members() :
43 /* First thread created will have id INITIAL_THREAD_ID */
44 next_thread_id(INITIAL_THREAD_ID),
45 used_sequence_numbers(0),
49 failed_promise(false),
50 too_many_reads(false),
51 bad_synchronization(false),
/* Destructor frees the heap-allocated bug_message entries (delete of
 * bugs[i] is on an elided line). */
55 ~model_snapshot_members() {
56 for (unsigned int i = 0; i < bugs.size(); i++)
/* The action currently being processed (set just before a context switch
 * into the model-checker; see set_current_action()). */
61 ModelAction *current_action;
/* Also doubles as the count of threads created so far (see
 * get_num_threads()). */
62 unsigned int next_thread_id;
63 modelclock_t used_sequence_numbers;
/* Most recent backtracking point cached by set_latest_backtrack(). */
64 ModelAction *next_backtrack;
/* Bug reports use the snapshotting allocator so they persist correctly
 * across rollbacks. */
65 std::vector< bug_message *, SnapshotAlloc<bug_message *> > bugs;
66 struct execution_stats stats;
69 /** @brief Incorrectly-ordered synchronization was made */
70 bool bad_synchronization;
31 /** @brief Constructor */
/* Builds all per-run data structures. Snapshot-allocated containers are
 * used for state that must roll back between executions.
 * NOTE(review): extract is partial -- some initializers (e.g. params,
 * diverge) and the constructor's opening/closing braces are elided. */
32 ModelChecker::ModelChecker(struct model_params params) :
33 /* Initialize default scheduler */
80 scheduler(new Scheduler()),
82 earliest_diverge(NULL),
83 action_trace(new action_list_t()),
84 thread_map(new HashTable<int, Thread *, int>()),
/* Per-location action lists, keyed by memory address. */
85 obj_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
86 lock_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
87 condvar_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
/* Per-location, per-thread action lists. */
88 obj_thrd_map(new HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4 >()),
89 promises(new std::vector< Promise *, SnapshotAlloc<Promise *> >()),
90 futurevalues(new std::vector< struct PendingFutureValue, SnapshotAlloc<struct PendingFutureValue> >()),
91 pending_rel_seqs(new std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >()),
/* Sized 1 up-front so the model thread's slot exists immediately. */
92 thrd_last_action(new std::vector< ModelAction *, SnapshotAlloc<ModelAction *> >(1)),
93 thrd_last_fence_release(new std::vector< ModelAction *, SnapshotAlloc<ModelAction *> >()),
94 node_stack(new NodeStack()),
95 priv(new struct model_snapshot_members()),
96 mo_graph(new CycleGraph())
50 /* Initialize a model-checker thread, for special ModelActions */
/* The model thread consumes thread id INITIAL_THREAD_ID, so user threads
 * start at id 1. */
99 model_thread = new Thread(get_next_id());
100 thread_map->put(id_to_int(model_thread->get_id()), model_thread);
53 /** @brief Destructor */
/* Tears down all structures built in the constructor.
 * NOTE(review): extract is partial -- deletes for thread_map, obj_map,
 * promises, futurevalues, action_trace, node_stack, priv, mo_graph and
 * the braces are on elided lines. */
54 ModelChecker::~ModelChecker()
/* Thread ids are dense ints [0, num_threads), so indexing the map by i
 * visits every created thread. */
106 for (unsigned int i = 0; i < get_num_threads(); i++)
107 delete thread_map->get(i);
112 delete lock_waiters_map;
113 delete condvar_waiters_map;
/* Promises are owned by this vector; free each before (elided) vector
 * deletion. */
116 for (unsigned int i = 0; i < promises->size(); i++)
117 delete (*promises)[i];
120 delete pending_rel_seqs;
122 delete thrd_last_action;
123 delete thrd_last_fence_release;
/* Get-or-create lookup: returns the action list stored under ptr,
 * allocating and inserting an empty one on first use.
 * NOTE(review): the NULL-check, hash->put(), return statement, and braces
 * are on elided lines in this extract. */
64 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr)
132 action_list_t *tmp = hash->get(ptr);
134 tmp = new action_list_t();
/* Same get-or-create pattern for the per-thread action-list vectors used
 * by obj_thrd_map. */
67 static std::vector<action_list_t> * get_safe_ptr_vect_action(HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4> * hash, void * ptr)
142 std::vector<action_list_t> *tmp = hash->get(ptr);
144 tmp = new std::vector<action_list_t>();
70 * Restores user program to initial state and resets all model-checker data
/* Rolls the user program back to snapshot 0 and rewinds the NodeStack so
 * the next execution replays from the root. */
71 void ModelChecker::reset_to_initial_state()
156 DEBUG("+++ Resetting to initial state +++\n");
157 node_stack->reset_execution();
74 /* Print all model-checker output before rollback */
/* Rollback discards all user-program memory writes since the snapshot. */
75 snapshot_backtrack_before(0);
76 /** @return a thread ID for a new Thread */
/* Post-increment: hands out ids 0,1,2,... in creation order. */
77 thread_id_t ModelChecker::get_next_id()
168 return priv->next_thread_id++;
79 /** @return the number of user threads created during this execution */
/* next_thread_id doubles as the created-thread count (ids are dense). */
80 unsigned int ModelChecker::get_num_threads() const
174 return priv->next_thread_id;
82 * Must be called from user-thread context (e.g., through the global
83 * thread_current() interface)
84 * @return The currently executing Thread.
85 Thread * ModelChecker::get_current_thread() const
185 return scheduler->get_current_thread();
87 /** @return a sequence number for a new ModelAction */
/* Pre-increment: sequence numbers start at 1; 0 is never assigned. */
88 modelclock_t ModelChecker::get_next_seq_num()
191 return ++priv->used_sequence_numbers;
/* @return the Node at the head of the NodeStack (the current position in
 * the exploration tree). */
90 Node * ModelChecker::get_curr_node() const
196 return node_stack->get_head();
92 * @brief Choose the next thread to execute.
93 *
94 * This function chooses the next thread that should execute. It can force the
95 * adjacency of read/write portions of a RMW action, force THREAD_CREATE to be
96 * followed by a THREAD_START, or it can enforce execution replay/backtracking.
97 * The model-checker may have no preference regarding the next thread (i.e.,
98 * when exploring a new execution ordering), in which case this will return
99 * @param curr The current ModelAction. This action might guide the choice of
100 * @return The next thread to run. If the model-checker has no preference, NULL.
/* NOTE(review): extract is partial -- the RMW-read condition guarding the
 * first return, the "return NULL" for the no-preference case, several
 * closing braces, and other interior lines are elided. Comments below
 * describe only what the visible lines show. */
100 Thread * ModelChecker::get_next_thread(ModelAction *curr)
101 /* Do not split atomic actions. */
/* (Condition on an elided line; presumably curr->is_rmwr() -- confirm.) */
102 return thread_current();
103 else if (curr->get_type() == THREAD_CREATE)
/* Force the created thread to run immediately after THREAD_CREATE. */
104 return curr->get_thread_operand();
105 /* Have we completed exploring the preselected path? */
106 /* Else, we are trying to replay an execution */
107 ModelAction *next = node_stack->get_next()->get_action();
/* diverge marks where this replay should deviate from the prior one. */
108 if (next == diverge) {
109 if (earliest_diverge == NULL || *diverge < *earliest_diverge)
110 earliest_diverge = diverge;
111 Node *nextnode = next->get_node();
112 Node *prevnode = nextnode->get_parent();
113 scheduler->update_sleep_set(prevnode);
114 /* Reached divergence point */
/* At the divergence point, try the alternatives in a fixed priority
 * order: misc index, promise set, read-from value, future value,
 * release-sequence break -- each replays the same thread with the node's
 * next choice, popping back to this node's depth. */
115 if (nextnode->increment_misc()) {
116 /* The next node will try to satisfy a different misc_index values. */
117 tid = next->get_tid();
118 node_stack->pop_restofstack(2);
119 } else if (nextnode->increment_promise()) {
120 /* The next node will try to satisfy a different set of promises. */
121 tid = next->get_tid();
122 node_stack->pop_restofstack(2);
123 } else if (nextnode->increment_read_from()) {
124 /* The next node will read from a different value. */
125 tid = next->get_tid();
126 node_stack->pop_restofstack(2);
127 } else if (nextnode->increment_future_value()) {
128 /* The next node will try to read from a different future value. */
129 tid = next->get_tid();
130 node_stack->pop_restofstack(2);
131 } else if (nextnode->increment_relseq_break()) {
132 /* The next node will try to resolve a release sequence differently */
133 tid = next->get_tid();
134 node_stack->pop_restofstack(2);
/* All per-node alternatives exhausted (else-branch, brace elided):
 * backtrack to the parent node and run a different thread there. */
135 /* Make a different thread execute for next step */
136 scheduler->add_sleep(get_thread(next->get_tid()));
137 tid = prevnode->get_next_backtrack();
138 /* Make sure the backtracked thread isn't sleeping. */
139 node_stack->pop_restofstack(1);
140 if (diverge == earliest_diverge) {
141 earliest_diverge = prevnode->get_action();
142 /* The correct sleep set is in the parent node. */
143 DEBUG("*** Divergence point ***\n");
/* Not at the divergence point: simply replay the recorded thread. */
144 tid = next->get_tid();
145 DEBUG("*** ModelChecker chose next thread = %d ***\n", id_to_int(tid));
146 ASSERT(tid != THREAD_ID_T_NONE);
147 return thread_map->get(id_to_int(tid));
148 * We need to know what the next actions of all threads in the sleep
149 * set will be. This method computes them and stores the actions at
150 * the corresponding thread object's pending action.
151 void ModelChecker::execute_sleep_set()
152 for (unsigned int i = 0; i < get_num_threads(); i++) {
153 thread_id_t tid = int_to_id(i);
154 Thread *thr = get_thread(tid);
/* Only sleeping threads without an already-computed pending action. */
155 if (scheduler->is_sleep_set(thr) && thr->get_pending() == NULL) {
156 thr->set_state(THREAD_RUNNING);
157 scheduler->next_thread(thr);
/* Context-switch into the user thread just long enough for it to
 * produce its next ModelAction (delivered via priv->current_action). */
158 Thread::swap(&system_context, thr);
/* Mark the action as generated while asleep, and stash it as pending
 * rather than executing it. */
159 priv->current_action->set_sleep_flag();
160 thr->set_pending(priv->current_action);
/* Wakes any sleeping thread whose stashed pending action could
 * synchronize with curr (i.e., curr may have unblocked it).
 * NOTE(review): closing braces for the loop/function are elided. */
161 void ModelChecker::wake_up_sleeping_actions(ModelAction *curr)
162 for (unsigned int i = 0; i < get_num_threads(); i++) {
163 Thread *thr = get_thread(int_to_id(i));
164 if (scheduler->is_sleep_set(thr)) {
165 ModelAction *pending_act = thr->get_pending();
/* RMW-reads are skipped: the write half is still to come. */
166 if ((!curr->is_rmwr()) && pending_act->could_synchronize_with(curr))
167 //Remove this thread from sleep set
168 scheduler->remove_sleep(thr);
169 /** @brief Alert the model-checker that an incorrectly-ordered
170 * synchronization was made */
/* Sticky per-execution flag; lives in snapshotted state so it rolls back
 * with the execution. */
171 void ModelChecker::set_bad_synchronization()
173 priv->bad_synchronization = true;
/* @return True if the current execution has been aborted via set_assert(). */
173 bool ModelChecker::has_asserted() const
174 return priv->asserted;
/* @brief Abort the current execution; polled via has_asserted() in the
 * main work loop. */
175 void ModelChecker::set_assert()
176 priv->asserted = true;
177 * Check if we are in a deadlock. Should only be called at the end of an
178 * execution, although it should not give false positives in the middle of an
179 * execution (there should be some ENABLED thread).
180 *
181 * @return True if program is in a deadlock; false otherwise
/* NOTE(review): extract is partial -- the check that no thread is enabled
 * (which would make this return false) appears to be on elided lines;
 * only the blocked-thread scan is visible. */
181 bool ModelChecker::is_deadlocked() const
182 bool blocking_threads = false;
183 for (unsigned int i = 0; i < get_num_threads(); i++) {
184 thread_id_t tid = int_to_id(i);
185 Thread *t = get_thread(tid);
/* A user thread with a stashed pending action is blocked waiting. */
186 if (!t->is_model_thread() && t->get_pending())
187 blocking_threads = true;
188 return blocking_threads;
189 * Check if this is a complete execution. That is, have all thread completed
190 * execution (rather than exiting because sleep sets have forced a redundant
191 *
192 * @return True if the execution is complete.
/* If any thread is still enabled, the execution ended early (e.g. sleep
 * sets); the "return false"/"return true" lines are elided here. */
192 bool ModelChecker::is_complete_execution() const
193 for (unsigned int i = 0; i < get_num_threads(); i++)
194 if (is_enabled(int_to_id(i)))
195 * @brief Assert a bug in the executing program.
196 *
197 * Use this function to assert any sort of bug in the user program. If the
198 * current trace is feasible (actually, a prefix of some feasible execution),
199 * then this execution will be aborted, printing the appropriate message. If
200 * the current trace is not yet feasible, the error message will be stashed and
201 * printed if the execution ever becomes feasible.
202 *
203 * @param msg Descriptive message for the bug (do not include newline char)
204 * @return True if bug is immediately-feasible
/* NOTE(review): the feasible-case body (set_assert()/return true) and the
 * infeasible-case return are on elided lines. */
203 bool ModelChecker::assert_bug(const char *msg)
/* Stash the report regardless; it is printed later if/when feasible. */
204 priv->bugs.push_back(new bug_message(msg));
205 if (isfeasibleprefix()) {
206 * @brief Assert a bug in the executing program, asserted by a user thread
207 * @see ModelChecker::assert_bug
208 * @param msg Descriptive message for the bug (do not include newline char)
/* User-thread wrapper: on a feasible bug, yield to the master context so
 * the execution is torn down (the assert_bug(msg) call is elided). */
209 void ModelChecker::assert_user_bug(const char *msg)
210 /* If feasible bug, bail out now */
211 switch_to_master(NULL);
212 /** @return True, if any bugs have been reported for this execution */
213 bool ModelChecker::have_bug_reports() const
214 return priv->bugs.size() != 0;
215 /** @brief Print bug report listing for this execution (if any bugs exist) */
216 void ModelChecker::print_bugs() const
217 if (have_bug_reports()) {
/* The count argument to this format (bugs.size()) is on an elided line. */
218 model_print("Bug report: %zu bug%s detected\n",
219 priv->bugs.size() > 1 ? "s" : "");
220 for (unsigned int i = 0; i < priv->bugs.size(); i++)
221 priv->bugs[i]->print();
222 * @brief Record end-of-execution stats
223 *
224 * Must be run when exiting an execution. Records various stats.
225 * @see struct execution_stats
/* Buckets each finished execution into exactly one category (checked in
 * priority order); the num_total increment appears to be elided. */
225 void ModelChecker::record_stats()
226 if (!isfeasibleprefix())
227 stats.num_infeasible++;
228 else if (have_bug_reports())
229 stats.num_buggy_executions++;
230 else if (is_complete_execution())
231 stats.num_complete++;
/* Feasible, bug-free, but incomplete => sleep sets made it redundant. */
232 stats.num_redundant++;
233 /** @brief Print execution stats */
234 void ModelChecker::print_stats() const
235 model_print("Number of complete, bug-free executions: %d\n", stats.num_complete);
236 model_print("Number of redundant executions: %d\n", stats.num_redundant);
237 model_print("Number of buggy executions: %d\n", stats.num_buggy_executions);
238 model_print("Number of infeasible executions: %d\n", stats.num_infeasible);
239 model_print("Total executions: %d\n", stats.num_total);
240 model_print("Total nodes created: %d\n", node_stack->get_total_nodes());
241 * @brief End-of-exeuction print
242 * @param printbugs Should any existing bugs be printed?
/* Prints buffered program output, then (verbose/debug only) the earliest
 * divergence point; bug/trace printing continues on elided lines. */
243 void ModelChecker::print_execution(bool printbugs) const
244 print_program_output();
245 if (DBG_ENABLED() || params.verbose) {
246 model_print("Earliest divergence point since last feasible execution:\n");
247 if (earliest_diverge)
248 earliest_diverge->print();
249 model_print("(Not set)\n");
250 /* Don't print invalid bugs */
251 * Queries the model-checker for more executions to explore and, if one
252 * exists, resets the model-checker state to execute a new execution.
253 *
254 * @return If there are more executions to explore, return true. Otherwise,
/* NOTE(review): extract is partial -- the record_stats() call, the
 * "return false" when no backtrack point remains, diverge->print(), and
 * the final "return true" are on elided lines. */
254 bool ModelChecker::next_execution()
255 /* Is this execution a feasible execution that's worth bug-checking? */
256 bool complete = isfeasibleprefix() && (is_complete_execution() ||
257 /* End-of-execution bug checks */
/* Deadlock is only reported here, at end of execution (the is_deadlocked()
 * guard is on an elided line). */
258 assert_bug("Deadlock detected");
259 if (DBG_ENABLED() || params.verbose || (complete && have_bug_reports()))
260 print_execution(complete);
/* Discard the buffered user-program output for the next run. */
261 clear_program_output();
262 earliest_diverge = NULL;
/* No remaining backtracking point => exploration is finished. */
263 if ((diverge = get_next_backtrack()) == NULL)
264 model_print("Next execution will diverge at:\n");
/* Roll the snapshot back so the next execution replays from scratch. */
265 reset_to_initial_state();
/* Finds the most recent prior action at the same location that conflicts
 * with act -- the candidate backtracking point for set_backtracking().
 * Searches this location's trace list newest-to-oldest; the conflict test
 * depends on act's type.
 * NOTE(review): extract is partial -- case labels for the read/write
 * branch, the "return prev" statements, break statements, the default
 * branch, and "return NULL" are on elided lines. */
266 ModelAction * ModelChecker::get_last_conflict(ModelAction *act)
267 switch (act->get_type()) {
268 /* Optimization: relaxed operations don't need backtracking */
269 if (act->is_relaxed())
270 /* linear search: from most recent to oldest */
271 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
272 action_list_t::reverse_iterator rit;
273 for (rit = list->rbegin(); rit != list->rend(); rit++) {
274 ModelAction *prev = *rit;
/* Atomic read/write/RMW: conflict = any prior op we could sync with. */
275 if (prev->could_synchronize_with(act))
276 case ATOMIC_TRYLOCK: {
277 /* linear search: from most recent to oldest */
278 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
279 action_list_t::reverse_iterator rit;
280 for (rit = list->rbegin(); rit != list->rend(); rit++) {
281 ModelAction *prev = *rit;
/* Lock/trylock: conflict determined by the lock-specific test. */
282 if (act->is_conflicting_lock(prev))
283 case ATOMIC_UNLOCK: {
284 /* linear search: from most recent to oldest */
285 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
286 action_list_t::reverse_iterator rit;
287 for (rit = list->rbegin(); rit != list->rend(); rit++) {
288 ModelAction *prev = *rit;
/* Unlock: conflicts with another thread's failed trylock. */
289 if (!act->same_thread(prev) && prev->is_failed_trylock())
290 /* linear search: from most recent to oldest */
291 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
292 action_list_t::reverse_iterator rit;
293 for (rit = list->rbegin(); rit != list->rend(); rit++) {
294 ModelAction *prev = *rit;
/* (Wait case, label elided): conflicts with failed trylocks and
 * notifies from other threads. */
295 if (!act->same_thread(prev) && prev->is_failed_trylock())
296 if (!act->same_thread(prev) && prev->is_notify())
297 case ATOMIC_NOTIFY_ALL:
298 case ATOMIC_NOTIFY_ONE: {
299 /* linear search: from most recent to oldest */
300 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
301 action_list_t::reverse_iterator rit;
302 for (rit = list->rbegin(); rit != list->rend(); rit++) {
303 ModelAction *prev = *rit;
/* Notify: conflicts with another thread's wait. */
304 if (!act->same_thread(prev) && prev->is_wait())
305 /** This method finds backtracking points where we should try to
306 * reorder the parameter ModelAction against.
307 *
308 * @param the ModelAction to find backtracking points for.
/* For each thread that could have run instead of act's thread at the last
 * conflicting action, mark a backtracking point in the exploration tree.
 * NOTE(review): extract is partial -- the early return when no conflict
 * exists, several "continue" statements, and closing braces are elided. */
308 void ModelChecker::set_backtracking(ModelAction *act)
309 Thread *t = get_thread(act);
310 ModelAction *prev = get_last_conflict(act);
/* Backtracking choices live in the conflicting action's parent node. */
311 Node *node = prev->get_node()->get_parent();
312 int low_tid, high_tid;
/* If act's own thread was enabled at that node, only it needs a
 * backtrack entry; otherwise consider every thread. */
313 if (node->is_enabled(t)) {
314 low_tid = id_to_int(act->get_tid());
315 high_tid = low_tid + 1;
316 high_tid = get_num_threads();
317 for (int i = low_tid; i < high_tid; i++) {
318 thread_id_t tid = int_to_id(i);
319 /* Make sure this thread can be enabled here. */
320 if (i >= node->get_num_threads())
321 /* Don't backtrack into a point where the thread is disabled or sleeping. */
322 if (node->enabled_status(tid) != THREAD_ENABLED)
323 /* Check if this has been explored already */
324 if (node->has_been_explored(tid))
325 /* See if fairness allows */
326 if (model->params.fairwindow != 0 && !node->has_priority(tid)) {
/* Skip tid if some enabled thread holds fairness priority over it
 * (the skip itself is on elided lines). */
327 for (int t = 0; t < node->get_num_threads(); t++) {
328 thread_id_t tother = int_to_id(t);
329 if (node->is_enabled(tother) && node->has_priority(tother)) {
330 /* Cache the latest backtracking point */
331 set_latest_backtrack(prev);
332 /* If this is a new backtracking point, mark the tree */
333 if (!node->set_backtrack(tid))
334 DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
335 id_to_int(prev->get_tid()),
336 id_to_int(t->get_id()));
337 * @brief Cache the a backtracking point as the "most recent", if eligible
338 *
339 * Note that this does not prepare the NodeStack for this backtracking
340 * operation, it only caches the action on a per-execution basis
341 *
342 * @param act The operation at which we should explore a different next action
343 * (i.e., backtracking point)
344 * @return True, if this action is now the most recent backtracking point;
/* Keeps only the latest (by sequence order) backtracking point; the
 * "return true"/"return false" lines are elided. */
343 bool ModelChecker::set_latest_backtrack(ModelAction *act)
344 if (!priv->next_backtrack || *act > *priv->next_backtrack) {
345 priv->next_backtrack = act;
346 * Returns last backtracking point. The model checker will explore a different
347 * path for this point in the next execution.
348 * @return The ModelAction at which the next execution should diverge.
/* Consumes the cached point: returns it and clears the cache. */
349 ModelAction * ModelChecker::get_next_backtrack()
350 ModelAction *next = priv->next_backtrack;
351 priv->next_backtrack = NULL;
352 * Processes a read or rmw model action.
353 * @param curr is the read model action to process.
354 * @param second_part_of_rmw is boolean that is true is this is the second action of a rmw.
355 * @return True if processing this read updates the mo_graph.
/* NOTE(review): extract is partial -- the enclosing retry loop (if any),
 * RMW-specific mo-order handling, "continue"/"return" statements, and
 * closing braces are on elided lines. */
356 bool ModelChecker::process_read(ModelAction *curr, bool second_part_of_rmw)
357 uint64_t value = VALUE_NONE;
358 bool updated = false;
/* The NodeStack node records which write this read observes. */
359 const ModelAction *reads_from = curr->get_node()->get_read_from();
360 if (reads_from != NULL) {
/* Stage mo_graph edits so they can be rolled back if infeasible. */
361 mo_graph->startChanges();
362 value = reads_from->get_value();
363 bool r_status = false;
364 if (!second_part_of_rmw) {
365 check_recency(curr, reads_from);
366 r_status = r_modification_order(curr, reads_from);
/* Infeasible choice: roll back and let the node try the next
 * read-from or future value (retry path partially elided). */
367 if (!second_part_of_rmw && is_infeasible() && (curr->get_node()->increment_read_from() || curr->get_node()->increment_future_value())) {
368 mo_graph->rollbackChanges();
369 priv->too_many_reads = false;
/* Feasible: commit the rf relation and mo edges. */
370 read_from(curr, reads_from);
371 mo_graph->commitChanges();
372 mo_check_promises(curr->get_tid(), reads_from);
373 } else if (!second_part_of_rmw) {
374 /* Read from future value */
/* Speculatively read a value some later write may produce; record the
 * obligation as a Promise that a writer must eventually satisfy. */
375 value = curr->get_node()->get_future_value();
376 modelclock_t expiration = curr->get_node()->get_future_value_expiration();
377 curr->set_read_from(NULL);
378 Promise *valuepromise = new Promise(curr, value, expiration);
379 promises->push_back(valuepromise);
/* Deliver the observed value to the user thread. */
380 get_thread(curr)->set_return_value(value);
381 * Processes a lock, trylock, or unlock model action. @param curr is
382 * the read model action to process.
383 *
384 * The try lock operation checks whether the lock is taken. If not,
385 * it falls to the normal lock operation case. If so, it returns
386 *
387 * The lock operation has already been checked that it is enabled, so
388 * it just grabs the lock and synchronizes with the previous unlock.
389 *
390 * The unlock operation has to re-enable all of the threads that are
391 * waiting on the lock.
392 *
393 * @return True if synchronization was updated; false otherwise
/* NOTE(review): extract is partial -- the ATOMIC_LOCK/ATOMIC_WAIT case
 * labels, break statements, the trylock failure branch, "return" lines,
 * and closing braces are elided. Comments reflect visible lines only. */
390 bool ModelChecker::process_mutex(ModelAction *curr)
391 std::mutex *mutex = NULL;
392 struct std::mutex_state *state = NULL;
/* Lock/trylock/unlock carry the mutex as their location; wait carries
 * it as the action value. */
393 if (curr->is_trylock() || curr->is_lock() || curr->is_unlock()) {
394 mutex = (std::mutex *)curr->get_location();
395 state = mutex->get_state();
396 } else if (curr->is_wait()) {
397 mutex = (std::mutex *)curr->get_value();
398 state = mutex->get_state();
399 switch (curr->get_type()) {
400 case ATOMIC_TRYLOCK: {
401 bool success = !state->islocked;
402 curr->set_try_lock(success);
/* Failed trylock returns 0 to the user thread; success falls through
 * to the lock path below (return-value lines around here elided). */
403 get_thread(curr)->set_return_value(0);
404 get_thread(curr)->set_return_value(1);
405 //otherwise fall into the lock case
/* Detect use of a mutex before its construction was visible to this
 * thread (clock-vector check against the allocation clock). */
406 if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
407 assert_bug("Lock access before initialization");
408 state->islocked = true;
409 ModelAction *unlock = get_last_unlock(curr);
410 //synchronize with the previous unlock statement
411 if (unlock != NULL) {
412 curr->synchronize_with(unlock);
413 case ATOMIC_UNLOCK: {
414 state->islocked = false;
415 //wake up the other threads
416 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, curr->get_location());
417 //activate all the waiting threads
418 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
419 scheduler->wake(get_thread(*rit));
/* (ATOMIC_WAIT case, label elided): release the mutex, wake lock
 * waiters, then possibly go to sleep on the condition variable. */
420 state->islocked = false;
421 //wake up the other threads
422 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, (void *) curr->get_value());
423 //activate all the waiting threads
424 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
425 scheduler->wake(get_thread(*rit));
426 //check whether we should go to sleep or not...simulate spurious failures
/* misc index 0 = really wait; other indices model spurious wakeup. */
427 if (curr->get_node()->get_misc() == 0) {
428 get_safe_ptr_action(condvar_waiters_map, curr->get_location())->push_back(curr);
429 scheduler->sleep(get_thread(curr));
430 case ATOMIC_NOTIFY_ALL: {
431 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
432 //activate all the waiting threads
433 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
434 scheduler->wake(get_thread(*rit));
435 case ATOMIC_NOTIFY_ONE: {
436 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
/* The node's misc index selects WHICH waiter to wake, so all choices
 * get explored across executions. */
437 int wakeupthread = curr->get_node()->get_misc();
438 action_list_t::iterator it = waiters->begin();
439 advance(it, wakeupthread);
440 scheduler->wake(get_thread(*it));
/* Records writer's value as a future value the (earlier) reader may
 * observe, and caches a backtracking point if the node accepted it.
 * NOTE(review): braces elided in this extract. */
441 void ModelChecker::add_future_value(const ModelAction *writer, ModelAction *reader)
442 /* Do more ambitious checks now that mo is more complete */
443 if (mo_may_allow(writer, reader) &&
/* Expiration bounds how far in the future the promised write may be. */
444 reader->get_node()->add_future_value(writer->get_value(),
445 writer->get_seq_number() + params.maxfuturedelay))
446 set_latest_backtrack(reader);
447 * Process a write ModelAction
448 * @param curr The ModelAction to process
449 * @return True if the mo_graph was updated or promises were resolved
450 bool ModelChecker::process_write(ModelAction *curr)
451 bool updated_mod_order = w_modification_order(curr);
452 bool updated_promises = resolve_promises(curr);
/* Only flush queued future values once no promises remain outstanding
 * (mo is then complete enough for add_future_value's checks). */
453 if (promises->size() == 0) {
454 for (unsigned int i = 0; i < futurevalues->size(); i++) {
455 struct PendingFutureValue pfv = (*futurevalues)[i];
456 add_future_value(pfv.writer, pfv.act);
457 futurevalues->clear();
458 mo_graph->commitChanges();
459 mo_check_promises(curr->get_tid(), curr);
/* Writes have no meaningful return value for the user thread. */
460 get_thread(curr)->set_return_value(VALUE_NONE);
461 return updated_mod_order || updated_promises;
462 * Process a fence ModelAction
463 * @param curr The ModelAction to process
464 * @return True if synchronization was updated
465 bool ModelChecker::process_fence(ModelAction *curr)
466 * fence-relaxed: no-op
467 * fence-release: only log the occurence (not in this function), for
468 * use in later synchronization
469 * fence-acquire (this function): search for hypothetical release
/* NOTE(review): extract is partial -- the is_read() filter on act, break
 * statements, "updated = true", the return, and braces are elided. */
470 bool updated = false;
471 if (curr->is_acquire()) {
472 action_list_t *list = action_trace;
473 action_list_t::reverse_iterator rit;
474 /* Find X : is_read(X) && X --sb-> curr */
/* Walk the whole trace newest-to-oldest, restricted to curr's thread
 * (sequenced-before). */
475 for (rit = list->rbegin(); rit != list->rend(); rit++) {
476 ModelAction *act = *rit;
477 if (act->get_tid() != curr->get_tid())
478 /* Stop at the beginning of the thread */
479 if (act->is_thread_start())
480 /* Stop once we reach a prior fence-acquire */
481 if (act->is_fence() && act->is_acquire())
482 /* read-acquire will find its own release sequences */
483 if (act->is_acquire())
484 /* Establish hypothetical release sequences */
485 rel_heads_list_t release_heads;
/* Note the (curr, act) pair: the fence acquires on behalf of the
 * earlier relaxed read act. */
486 get_release_seq_heads(curr, act, &release_heads);
487 for (unsigned int i = 0; i < release_heads.size(); i++)
488 if (!curr->synchronize_with(release_heads[i]))
489 set_bad_synchronization();
490 if (release_heads.size() != 0)
491 * @brief Process the current action for thread-related activity
492 *
493 * Performs current-action processing for a THREAD_* ModelAction. Proccesses
494 * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
495 * synchronization, etc. This function is a no-op for non-THREAD actions
496 * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
497 *
498 * @param curr The current action
499 * @return True if synchronization was updated or a thread completed
/* NOTE(review): extract is partial -- the THREAD_JOIN case label,
 * THREAD_START case, breaks, th->complete(), default branch, return,
 * and braces are on elided lines. */
498 bool ModelChecker::process_thread_action(ModelAction *curr)
499 bool updated = false;
500 switch (curr->get_type()) {
501 case THREAD_CREATE: {
/* Link the child thread to its creating action (for clock vectors). */
502 Thread *th = curr->get_thread_operand();
503 th->set_creation(curr);
/* (THREAD_JOIN case, label elided): synchronize the join with the
 * joined thread's last action. */
504 Thread *blocking = curr->get_thread_operand();
505 ModelAction *act = get_last_action(blocking->get_id());
506 curr->synchronize_with(act);
507 updated = true; /* trigger rel-seq checks */
508 case THREAD_FINISH: {
509 Thread *th = get_thread(curr);
/* Wake every thread blocked in THREAD_JOIN on this thread. */
510 while (!th->wait_list_empty()) {
511 ModelAction *act = th->pop_wait_list();
512 scheduler->wake(get_thread(act));
513 updated = true; /* trigger rel-seq checks */
/* (THREAD_START case, label elided): check promises against the new
 * thread's initial clock vector. */
514 check_promises(curr->get_tid(), NULL, curr->get_cv());
515 * @brief Process the current action for release sequence fixup activity
516 *
517 * Performs model-checker release sequence fixups for the current action,
518 * forcing a single pending release sequence to break (with a given, potential
519 * "loose" write) or to complete (i.e., synchronize). If a pending release
520 * sequence forms a complete release sequence, then we must perform the fixup
521 * synchronization, mo_graph additions, etc.
522 *
523 * @param curr The current action; must be a release sequence fixup action
524 * @param work_queue The work queue to which to add work items as they are
/* NOTE(review): extract is partial -- some ASSERTs, the else-branch
 * structure, the race-check call, and braces are on elided lines. */
523 void ModelChecker::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
/* The node chose either NULL (force synchronization) or a "loose" write
 * (force the sequence to break). */
524 const ModelAction *write = curr->get_node()->get_relseq_break();
525 struct release_seq *sequence = pending_rel_seqs->back();
526 pending_rel_seqs->pop_back();
527 ModelAction *acquire = sequence->acquire;
528 const ModelAction *rf = sequence->rf;
529 const ModelAction *release = sequence->release;
530 ASSERT(release->same_thread(rf));
531 if (write == NULL) {
532 * @todo Forcing a synchronization requires that we set
533 * modification order constraints. For instance, we can't allow
534 * a fixup sequence in which two separate read-acquire
535 * operations read from the same sequence, where the first one
536 * synchronizes and the other doesn't. Essentially, we can't
537 * allow any writes to insert themselves between 'release' and
538 /* Must synchronize */
539 if (!acquire->synchronize_with(release)) {
540 set_bad_synchronization();
541 /* Re-check all pending release sequences */
542 work_queue->push_back(CheckRelSeqWorkEntry(NULL));
543 /* Re-check act for mo_graph edges */
544 work_queue->push_back(MOEdgeWorkEntry(acquire));
545 /* propagate synchronization to later actions */
/* Walk the trace backwards from the end down to the acquire itself,
 * re-synchronizing every action that now happens-after it. */
546 action_list_t::reverse_iterator rit = action_trace->rbegin();
547 for (; (*rit) != acquire; rit++) {
548 ModelAction *propagate = *rit;
549 if (acquire->happens_before(propagate)) {
550 propagate->synchronize_with(acquire);
551 /* Re-check 'propagate' for mo_graph edges */
552 work_queue->push_back(MOEdgeWorkEntry(propagate));
553 /* Break release sequence with new edges:
554 * release --mo--> write --mo--> rf */
/* The loose write is modification-ordered between release and rf,
 * which prevents the sequence from ever completing. */
555 mo_graph->addEdge(release, write);
556 mo_graph->addEdge(write, rf);
557 /* See if we have realized a data race */
558 * Initialize the current action by performing one or more of the following
559 * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
560 * in the NodeStack, manipulating backtracking sets, allocating and
561 * initializing clock vectors, and computing the promises to fulfill.
562 *
563 * @param curr The current action, as passed from the user context; may be
564 * freed/invalidated after the execution of this function, with a different
565 * action "returned" its place (pass-by-reference)
566 * @return True if curr is a newly-explored action; false otherwise
/* NOTE(review): extract is partial -- the "*curr = newcurr" assignments,
 * the set_backtracking/set_sleep_flag calls, delete of the duplicate
 * action, else-branch structure, and braces are on elided lines. */
566 bool ModelChecker::initialize_curr_action(ModelAction **curr)
567 ModelAction *newcurr;
/* RMW second half: merge into the existing RMWR action instead of
 * exploring a new node. */
568 if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
569 newcurr = process_rmw(*curr);
570 if (newcurr->is_rmw())
571 compute_promises(newcurr);
/* Fresh action: stamp a sequence number and step the NodeStack, which
 * either returns this action (new exploration) or the recorded one
 * (replay). */
572 (*curr)->set_seq_number(get_next_seq_num());
573 newcurr = node_stack->explore_action(*curr, scheduler->get_enabled_array());
574 /* First restore type and order in case of RMW operation */
575 if ((*curr)->is_rmwr())
576 newcurr->copy_typeandorder(*curr);
577 ASSERT((*curr)->get_location() == newcurr->get_location());
578 newcurr->copy_from_new(*curr);
579 /* Discard duplicate ModelAction; use action from NodeStack */
580 /* Always compute new clock vector */
581 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
582 return false; /* Action was explored previously */
583 /* Always compute new clock vector */
584 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
585 /* Assign most recent release fence */
586 newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
587 * Perform one-time actions when pushing new ModelAction onto
/* One-time setup for newly-explored actions: enumerate the choice
 * space each action type will explore across executions. */
588 if (newcurr->is_write())
589 compute_promises(newcurr);
590 else if (newcurr->is_relseq_fixup())
591 compute_relseq_breakwrites(newcurr);
592 else if (newcurr->is_wait())
/* Two misc choices for wait: really sleep vs. spurious wakeup. */
593 newcurr->get_node()->set_misc_max(2);
594 else if (newcurr->is_notify_one()) {
/* One misc choice per condvar waiter: which thread gets woken. */
595 newcurr->get_node()->set_misc_max(get_safe_ptr_action(condvar_waiters_map, newcurr->get_location())->size());
596 return true; /* This was a new ModelAction */
597 * @brief Establish reads-from relation between two actions
598 *
599 * Perform basic operations involved with establishing a concrete rf relation,
600 * including setting the ModelAction data and checking for release sequences.
601 *
602 * @param act The action that is reading (must be a read)
603 * @param rf The action from which we are reading (must be a write)
604 *
605 * @return True if this read established synchronization
/* NOTE(review): extract is partial -- the "return false" for the
 * non-acquire path and some braces are elided. */
603 bool ModelChecker::read_from(ModelAction *act, const ModelAction *rf)
604 act->set_read_from(rf);
/* Acquire reads additionally synchronize with the heads of any release
 * sequences that rf belongs to. */
605 if (rf != NULL && act->is_acquire()) {
606 rel_heads_list_t release_heads;
607 get_release_seq_heads(act, act, &release_heads);
608 int num_heads = release_heads.size();
609 for (unsigned int i = 0; i < release_heads.size(); i++)
610 if (!act->synchronize_with(release_heads[i])) {
611 set_bad_synchronization();
612 return num_heads > 0;
613 * @brief Check whether a model action is enabled.
614 *
615 * Checks whether a lock or join operation would be successful (i.e., is the
616 * lock already locked, or is the joined thread already complete). If not, put
617 * the action in a waiter list.
618 *
619 * @param curr is the ModelAction to check whether it is enabled.
620 * @return a bool that indicates whether the action is enabled.
/* NOTE(review): the "return false" inside each blocked branch and the
 * final "return true" are on elided lines. */
619 bool ModelChecker::check_action_enabled(ModelAction *curr) {
620 if (curr->is_lock()) {
621 std::mutex *lock = (std::mutex *)curr->get_location();
622 struct std::mutex_state *state = lock->get_state();
623 if (state->islocked) {
624 //Stick the action in the appropriate waiting queue
/* Parked here until the holder's unlock wakes us (process_mutex). */
625 get_safe_ptr_action(lock_waiters_map, curr->get_location())->push_back(curr);
626 } else if (curr->get_type() == THREAD_JOIN) {
627 Thread *blocking = (Thread *)curr->get_location();
628 if (!blocking->is_complete()) {
/* Parked on the target thread; woken by THREAD_FINISH processing. */
629 blocking->push_wait_list(curr);
630 * Stores the ModelAction for the current thread action. Call this
631 * immediately before switching from user- to system-context to pass
632 * data between them.
633 * @param act The ModelAction created by the user-thread action
634 void ModelChecker::set_current_action(ModelAction *act) {
635 priv->current_action = act;
1198 * This is the heart of the model checker routine. It performs model-checking
1199 * actions corresponding to a given "current action." Among other processes, it
1200 * calculates reads-from relationships, updates synchronization clock vectors,
1201 * forms a memory_order constraints graph, and handles replay/backtrack
1202 * execution when running permutations of previously-observed executions.
1204 * @param curr The current action to process
1205 * @return The ModelAction that is actually executed; may be different than
1206 * curr; may be NULL, if the current action is not enabled to run
1208 ModelAction * ModelChecker::check_current_action(ModelAction *curr)
// The write half of a RMW (or a RMWC) reuses the existing read action, so it
// must not be added to the action lists a second time (see below).
1211 bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
1213 if (!check_action_enabled(curr)) {
1214 /* Make the execution look like we chose to run this action
1215 * much later, when a lock/join can succeed */
1216 get_thread(curr)->set_pending(curr);
1217 scheduler->sleep(get_thread(curr));
1221 bool newly_explored = initialize_curr_action(&curr);
1227 wake_up_sleeping_actions(curr);
1229 /* Add the action to lists before any other model-checking tasks */
1230 if (!second_part_of_rmw)
1231 add_action_to_lists(curr);
1233 /* Build may_read_from set for newly-created actions */
1234 if (newly_explored && curr->is_read())
1235 build_reads_from_past(curr);
1237 /* Initialize work_queue with the "current action" work */
1238 work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
// Drain the work queue; processing one entry may enqueue follow-up work
// (release-sequence re-checks, mo-graph edge re-checks) until fixpoint.
1239 while (!work_queue.empty() && !has_asserted()) {
1240 WorkQueueEntry work = work_queue.front();
1241 work_queue.pop_front();
1243 switch (work.type) {
1244 case WORK_CHECK_CURR_ACTION: {
1245 ModelAction *act = work.action;
1246 bool update = false; /* update this location's release seq's */
1247 bool update_all = false; /* update all release seq's */
1249 if (process_thread_action(curr))
1252 if (act->is_read() && process_read(act, second_part_of_rmw))
1255 if (act->is_write() && process_write(act))
1258 if (act->is_fence() && process_fence(act))
1261 if (act->is_mutex_op() && process_mutex(act))
1264 if (act->is_relseq_fixup())
1265 process_relseq_fixup(curr, &work_queue);
// Re-check either every pending release sequence or only those at
// this action's location, depending on the update flags above.
1268 work_queue.push_back(CheckRelSeqWorkEntry(NULL));
1270 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1273 case WORK_CHECK_RELEASE_SEQ:
1274 resolve_release_sequences(work.location, &work_queue);
1276 case WORK_CHECK_MO_EDGES: {
1277 /** @todo Complete verification of work_queue */
1278 ModelAction *act = work.action;
1279 bool updated = false;
1281 if (act->is_read()) {
1282 const ModelAction *rf = act->get_reads_from();
1283 if (rf != NULL && r_modification_order(act, rf))
1286 if (act->is_write()) {
1287 if (w_modification_order(act))
// Commit any modification-order edges added speculatively above.
1290 mo_graph->commitChanges();
1293 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
// Queue drained: record any backtracking points implied by this action.
1302 check_curr_backtracking(curr);
1303 set_backtracking(curr);
// Mark 'curr' as the latest backtracking point if any unexplored
// alternative remains at its Node (or its parent's backtrack set).
1307 void ModelChecker::check_curr_backtracking(ModelAction *curr)
1309 Node *currnode = curr->get_node();
1310 Node *parnode = currnode->get_parent();
// Any non-empty alternative set (parent backtrack, misc, read-from,
// future-value, promise, relseq-break) means more executions to explore.
1312 if ((parnode && !parnode->backtrack_empty()) ||
1313 !currnode->misc_empty() ||
1314 !currnode->read_from_empty() ||
1315 !currnode->future_value_empty() ||
1316 !currnode->promise_empty() ||
1317 !currnode->relseq_break_empty()) {
1318 set_latest_backtrack(curr);
// @return true if any outstanding promise's expiration sequence number has
// already been passed by the execution (priv->used_sequence_numbers).
1322 bool ModelChecker::promises_expired() const
1324 for (unsigned int i = 0; i < promises->size(); i++) {
1325 Promise *promise = (*promises)[i];
1326 if (promise->get_expiration() < priv->used_sequence_numbers)
1333 * This is the strongest feasibility check available.
1334 * @return whether the current trace (partial or complete) must be a prefix of
// Feasible prefix = no pending (unresolved) release sequences AND feasible
// under every other check (see is_feasible_prefix_ignore_relseq()).
1337 bool ModelChecker::isfeasibleprefix() const
1339 return pending_rel_seqs->size() == 0 && is_feasible_prefix_ignore_relseq();
1343 * Print diagnostic information about an infeasible execution
1344 * @param prefix A string to prefix the output with; if NULL, then a default
1345 * message prefix will be provided
1347 void ModelChecker::print_infeasibility(const char *prefix) const
// Append one bracketed tag per infeasibility cause into the buffer, then
// print them all on a single line.
1351 if (mo_graph->checkForRMWViolation())
1352 ptr += sprintf(ptr, "[RMW atomicity]");
1353 if (mo_graph->checkForCycles())
1354 ptr += sprintf(ptr, "[mo cycle]");
1355 if (priv->failed_promise)
1356 ptr += sprintf(ptr, "[failed promise]");
1357 if (priv->too_many_reads)
1358 ptr += sprintf(ptr, "[too many reads]");
1359 if (priv->bad_synchronization)
1360 ptr += sprintf(ptr, "[bad sw ordering]");
1361 if (promises_expired())
1362 ptr += sprintf(ptr, "[promise expired]");
1363 if (promises->size() != 0)
1364 ptr += sprintf(ptr, "[unresolved promise]");
1366 model_print("%s: %s\n", prefix ? prefix : "Infeasible", buf);
1370 * Returns whether the current completed trace is feasible, except for pending
1371 * release sequences.
1373 bool ModelChecker::is_feasible_prefix_ignore_relseq() const
// Feasible and with no outstanding promises (unresolved future-value reads).
1375 return !is_infeasible() && promises->size() == 0;
1379 * Check if the current partial trace is infeasible. Does not check any
1380 * end-of-execution flags, which might rule out the execution. Thus, this is
1381 * useful only for ruling an execution as infeasible.
1382 * @return whether the current partial trace is infeasible.
1384 bool ModelChecker::is_infeasible() const
// RMW-atomicity violations plus every cause checked by the ignoreRMW variant.
1386 return mo_graph->checkForRMWViolation() || is_infeasible_ignoreRMW();
1390 * Check if the current partial trace is infeasible, while ignoring
1391 * infeasibility related to 2 RMW's reading from the same store. It does not
1392 * check end-of-execution feasibility.
1393 * @see ModelChecker::is_infeasible
1394 * @return whether the current partial trace is infeasible, ignoring multiple
1395 * RMWs reading from the same store.
1397 bool ModelChecker::is_infeasible_ignoreRMW() const
// Combines the mo-graph cycle check with the snapshotted infeasibility flags.
1399 return mo_graph->checkForCycles() || priv->failed_promise ||
1400 priv->too_many_reads || priv->bad_synchronization ||
1404 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
1405 ModelAction * ModelChecker::process_rmw(ModelAction *act) {
// The read portion of this RMW is the thread's most recent action.
1406 ModelAction *lastread = get_last_action(act->get_tid());
1407 lastread->process_rmw(act);
// A successful RMW is mo-ordered immediately after the write it read from;
// record that constraint as an RMW edge in the modification-order graph.
1408 if (act->is_rmw() && lastread->get_reads_from() != NULL) {
1409 mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
1410 mo_graph->commitChanges();
1416 * Checks whether a thread has read from the same write for too many times
1417 * without seeing the effects of a later write.
1420 * 1) there must be a different write that we could read from that would satisfy the modification order,
1421 * 2) we must have read from the same value in excess of maxreads times, and
1422 * 3) that other write must have been in the reads_from set for maxreads times.
1424 * If so, we decide that the execution is no longer feasible.
1426 void ModelChecker::check_recency(ModelAction *curr, const ModelAction *rf)
// maxreads == 0 disables this check entirely (see params).
1428 if (params.maxreads != 0) {
// With at most one read-from candidate there is no alternative write.
1429 if (curr->get_node()->get_read_from_size() <= 1)
1431 //Must make sure that execution is currently feasible... We could
1432 //accidentally clear by rolling back
1433 if (is_infeasible())
1435 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1436 int tid = id_to_int(curr->get_tid());
1439 if ((int)thrd_lists->size() <= tid)
1441 action_list_t *list = &(*thrd_lists)[tid];
1443 action_list_t::reverse_iterator rit = list->rbegin();
1444 /* Skip past curr */
1445 for (; (*rit) != curr; rit++)
1447 /* go past curr now */
// Keep a copy of the iterator so the list can be re-walked below.
1450 action_list_t::reverse_iterator ritcopy = rit;
1451 //See if we have enough reads from the same value
1453 for (; count < params.maxreads; rit++, count++) {
1454 if (rit == list->rend())
1456 ModelAction *act = *rit;
1457 if (!act->is_read())
// Bail out as soon as an earlier read used a different write.
1460 if (act->get_reads_from() != rf)
1462 if (act->get_node()->get_read_from_size() <= 1)
// Try each alternative write that curr could have read from instead.
1465 for (int i = 0; i < curr->get_node()->get_read_from_size(); i++) {
1467 const ModelAction *write = curr->get_node()->get_read_from_at(i);
1469 /* Need a different write */
1473 /* Test to see whether this is a feasible write to read from */
// Speculatively add the mo edges, test feasibility, then roll back.
1474 mo_graph->startChanges();
1475 r_modification_order(curr, write);
1476 bool feasiblereadfrom = !is_infeasible();
1477 mo_graph->rollbackChanges();
1479 if (!feasiblereadfrom)
1483 bool feasiblewrite = true;
1484 //now we need to see if this write works for everyone
1486 for (int loop = count; loop > 0; loop--, rit++) {
1487 ModelAction *act = *rit;
1488 bool foundvalue = false;
1489 for (int j = 0; j < act->get_node()->get_read_from_size(); j++) {
1490 if (act->get_node()->get_read_from_at(j) == write) {
1496 feasiblewrite = false;
// A feasible alternative write existed for all maxreads reads, yet we
// kept reading the stale value: rule this execution infeasible.
1500 if (feasiblewrite) {
1501 priv->too_many_reads = true;
1509 * Updates the mo_graph with the constraints imposed from the current
1512 * Basic idea is the following: Go through each other thread and find
1513 * the latest action that happened before our read. Two cases:
1515 * (1) The action is a write => that write must either occur before
1516 * the write we read from or be the write we read from.
1518 * (2) The action is a read => the write that that action read from
1519 * must occur before the write we read from or be the same write.
1521 * @param curr The current action. Must be a read.
1522 * @param rf The action that curr reads from. Must be a write.
1523 * @return True if modification order edges were added; false otherwise
1525 bool ModelChecker::r_modification_order(ModelAction *curr, const ModelAction *rf)
1527 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1530 ASSERT(curr->is_read());
1532 /* Last SC fence in the current thread */
1533 ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1535 /* Iterate over all threads */
1536 for (i = 0; i < thrd_lists->size(); i++) {
1537 /* Last SC fence in thread i */
1538 ModelAction *last_sc_fence_thread_local = NULL;
1539 if (int_to_id((int)i) != curr->get_tid())
1540 last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(i), NULL);
1542 /* Last SC fence in thread i, before last SC fence in current thread */
1543 ModelAction *last_sc_fence_thread_before = NULL;
1544 if (last_sc_fence_local)
1545 last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1547 /* Iterate over actions in thread, starting from most recent */
1548 action_list_t *list = &(*thrd_lists)[i];
1549 action_list_t::reverse_iterator rit;
1550 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1551 ModelAction *act = *rit;
// SC-fence rules: each clause adds act --mo--> rf per the cited
// statement of C++11 Section 29.3.
1553 if (act->is_write() && act != rf && act != curr) {
1554 /* C++, Section 29.3 statement 5 */
1555 if (curr->is_seqcst() && last_sc_fence_thread_local &&
1556 *act < *last_sc_fence_thread_local) {
1557 mo_graph->addEdge(act, rf);
1561 /* C++, Section 29.3 statement 4 */
1562 else if (act->is_seqcst() && last_sc_fence_local &&
1563 *act < *last_sc_fence_local) {
1564 mo_graph->addEdge(act, rf);
1568 /* C++, Section 29.3 statement 6 */
1569 else if (last_sc_fence_thread_before &&
1570 *act < *last_sc_fence_thread_before) {
1571 mo_graph->addEdge(act, rf);
1578 * Include at most one act per-thread that "happens
1579 * before" curr. Don't consider reflexively.
1581 if (act->happens_before(curr) && act != curr) {
1582 if (act->is_write()) {
// Case (1): earlier write must be mo-before the write we read.
1584 mo_graph->addEdge(act, rf);
// Case (2): earlier read's source write must be mo-before rf.
1588 const ModelAction *prevreadfrom = act->get_reads_from();
1589 //if the previous read is unresolved, keep going...
1590 if (prevreadfrom == NULL)
1593 if (rf != prevreadfrom) {
1594 mo_graph->addEdge(prevreadfrom, rf);
1606 /** This method fixes up the modification order when we resolve a
1607 * promise. The basic problem is that actions that occur after the
1608 * read curr could not properly add items to the modification order
1611 * So for each thread, we find the earliest item that happens after
1612 * the read curr. This is the item we have to fix up with additional
1613 * constraints. If that action is write, we add a MO edge between
1614 * the Action rf and that action. If the action is a read, we add a
1615 * MO edge between the Action rf, and whatever the read accessed.
1617 * @param curr is the read ModelAction that we are fixing up MO edges for.
1618 * @param rf is the write ModelAction that curr reads from.
1621 void ModelChecker::post_r_modification_order(ModelAction *curr, const ModelAction *rf)
1623 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1625 ASSERT(curr->is_read());
1627 /* Iterate over all threads */
1628 for (i = 0; i < thrd_lists->size(); i++) {
1629 /* Iterate over actions in thread, starting from most recent */
1630 action_list_t *list = &(*thrd_lists)[i];
1631 action_list_t::reverse_iterator rit;
1632 ModelAction *lastact = NULL;
1634 /* Find last action that happens after curr that is either not curr or a rmw */
1635 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1636 ModelAction *act = *rit;
// Reverse iteration => the last match kept is the thread's earliest
// action ordered after curr (curr itself only counts if it is a RMW).
1637 if (curr->happens_before(act) && (curr != act || curr->is_rmw())) {
1643 /* Include at most one act per-thread that "happens before" curr */
1644 if (lastact != NULL) {
1645 if (lastact == curr) {
1646 //Case 1: The resolved read is a RMW, and we need to make sure
1647 //that the write portion of the RMW mod order after rf
1649 mo_graph->addEdge(rf, lastact);
1650 } else if (lastact->is_read()) {
1651 //Case 2: The resolved read is a normal read and the next
1652 //operation is a read, and we need to make sure the value read
1653 //is mod ordered after rf
1655 const ModelAction *postreadfrom = lastact->get_reads_from();
1656 if (postreadfrom != NULL && rf != postreadfrom)
1657 mo_graph->addEdge(rf, postreadfrom);
1659 //Case 3: The resolved read is a normal read and the next
1660 //operation is a write, and we need to make sure that the
1661 //write is mod ordered after rf
1663 mo_graph->addEdge(rf, lastact);
1671 * Updates the mo_graph with the constraints imposed from the current write.
1673 * Basic idea is the following: Go through each other thread and find
1674 * the latest action that happened before our write. Two cases:
1676 * (1) The action is a write => that write must occur before
1679 * (2) The action is a read => the write that that action read from
1680 * must occur before the current write.
1682 * This method also handles two other issues:
1684 * (I) Sequential Consistency: Making sure that if the current write is
1685 * seq_cst, that it occurs after the previous seq_cst write.
1687 * (II) Sending the write back to non-synchronizing reads.
1689 * @param curr The current action. Must be a write.
1690 * @return True if modification order edges were added; false otherwise
1692 bool ModelChecker::w_modification_order(ModelAction *curr)
1694 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1697 ASSERT(curr->is_write());
1699 if (curr->is_seqcst()) {
1700 /* We have to at least see the last sequentially consistent write,
1701 so we are initialized. */
1702 ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
1703 if (last_seq_cst != NULL) {
// Issue (I): seq_cst writes to one location are totally ordered.
1704 mo_graph->addEdge(last_seq_cst, curr);
1709 /* Last SC fence in the current thread */
1710 ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1712 /* Iterate over all threads */
1713 for (i = 0; i < thrd_lists->size(); i++) {
1714 /* Last SC fence in thread i, before last SC fence in current thread */
1715 ModelAction *last_sc_fence_thread_before = NULL;
1716 if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
1717 last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1719 /* Iterate over actions in thread, starting from most recent */
1720 action_list_t *list = &(*thrd_lists)[i];
1721 action_list_t::reverse_iterator rit;
1722 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1723 ModelAction *act = *rit;
1726 * 1) If RMW and it actually read from something, then we
1727 * already have all relevant edges, so just skip to next
1730 * 2) If RMW and it didn't read from anything, we should add
1731 * whatever edge we can get to speed up convergence.
1733 * 3) If normal write, we need to look at earlier actions, so
1734 * continue processing list.
1736 if (curr->is_rmw()) {
1737 if (curr->get_reads_from() != NULL)
1745 /* C++, Section 29.3 statement 7 */
1746 if (last_sc_fence_thread_before && act->is_write() &&
1747 *act < *last_sc_fence_thread_before) {
1748 mo_graph->addEdge(act, curr);
1754 * Include at most one act per-thread that "happens
1757 if (act->happens_before(curr)) {
1759 * Note: if act is RMW, just add edge:
1761 * The following edge should be handled elsewhere:
1762 * readfrom(act) --mo--> act
1764 if (act->is_write())
1765 mo_graph->addEdge(act, curr);
1766 else if (act->is_read()) {
1767 //if previous read accessed a null, just keep going
1768 if (act->get_reads_from() == NULL)
1770 mo_graph->addEdge(act->get_reads_from(), curr);
1774 } else if (act->is_read() && !act->could_synchronize_with(curr) &&
1775 !act->same_thread(curr)) {
1776 /* We have an action that:
1777 (1) did not happen before us
1778 (2) is a read and we are a write
1779 (3) cannot synchronize with us
1780 (4) is in a different thread
1782 that read could potentially read from our write. Note that
1783 these checks are overly conservative at this point, we'll
1784 do more checks before actually removing the
// Issue (II): record a pending future value so the read may later
// be re-explored reading from this write.
1788 if (thin_air_constraint_may_allow(curr, act)) {
1789 if (!is_infeasible() ||
1790 (curr->is_rmw() && act->is_rmw() && curr->get_reads_from() == act->get_reads_from() && !is_infeasible_ignoreRMW())) {
1791 futurevalues->push_back(PendingFutureValue(curr, act));
1801 /** Arbitrary reads from the future are not allowed. Section 29.3
1802 * part 9 places some constraints. This method checks one result of the
1803 * constraint. Others require compiler support. */
1804 bool ModelChecker::thin_air_constraint_may_allow(const ModelAction *writer, const ModelAction *reader)
// The constraint only restricts RMW-to-RMW chains; anything else is allowed.
1806 if (!writer->is_rmw())
1809 if (!reader->is_rmw())
// Walk the writer's reads-from chain backwards looking for the reader: a
// chain of RMWs must not loop back through a future read.
1812 for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
1813 if (search == reader)
1815 if (search->get_tid() == reader->get_tid() &&
1816 search->happens_before(reader))
1824 * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
1825 * some constraints. This method checks one of the following constraints (others
1826 * require compiler support):
1828 * If X --hb-> Y --mo-> Z, then X should not read from Z.
1830 bool ModelChecker::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
1832 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, reader->get_location());
1834 /* Iterate over all threads */
1835 for (i = 0; i < thrd_lists->size(); i++) {
// Track, per thread, the earliest write (or read's source write) that
// happens after 'reader' — a candidate Y in X --hb-> Y --mo-> Z.
1836 const ModelAction *write_after_read = NULL;
1838 /* Iterate over actions in thread, starting from most recent */
1839 action_list_t *list = &(*thrd_lists)[i];
1840 action_list_t::reverse_iterator rit;
1841 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1842 ModelAction *act = *rit;
1844 /* Don't disallow due to act == reader */
1845 if (!reader->happens_before(act) || reader == act)
1847 else if (act->is_write())
1848 write_after_read = act;
1849 else if (act->is_read() && act->get_reads_from() != NULL)
1850 write_after_read = act->get_reads_from();
// Reading from 'writer' is disallowed if some write hb-after the reader
// is mo-ordered before 'writer' (i.e., Y --mo--> Z is reachable).
1853 if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
1860 * Finds the head(s) of the release sequence(s) containing a given ModelAction.
1861 * The ModelAction under consideration is expected to be taking part in
1862 * release/acquire synchronization as an object of the "reads from" relation.
1863 * Note that this can only provide release sequence support for RMW chains
1864 * which do not read from the future, as those actions cannot be traced until
1865 * their "promise" is fulfilled. Similarly, we may not even establish the
1866 * presence of a release sequence with certainty, as some modification order
1867 * constraints may be decided further in the future. Thus, this function
1868 * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
1869 * and a boolean representing certainty.
1871 * @param rf The action that might be part of a release sequence. Must be a
1873 * @param release_heads A pass-by-reference style return parameter. After
1874 * execution of this function, release_heads will contain the heads of all the
1875 * relevant release sequences, if any exist with certainty
1876 * @param pending A pass-by-reference style return parameter which is only used
1877 * when returning false (i.e., uncertain). Returns most information regarding
1878 * an uncertain release sequence, including any write operations that might
1879 * break the sequence.
1880 * @return true, if the ModelChecker is certain that release_heads is complete;
1883 bool ModelChecker::release_seq_heads(const ModelAction *rf,
1884 rel_heads_list_t *release_heads,
1885 struct release_seq *pending) const
1887 /* Only check for release sequences if there are no cycles */
1888 if (mo_graph->checkForCycles())
1892 ASSERT(rf->is_write());
// Phase 1: walk backwards along the RMW chain from rf, collecting any
// write-release / fence-release heads encountered along the way.
1894 if (rf->is_release())
1895 release_heads->push_back(rf);
1896 else if (rf->get_last_fence_release())
1897 release_heads->push_back(rf->get_last_fence_release());
1899 break; /* End of RMW chain */
1901 /** @todo Need to be smarter here... In the linux lock
1902 * example, this will run to the beginning of the program for
1904 /** @todo The way to be smarter here is to keep going until 1
1905 * thread has a release preceded by an acquire and you've seen
1908 /* acq_rel RMW is a sufficient stopping condition */
1909 if (rf->is_acquire() && rf->is_release())
1910 return true; /* complete */
1912 rf = rf->get_reads_from();
1915 /* read from future: need to settle this later */
1917 return false; /* incomplete */
1920 if (rf->is_release())
1921 return true; /* complete */
1923 /* else relaxed write
1924 * - check for fence-release in the same thread (29.8, stmt. 3)
1925 * - check modification order for contiguous subsequence
1926 * -> rf must be same thread as release */
1928 const ModelAction *fence_release = rf->get_last_fence_release();
1929 /* Synchronize with a fence-release unconditionally; we don't need to
1930 * find any more "contiguous subsequence..." for it */
1932 release_heads->push_back(fence_release);
// Phase 2: search rf's own thread for the most recent write-release that
// could head a contiguous release sequence ending at rf.
1934 int tid = id_to_int(rf->get_tid());
1935 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, rf->get_location());
1936 action_list_t *list = &(*thrd_lists)[tid];
1937 action_list_t::const_reverse_iterator rit;
1939 /* Find rf in the thread list */
1940 rit = std::find(list->rbegin(), list->rend(), rf);
1941 ASSERT(rit != list->rend());
1943 /* Find the last {write,fence}-release */
1944 for (; rit != list->rend(); rit++) {
1945 if (fence_release && *(*rit) < *fence_release)
1947 if ((*rit)->is_release())
1950 if (rit == list->rend()) {
1951 /* No write-release in this thread */
1952 return true; /* complete */
1953 } else if (fence_release && *(*rit) < *fence_release) {
1954 /* The fence-release is more recent (and so, "stronger") than
1955 * the most recent write-release */
1956 return true; /* complete */
1957 } /* else, need to establish contiguous release sequence */
1958 ModelAction *release = *rit;
1960 ASSERT(rf->same_thread(release));
1962 pending->writes.clear();
// Phase 3: check every other thread for writes that could break the
// contiguity of the sequence between 'release' and 'rf'.
1964 bool certain = true;
1965 for (unsigned int i = 0; i < thrd_lists->size(); i++) {
1966 if (id_to_int(rf->get_tid()) == (int)i)
1968 list = &(*thrd_lists)[i];
1970 /* Can we ensure no future writes from this thread may break
1971 * the release seq? */
1972 bool future_ordered = false;
1974 ModelAction *last = get_last_action(int_to_id(i));
1975 Thread *th = get_thread(int_to_id(i));
1976 if ((last && rf->happens_before(last)) ||
1979 future_ordered = true;
1981 ASSERT(!th->is_model_thread() || future_ordered);
1983 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1984 const ModelAction *act = *rit;
1985 /* Reach synchronization -> this thread is complete */
1986 if (act->happens_before(release))
1988 if (rf->happens_before(act)) {
1989 future_ordered = true;
1993 /* Only non-RMW writes can break release sequences */
1994 if (!act->is_write() || act->is_rmw())
1997 /* Check modification order */
1998 if (mo_graph->checkReachable(rf, act)) {
1999 /* rf --mo--> act */
2000 future_ordered = true;
2003 if (mo_graph->checkReachable(act, release))
2004 /* act --mo--> release */
2006 if (mo_graph->checkReachable(release, act) &&
2007 mo_graph->checkReachable(act, rf)) {
2008 /* release --mo-> act --mo--> rf */
// An intervening write inside the sequence breaks it for good.
2009 return true; /* complete */
2011 /* act may break release sequence */
2012 pending->writes.push_back(act);
2015 if (!future_ordered)
2016 certain = false; /* This thread is uncertain */
2020 release_heads->push_back(release);
2021 pending->writes.clear();
// Uncertain: stash the candidate release so the caller can re-check later.
2023 pending->release = release;
2030 * An interface for getting the release sequence head(s) with which a
2031 * given ModelAction must synchronize. This function only returns a non-empty
2032 * result when it can locate a release sequence head with certainty. Otherwise,
2033 * it may mark the internal state of the ModelChecker so that it will handle
2034 * the release sequence at a later time, causing @a acquire to update its
2035 * synchronization at some later point in execution.
2037 * @param acquire The 'acquire' action that may synchronize with a release
2039 * @param read The read action that may read from a release sequence; this may
2040 * be the same as acquire, or else an earlier action in the same thread (i.e.,
2041 * when 'acquire' is a fence-acquire)
2042 * @param release_heads A pass-by-reference return parameter. Will be filled
2043 * with the head(s) of the release sequence(s), if they exist with certainty.
2044 * @see ModelChecker::release_seq_heads
2046 void ModelChecker::get_release_seq_heads(ModelAction *acquire,
2047 ModelAction *read, rel_heads_list_t *release_heads)
2049 const ModelAction *rf = read->get_reads_from();
// Allocate a scratch release_seq in snapshotted memory; it is only retained
// when the result is uncertain (see below).
2050 struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
2051 sequence->acquire = acquire;
2052 sequence->read = read;
2054 if (!release_seq_heads(rf, release_heads, sequence)) {
2055 /* add act to 'lazy checking' list */
2056 pending_rel_seqs->push_back(sequence);
// Certain result: the scratch struct is no longer needed.
2058 snapshot_free(sequence);
2063 * Attempt to resolve all stashed operations that might synchronize with a
2064 * release sequence for a given location. This implements the "lazy" portion of
2065 * determining whether or not a release sequence was contiguous, since not all
2066 * modification order information is present at the time an action occurs.
2068 * @param location The location/object that should be checked for release
2069 * sequence resolutions. A NULL value means to check all locations.
2070 * @param work_queue The work queue to which to add work items as they are
2072 * @return True if any updates occurred (new synchronization, new mo_graph
2075 bool ModelChecker::resolve_release_sequences(void *location, work_queue_t *work_queue)
2077 bool updated = false;
2078 std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >::iterator it = pending_rel_seqs->begin();
2079 while (it != pending_rel_seqs->end()) {
2080 struct release_seq *pending = *it;
2081 ModelAction *acquire = pending->acquire;
2082 const ModelAction *read = pending->read;
2084 /* Only resolve sequences on the given location, if provided */
2085 if (location && read->get_location() != location) {
// Re-run the release-sequence computation with the mo information now
// available; 'pending' is updated in place if still uncertain.
2090 const ModelAction *rf = read->get_reads_from();
2091 rel_heads_list_t release_heads;
2093 complete = release_seq_heads(rf, &release_heads, pending);
2094 for (unsigned int i = 0; i < release_heads.size(); i++) {
2095 if (!acquire->has_synchronized_with(release_heads[i])) {
2096 if (acquire->synchronize_with(release_heads[i]))
2099 set_bad_synchronization();
2104 /* Re-check all pending release sequences */
2105 work_queue->push_back(CheckRelSeqWorkEntry(NULL));
2106 /* Re-check read-acquire for mo_graph edges */
2107 if (acquire->is_read())
2108 work_queue->push_back(MOEdgeWorkEntry(acquire));
2110 /* propagate synchronization to later actions */
2111 action_list_t::reverse_iterator rit = action_trace->rbegin();
2112 for (; (*rit) != acquire; rit++) {
2113 ModelAction *propagate = *rit;
2114 if (acquire->happens_before(propagate)) {
2115 propagate->synchronize_with(acquire);
2116 /* Re-check 'propagate' for mo_graph edges */
2117 work_queue->push_back(MOEdgeWorkEntry(propagate));
// Resolved with certainty: remove from the pending list and free.
2122 it = pending_rel_seqs->erase(it);
2123 snapshot_free(pending);
2129 // If we resolved promises or data races, see if we have realized a data race.
2136 * Performs various bookkeeping operations for the current ModelAction. For
2137 * instance, adds action to the per-object, per-thread action vector and to the
2138 * action trace list of all thread actions.
2140 * @param act is the ModelAction to add.
2142 void ModelChecker::add_action_to_lists(ModelAction *act)
2144 int tid = id_to_int(act->get_tid());
2145 ModelAction *uninit = NULL;
2147 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
// First-ever access of an atomic variable: inject a synthetic
// "uninitialized" write so reads always have a candidate store.
2148 if (list->empty() && act->is_atomic_var()) {
2149 uninit = new_uninitialized_action(act->get_location());
2150 uninit_id = id_to_int(uninit->get_tid());
2151 list->push_back(uninit);
2153 list->push_back(act);
2155 action_trace->push_back(act);
// The uninitialized write is ordered before everything in the trace.
2157 action_trace->push_front(uninit);
2159 std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, act->get_location());
2160 if (tid >= (int)vec->size())
2161 vec->resize(priv->next_thread_id);
2162 (*vec)[tid].push_back(act);
2164 (*vec)[uninit_id].push_front(uninit);
2166 if ((int)thrd_last_action->size() <= tid)
2167 thrd_last_action->resize(get_num_threads());
2168 (*thrd_last_action)[tid] = act;
2170 (*thrd_last_action)[uninit_id] = uninit;
2172 if (act->is_fence() && act->is_release()) {
2173 if ((int)thrd_last_fence_release->size() <= tid)
2174 thrd_last_fence_release->resize(get_num_threads());
2175 (*thrd_last_fence_release)[tid] = act;
2178 if (act->is_wait()) {
// A wait releases its mutex, so also record it against the mutex
// location (stored in the action's value field).
2179 void *mutex_loc = (void *) act->get_value();
2180 get_safe_ptr_action(obj_map, mutex_loc)->push_back(act);
2182 std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, mutex_loc);
2183 if (tid >= (int)vec->size())
2184 vec->resize(priv->next_thread_id);
2185 (*vec)[tid].push_back(act);
2190 * @brief Get the last action performed by a particular Thread
2191 * @param tid The thread ID of the Thread in question
2192 * @return The last action in the thread
2194 ModelAction * ModelChecker::get_last_action(thread_id_t tid) const
2196 int threadid = id_to_int(tid);
// Out-of-range tid means the thread has performed no actions yet.
2197 if (threadid < (int)thrd_last_action->size())
2198 return (*thrd_last_action)[id_to_int(tid)];
2204 * @brief Get the last fence release performed by a particular Thread
2205 * @param tid The thread ID of the Thread in question
2206 * @return The last fence release in the thread, if one exists; NULL otherwise
2208 ModelAction * ModelChecker::get_last_fence_release(thread_id_t tid) const
2210 int threadid = id_to_int(tid);
// Out-of-range tid means the thread has issued no fence-release yet.
2211 if (threadid < (int)thrd_last_fence_release->size())
2212 return (*thrd_last_fence_release)[id_to_int(tid)];
2218 * Gets the last memory_order_seq_cst write (in the total global sequence)
2219 * performed on a particular object (i.e., memory location), not including the
2221 * @param curr The current ModelAction; also denotes the object location to
2223 * @return The last seq_cst write
2225 ModelAction * ModelChecker::get_last_seq_cst_write(ModelAction *curr) const
2227 void *location = curr->get_location();
2228 action_list_t *list = get_safe_ptr_action(obj_map, location);
2229 /* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
2230 action_list_t::reverse_iterator rit;
// Reverse scan => the first match is the most recent seq_cst write,
// excluding curr itself.
2231 for (rit = list->rbegin(); rit != list->rend(); rit++)
2232 if ((*rit)->is_write() && (*rit)->is_seqcst() && (*rit) != curr)
2238 * Gets the last memory_order_seq_cst fence (in the total global sequence)
2239 * performed in a particular thread, prior to a particular fence.
2240 * @param tid The ID of the thread to check
2241 * @param before_fence The fence from which to begin the search; if NULL, then
2242 * search for the most recent fence in the thread.
2243 * @return The last prior seq_cst fence in the thread, if exists; otherwise, NULL
2245 ModelAction * ModelChecker::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
2247 /* All fences should have NULL location */
// Fences are filed under the NULL key in obj_map, so one list holds every
// fence in the execution, in global sequence order.
2248 action_list_t *list = get_safe_ptr_action(obj_map, NULL);
2249 action_list_t::reverse_iterator rit = list->rbegin();
// Phase 1 (only when before_fence is given; guard elided from view): advance
// the reverse iterator until it points at before_fence itself, then assert we
// actually found it before searching strictly-earlier actions.
2252 for (; rit != list->rend(); rit++)
2253 if (*rit == before_fence)
2256 ASSERT(*rit == before_fence);
// Phase 2: continue backwards for the nearest earlier seq_cst fence in 'tid'.
2260 for (; rit != list->rend(); rit++)
2261 if ((*rit)->is_fence() && (tid == (*rit)->get_tid()) && (*rit)->is_seqcst())
2267 * Gets the last unlock operation performed on a particular mutex (i.e., memory
2268 * location). This function identifies the mutex according to the current
2269 * action, which is presumed to perform on the same mutex.
2270 * @param curr The current ModelAction; also denotes the object location to
2272 * @return The last unlock operation
2274 ModelAction * ModelChecker::get_last_unlock(ModelAction *curr) const
2276 void *location = curr->get_location();
2277 action_list_t *list = get_safe_ptr_action(obj_map, location);
2278 /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
// Reverse scan finds the most recent match. Wait actions qualify as well —
// presumably because a wait releases the mutex it operates on (confirm
// against the wait handling elsewhere in this file).
2279 action_list_t::reverse_iterator rit;
2280 for (rit = list->rbegin(); rit != list->rend(); rit++)
2281 if ((*rit)->is_unlock() || (*rit)->is_wait())
// Get the "parent" action of a thread: its last recorded action, or — when
// it has performed none (guard condition elided from this view) — the
// creation action that spawned it.
2286 ModelAction * ModelChecker::get_parent_action(thread_id_t tid) const
2288 ModelAction *parent = get_last_action(tid);
2290 parent = get_thread(tid)->get_creation();
2295 * Returns the clock vector for a given thread.
2296 * @param tid The thread whose clock vector we want
2297 * @return Desired clock vector
// Delegates to the thread's parent action (last action or creation action);
// the clock vector lives on the ModelAction, not on the Thread itself.
2299 ClockVector * ModelChecker::get_cv(thread_id_t tid) const
2301 return get_parent_action(tid)->get_cv();
2305 * Resolve a set of Promises with a current write. The set is provided in the
2306 * Node corresponding to @a write.
2307 * @param write The ModelAction that is fulfilling Promises
2308 * @return True if promises were resolved; false otherwise
2310 bool ModelChecker::resolve_promises(ModelAction *write)
2312 bool resolved = false;
2313 std::vector< thread_id_t, ModelAlloc<thread_id_t> > threads_to_check;
// Two indices: 'i' tracks the Node's promise bit positions (fixed when the
// write's Node was built), while 'promise_index' tracks the live promises
// vector, which shrinks as resolved promises are erased below.
2315 for (unsigned int i = 0, promise_index = 0; promise_index < promises->size(); i++) {
2316 Promise *promise = (*promises)[promise_index];
2317 if (write->get_node()->get_promise(i)) {
2318 ModelAction *read = promise->get_action();
// RMW readers additionally get an atomic-RMW edge in the mo_graph so the
// read-modify-write stays adjacent to 'write' in modification order.
2319 if (read->is_rmw()) {
2320 mo_graph->addRMWEdge(write, read);
2322 read_from(read, write);
2323 //First fix up the modification order for actions that happened
2325 r_modification_order(read, write);
2326 //Next fix up the modification order for actions that happened
2328 post_r_modification_order(read, write);
2329 //Make sure the promise's value matches the write's value
2330 ASSERT(promise->get_value() == write->get_value());
2333 promises->erase(promises->begin() + promise_index);
2334 threads_to_check.push_back(read->get_tid());
2341 //Check whether reading these writes has made threads unable to
// ...resolve their remaining promises (continuation of comment elided above).
2344 for (unsigned int i = 0; i < threads_to_check.size(); i++)
2345 mo_check_promises(threads_to_check[i], write);
2351 * Compute the set of promises that could potentially be satisfied by this
2352 * action. Note that the set computation actually appears in the Node, not in
2354 * @param curr The ModelAction that may satisfy promises
2356 void ModelChecker::compute_promises(ModelAction *curr)
2358 for (unsigned int i = 0; i < promises->size(); i++) {
2359 Promise *promise = (*promises)[i];
2360 const ModelAction *act = promise->get_action();
// A promise is a candidate only if the promised read is concurrent with
// 'curr' (no happens-before, no possible synchronization, different thread)
// and it reads the same location with the same value 'curr' writes.
2361 if (!act->happens_before(curr) &&
2363 !act->could_synchronize_with(curr) &&
2364 !act->same_thread(curr) &&
2365 act->get_location() == curr->get_location() &&
2366 promise->get_value() == curr->get_value()) {
// Record candidacy on curr's Node, remembering whether the reader is an RMW.
2367 curr->get_node()->set_promise(i, act->is_rmw());
2372 /** Checks promises in response to change in ClockVector Threads. */
2373 void ModelChecker::check_promises(thread_id_t tid, ClockVector *old_cv, ClockVector *merge_cv)
2375 for (unsigned int i = 0; i < promises->size(); i++) {
2376 Promise *promise = (*promises)[i];
2377 const ModelAction *act = promise->get_action();
// Trigger only on the transition: 'tid' was NOT synchronized with the
// promised read before the merge, but IS synchronized afterwards.
2378 if ((old_cv == NULL || !old_cv->synchronized_since(act)) &&
2379 merge_cv->synchronized_since(act)) {
// increment_threads() returning true means no thread can still satisfy
// the promise — mark the whole execution as a failed-promise state.
2380 if (promise->increment_threads(tid)) {
2381 //Promise has failed
2382 priv->failed_promise = true;
// Re-validate all outstanding promises after a thread is disabled; any
// promise that check_promise() reports unsatisfiable fails the execution.
2389 void ModelChecker::check_promises_thread_disabled() {
2390 for (unsigned int i = 0; i < promises->size(); i++) {
2391 Promise *promise = (*promises)[i];
2392 if (promise->check_promise()) {
2393 priv->failed_promise = true;
2399 /** Checks promises in response to addition to modification order for threads.
2401 * pthread is the thread that performed the read that created the promise
2403 * pread is the read that created the promise
2405 * pwrite is either the first write to same location as pread by
2406 * pthread that is sequenced after pread or the value read by the
2407 * first read to the same location as pread by pthread that is
2408 * sequenced after pread.
2410 * 1. If tid=pthread, then we check what other threads are reachable
2411 * through the modification order starting with pwrite. Those threads cannot
2412 * perform a write that will resolve the promise due to modification
2413 * order constraints.
2415 * 2. If the tid is not pthread, we check whether pwrite can reach the
2416 * action write through the modification order. If so, that thread
2417 * cannot perform a future write that will resolve the promise due to
2418 * modification order constraints.
2420 * @param tid The thread that either read from the model action
2421 * write, or actually did the model action write.
2423 * @param write The ModelAction representing the relevant write.
2425 void ModelChecker::mo_check_promises(thread_id_t tid, const ModelAction *write)
2427 void *location = write->get_location();
2428 for (unsigned int i = 0; i < promises->size(); i++) {
2429 Promise *promise = (*promises)[i];
2430 const ModelAction *act = promise->get_action();
2432 //Is this promise on the same location?
2433 if (act->get_location() != location)
2436 //same thread as the promise
// Case 1 from the header comment: 'tid' is pthread itself.
2437 if (act->get_tid() == tid) {
2439 //do we have a pwrite for the promise, if not, set it
2440 if (promise->get_write() == NULL) {
2441 promise->set_write(write);
2442 //The pwrite cannot happen before the promise
2443 if (write->happens_before(act) && (write != act)) {
2444 priv->failed_promise = true;
// checkPromise() walks the mo_graph from pwrite; true means every other
// thread is mo-constrained away from resolving this promise.
2448 if (mo_graph->checkPromise(write, promise)) {
2449 priv->failed_promise = true;
2454 //Don't do any lookups twice for the same thread
2455 if (promise->has_sync_thread(tid))
// Case 2: a different thread — if pwrite reaches 'write' in modification
// order, 'tid' can never produce the resolving write.
2458 if (promise->get_write() && mo_graph->checkReachable(promise->get_write(), write)) {
2459 if (promise->increment_threads(tid)) {
2460 priv->failed_promise = true;
2468 * Compute the set of writes that may break the current pending release
2469 * sequence. This information is extracted from previous release sequence
2472 * @param curr The current ModelAction. Must be a release sequence fixup
2475 void ModelChecker::compute_relseq_breakwrites(ModelAction *curr)
2477 if (pending_rel_seqs->empty())
// Only the most recently pending release sequence is considered; each of its
// candidate writes becomes a "break" choice on curr's Node.
2480 struct release_seq *pending = pending_rel_seqs->back();
2481 for (unsigned int i = 0; i < pending->writes.size(); i++) {
2482 const ModelAction *write = pending->writes[i];
2483 curr->get_node()->add_relseq_break(write);
2486 /* NULL means don't break the sequence; just synchronize */
2487 curr->get_node()->add_relseq_break(NULL);
2491 * Build up an initial set of all past writes that this 'read' action may read
2492 * from. This set is determined by the clock vector's "happens before"
2494 * @param curr is the current ModelAction that we are exploring; it must be a
2497 void ModelChecker::build_reads_from_past(ModelAction *curr)
2499 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
2501 ASSERT(curr->is_read());
2503 ModelAction *last_sc_write = NULL;
// A seq_cst read may not read past the last seq_cst write to this location.
2505 if (curr->is_seqcst())
2506 last_sc_write = get_last_seq_cst_write(curr);
2508 /* Iterate over all threads */
2509 for (i = 0; i < thrd_lists->size(); i++) {
2510 /* Iterate over actions in thread, starting from most recent */
2511 action_list_t *list = &(*thrd_lists)[i];
2512 action_list_t::reverse_iterator rit;
2513 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2514 ModelAction *act = *rit;
2516 /* Only consider 'write' actions */
2517 if (!act->is_write() || act == curr)
2520 /* Don't consider more than one seq_cst write if we are a seq_cst read. */
2521 bool allow_read = true;
// Filter 1: SC read must not read an SC write (or anything before the last
// SC write) other than last_sc_write itself.
2523 if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
// Filter 2: reads flagged for the sleep set are further restricted.
2525 else if (curr->get_sleep_flag() && !curr->is_seqcst() && !sleep_can_read_from(curr, act))
2529 curr->get_node()->add_read_from(act);
2531 /* Include at most one act per-thread that "happens before" curr */
// Once a happens-before write is found, earlier writes in this thread are
// hidden by it (loop break elided from this view).
2532 if (act->happens_before(curr))
2537 if (DBG_ENABLED()) {
2538 model_print("Reached read action:\n");
2540 model_print("Printing may_read_from\n");
2541 curr->get_node()->print_may_read_from();
2542 model_print("End printing may_read_from\n");
// Decide whether a sleep-set-flagged read 'curr' may read from 'write'.
// Non-RMW case ends the check; for RMW writes the chain is presumably walked
// back via get_reads_from() (loop structure elided from this view — confirm).
2546 bool ModelChecker::sleep_can_read_from(ModelAction *curr, const ModelAction *write)
2549 /* UNINIT actions don't have a Node, and they never sleep */
2550 if (write->is_uninitialized())
// The Node's parent records which threads were in the sleep set when the
// write's step was chosen.
2552 Node *prevnode = write->get_node()->get_parent();
2554 bool thread_sleep = prevnode->enabled_status(curr->get_tid()) == THREAD_SLEEP_SET;
2555 if (write->is_release() && thread_sleep)
2557 if (!write->is_rmw()) {
2560 if (write->get_reads_from() == NULL)
2562 write = write->get_reads_from();
2567 * @brief Create a new action representing an uninitialized atomic
2568 * @param location The memory location of the atomic object
2569 * @return A pointer to a new ModelAction
2571 ModelAction * ModelChecker::new_uninitialized_action(void *location) const
// Allocate from snapshot memory and construct in place (placement new) so
// the action is rolled back with the snapshot rather than the model heap.
2573 ModelAction *act = (ModelAction *)snapshot_malloc(sizeof(class ModelAction));
2574 act = new (act) ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, location, 0, model_thread);
2575 act->create_cv(NULL);
// Print an action list between rule lines, accumulating a simple xor/shift
// hash of the actions so distinct traces can be told apart at a glance.
2579 static void print_list(action_list_t *list)
2581 action_list_t::iterator it;
2583 model_print("---------------------------------------------------------------------\n");
2585 unsigned int hash = 0;
2587 for (it = list->begin(); it != list->end(); it++) {
2589 hash = hash^(hash<<3)^((*it)->hash());
2591 model_print("HASH %u\n", hash);
2592 model_print("---------------------------------------------------------------------\n");
#if SUPPORT_MOD_ORDER_DUMP
// Dump the current execution as a Graphviz .dot file: mo_graph nodes plus
// reads-from (red "rf") and sequenced-before (blue "sb") edges.
2596 void ModelChecker::dumpGraph(char *filename) const
2599 sprintf(buffer, "%s.dot", filename);
// NOTE(review): fopen result is not checked; a failed open would crash the
// fprintf calls below. Debug-only path, but worth a guard in a code change.
2600 FILE *file = fopen(buffer, "w");
2601 fprintf(file, "digraph %s {\n", filename);
2602 mo_graph->dumpNodes(file);
// thread_array[t] tracks the previous action seen in thread t, for sb edges.
2603 ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());
2605 for (action_list_t::iterator it = action_trace->begin(); it != action_trace->end(); it++) {
2606 ModelAction *action = *it;
2607 if (action->is_read()) {
2608 fprintf(file, "N%u [label=\"%u, T%u\"];\n", action->get_seq_number(), action->get_seq_number(), action->get_tid());
2609 if (action->get_reads_from() != NULL)
2610 fprintf(file, "N%u -> N%u[label=\"rf\", color=red];\n", action->get_seq_number(), action->get_reads_from()->get_seq_number());
2612 if (thread_array[action->get_tid()] != NULL) {
2613 fprintf(file, "N%u -> N%u[label=\"sb\", color=blue];\n", thread_array[action->get_tid()]->get_seq_number(), action->get_seq_number());
2616 thread_array[action->get_tid()] = action;
2618 fprintf(file, "}\n");
2619 model_free(thread_array);
2624 /** @brief Prints an execution trace summary. */
2625 void ModelChecker::print_summary() const
2627 #if SUPPORT_MOD_ORDER_DUMP
// Optional debug dumps: modification-order graph and full .dot trace graph,
// numbered by total executions so successive runs don't overwrite each other.
2629 char buffername[100];
2630 sprintf(buffername, "exec%04u", stats.num_total);
2631 mo_graph->dumpGraphToFile(buffername);
2632 sprintf(buffername, "graph%04u", stats.num_total);
2633 dumpGraph(buffername);
2636 model_print("Execution %d:", stats.num_total);
2637 if (isfeasibleprefix())
2640 print_infeasibility(" INFEASIBLE");
2641 print_list(action_trace);
2646 * Add a Thread to the system for the first time. Should only be called once
2648 * @param t The Thread to add
// Registers the thread in the ID-keyed map and hands it to the scheduler.
2650 void ModelChecker::add_thread(Thread *t)
2652 thread_map->put(id_to_int(t->get_id()), t);
2653 scheduler->add_thread(t);
2657 * Removes a thread from the scheduler.
2658 * @param t The thread to remove.
// Note: only the scheduler forgets the thread; thread_map retains its entry.
2660 void ModelChecker::remove_thread(Thread *t)
2662 scheduler->remove_thread(t);
2666 * @brief Get a Thread reference by its ID
2667 * @param tid The Thread's ID
2668 * @return A Thread reference
2670 Thread * ModelChecker::get_thread(thread_id_t tid) const
2672 return thread_map->get(id_to_int(tid));
2676 * @brief Get a reference to the Thread in which a ModelAction was executed
2677 * @param act The ModelAction
2678 * @return A Thread reference
// Convenience overload: resolves the action's thread ID through the map.
2680 Thread * ModelChecker::get_thread(ModelAction *act) const
2682 return get_thread(act->get_tid());
2686 * @brief Check if a Thread is currently enabled
2687 * @param t The Thread to check
2688 * @return True if the Thread is currently enabled
2690 bool ModelChecker::is_enabled(Thread *t) const
2692 return scheduler->is_enabled(t);
2696 * @brief Check if a Thread is currently enabled
2697 * @param tid The ID of the Thread to check
2698 * @return True if the Thread is currently enabled
2700 bool ModelChecker::is_enabled(thread_id_t tid) const
2702 return scheduler->is_enabled(tid);
2706 * Switch from a user-context to the "master thread" context (a.k.a. system
2707 * context). This switch is made with the intention of exploring a particular
2708 * model-checking action (described by a ModelAction object). Must be called
2709 * from a user-thread context.
2711 * @param act The current action that will be explored. May be NULL only if
2712 * trace is exiting via an assertion (see ModelChecker::set_assert and
2713 * ModelChecker::has_asserted).
2714 * @return Return the value returned by the current action
2716 uint64_t ModelChecker::switch_to_master(ModelAction *act)
2719 Thread *old = thread_current();
// Publish the pending action, mark this thread ready, then context-switch
// into the system context. Execution resumes here once the scheduler picks
// this thread again, at which point the action's result is available.
2720 set_current_action(act);
2721 old->set_state(THREAD_READY);
2722 if (Thread::swap(old, &system_context) < 0) {
2723 perror("swap threads");
2726 return old->get_return_value();
2730 * Takes the next step in the execution, if possible.
2731 * @param curr The current step to take
2732 * @return Returns true (success) if a step was taken and false otherwise.
2734 bool ModelChecker::take_step(ModelAction *curr)
2739 Thread *curr_thrd = get_thread(curr);
2740 ASSERT(curr_thrd->get_state() == THREAD_READY);
// check_current_action() may substitute a replayed/rewritten action.
2742 curr = check_current_action(curr);
2744 /* Infeasible -> don't take any more steps */
2745 if (is_infeasible())
// Stop early once a bug has been found on a feasible prefix.
2747 else if (isfeasibleprefix() && have_bug_reports()) {
// Optional user-specified bound on execution length (0 = unbounded).
2752 if (params.bound != 0)
2753 if (priv->used_sequence_numbers > params.bound)
2756 if (curr_thrd->is_blocked() || curr_thrd->is_complete())
2757 scheduler->remove_thread(curr_thrd);
2759 Thread *next_thrd = get_next_thread(curr);
2760 next_thrd = scheduler->next_thread(next_thrd);
2762 DEBUG("(%d, %d)\n", curr_thrd ? id_to_int(curr_thrd->get_id()) : -1,
2763 next_thrd ? id_to_int(next_thrd->get_id()) : -1);
2766 * Launch end-of-execution release sequence fixups only when there are:
2768 * (1) no more user threads to run (or when execution replay chooses
2769 * the 'model_thread')
2770 * (2) pending release sequences
2771 * (3) pending assertions (i.e., data races)
2772 * (4) no pending promises
2774 if (!pending_rel_seqs->empty() && (!next_thrd || next_thrd->is_model_thread()) &&
2775 is_feasible_prefix_ignore_relseq() && !unrealizedraces.empty()) {
// NOTE(review): typo "seuqences" in this user-visible message — fix in a
// code change, not here (doc-only edit must not alter runtime strings).
2776 model_print("*** WARNING: release sequence fixup action (%zu pending release seuqences) ***\n",
2777 pending_rel_seqs->size());
2778 ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
2779 std::memory_order_seq_cst, NULL, VALUE_NONE,
2781 set_current_action(fixup);
2785 /* next_thrd == NULL -> don't take any more steps */
2789 next_thrd->set_state(THREAD_RUNNING);
2791 if (next_thrd->get_pending() != NULL) {
2792 /* restart a pending action */
2793 set_current_action(next_thrd->get_pending());
2794 next_thrd->set_pending(NULL);
2795 next_thrd->set_state(THREAD_READY);
2799 /* Return false only if swap fails with an error */
2800 return (Thread::swap(&system_context, next_thrd) == 0);
2803 /** Wrapper to run the user's main function, with appropriate arguments */
2804 void user_main_wrapper(void *)
2806 user_main(model->params.argc, model->params.argv);
2809 /** @brief Run ModelChecker for the user program */
2810 void ModelChecker::run()
// Outer do/while (opening elided from this view): one iteration per explored
// execution, continuing while next_execution() finds another to try.
2814 Thread *t = new Thread(&user_thread, &user_main_wrapper, NULL);
2818 /* Run user thread up to its first action */
2819 scheduler->next_thread(t);
2820 Thread::swap(&system_context, t);
2822 /* Wait for all threads to complete */
2823 while (take_step(priv->current_action));
2824 } while (next_execution());