10 #include "snapshot-interface.h"
12 #include "clockvector.h"
13 #include "cyclegraph.h"
16 #include "threads-model.h"
19 #define INITIAL_THREAD_ID 0
// Constructor: formats the bug description into a snapshot-allocated
// string of the form " [BUG] <str>\n".
24 bug_message(const char *str) {
25 const char *fmt = " [BUG] %s\n";
26 msg = (char *)snapshot_malloc(strlen(fmt) + strlen(str));
27 sprintf(msg, fmt, str);
// Destructor: release the snapshot-allocated message buffer, if any.
29 ~bug_message() { if (msg) snapshot_free(msg); }
// Print the pre-formatted bug message through the model-checker printer.
32 void print() { model_print("%s", msg); }
38 * Structure for holding small ModelChecker members that should be snapshotted
40 struct model_snapshot_members {
// Default-initialize all per-execution state that lives in the snapshot.
41 model_snapshot_members() :
42 /* First thread created will have id INITIAL_THREAD_ID */
43 next_thread_id(INITIAL_THREAD_ID),
44 used_sequence_numbers(0),
48 failed_promise(false),
49 too_many_reads(false),
50 no_valid_reads(false),
51 bad_synchronization(false),
// Destructor: walks the stashed bug reports.
// NOTE(review): loop body not shown in this listing; presumably deletes bugs[i].
55 ~model_snapshot_members() {
56 for (unsigned int i = 0; i < bugs.size(); i++)
// ID to hand to the next created thread; doubles as the thread count.
61 unsigned int next_thread_id;
62 modelclock_t used_sequence_numbers;
63 ModelAction *next_backtrack;
// Bug reports accumulated during this execution (snapshot-allocated).
64 std::vector< bug_message *, SnapshotAlloc<bug_message *> > bugs;
65 struct execution_stats stats;
69 /** @brief Incorrectly-ordered synchronization was made */
70 bool bad_synchronization;
76 /** @brief Constructor */
77 ModelChecker::ModelChecker(struct model_params params) :
78 /* Initialize default scheduler */
80 scheduler(new Scheduler()),
82 earliest_diverge(NULL),
83 action_trace(new action_list_t()),
84 thread_map(new HashTable<int, Thread *, int>()),
// Per-location maps: actions, lock waiters, and condvar waiters keyed by address.
85 obj_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
86 lock_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
87 condvar_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
88 obj_thrd_map(new HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4 >()),
89 promises(new std::vector< Promise *, SnapshotAlloc<Promise *> >()),
90 futurevalues(new std::vector< struct PendingFutureValue, SnapshotAlloc<struct PendingFutureValue> >()),
91 pending_rel_seqs(new std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >()),
92 thrd_last_action(new std::vector< ModelAction *, SnapshotAlloc<ModelAction *> >(1)),
93 thrd_last_fence_release(new std::vector< ModelAction *, SnapshotAlloc<ModelAction *> >()),
94 node_stack(new NodeStack()),
// Snapshotted small members live together in 'priv'.
95 priv(new struct model_snapshot_members()),
96 mo_graph(new CycleGraph())
98 /* Initialize a model-checker thread, for special ModelActions */
99 model_thread = new Thread(get_next_id());
100 thread_map->put(id_to_int(model_thread->get_id()), model_thread);
103 /** @brief Destructor */
104 ModelChecker::~ModelChecker()
// Free every Thread registered in the map (indexed by integer thread id).
106 for (unsigned int i = 0; i < get_num_threads(); i++)
107 delete thread_map->get(i);
112 delete lock_waiters_map;
113 delete condvar_waiters_map;
// Promises are owned by the checker; delete each before the container.
116 for (unsigned int i = 0; i < promises->size(); i++)
117 delete (*promises)[i];
120 delete pending_rel_seqs;
122 delete thrd_last_action;
123 delete thrd_last_fence_release;
// Lazily create (and cache) the per-location action list for 'ptr'.
130 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr)
132 action_list_t *tmp = hash->get(ptr);
// Miss: allocate a fresh list (insertion into 'hash' elided in this listing).
134 tmp = new action_list_t();
// Lazily create (and cache) the per-location vector of per-thread action lists.
140 static std::vector<action_list_t> * get_safe_ptr_vect_action(HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4> * hash, void * ptr)
142 std::vector<action_list_t> *tmp = hash->get(ptr);
144 tmp = new std::vector<action_list_t>();
151 * Restores user program to initial state and resets all model-checker data
154 void ModelChecker::reset_to_initial_state()
156 DEBUG("+++ Resetting to initial state +++\n");
157 node_stack->reset_execution();
159 /* Print all model-checker output before rollback */
163 * FIXME: if we utilize partial rollback, we will need to free only
164 * those pending actions which were NOT pending before the rollback
// Free each thread's pending action before the snapshot rollback discards state.
167 for (unsigned int i = 0; i < get_num_threads(); i++)
168 delete get_thread(int_to_id(i))->get_pending();
// Roll the user program's memory back to the initial snapshot.
170 snapshot_backtrack_before(0);
173 /** @return a thread ID for a new Thread */
174 thread_id_t ModelChecker::get_next_id()
// Post-increment: hands out IDs 0, 1, 2, ... in creation order.
176 return priv->next_thread_id++;
179 /** @return the number of user threads created during this execution */
180 unsigned int ModelChecker::get_num_threads() const
// next_thread_id doubles as the count of threads created so far.
182 return priv->next_thread_id;
186 * Must be called from user-thread context (e.g., through the global
187 * thread_current() interface)
189 * @return The currently executing Thread.
191 Thread * ModelChecker::get_current_thread() const
// Delegate to the scheduler, which tracks the running thread.
193 return scheduler->get_current_thread();
196 /** @return a sequence number for a new ModelAction */
197 modelclock_t ModelChecker::get_next_seq_num()
// Pre-increment: sequence numbers start at 1 (0 is the initial value).
199 return ++priv->used_sequence_numbers;
// @return the NodeStack head, i.e. the Node for the current program point.
202 Node * ModelChecker::get_curr_node() const
204 return node_stack->get_head();
208 * @brief Choose the next thread to execute.
210 * This function chooses the next thread that should execute. It can force the
211 * adjacency of read/write portions of a RMW action, force THREAD_CREATE to be
212 * followed by a THREAD_START, or it can enforce execution replay/backtracking.
213 * The model-checker may have no preference regarding the next thread (i.e.,
214 * when exploring a new execution ordering), in which case we defer to the
217 * @param curr Optional: The current ModelAction. Only used if non-NULL and it
218 * might guide the choice of next thread (i.e., THREAD_CREATE should be
219 * followed by THREAD_START, or ATOMIC_RMWR followed by ATOMIC_{RMW,RMWC})
220 * @return The next chosen thread to run, if any exist. Or else if no threads
221 * remain to be executed, return NULL.
223 Thread * ModelChecker::get_next_thread(ModelAction *curr)
228 /* Do not split atomic actions. */
230 return get_thread(curr);
// THREAD_CREATE must be immediately followed by the new thread's THREAD_START.
231 else if (curr->get_type() == THREAD_CREATE)
232 return curr->get_thread_operand();
236 * Have we completed exploring the preselected path? Then let the
240 return scheduler->select_next_thread();
242 /* Else, we are trying to replay an execution */
243 ModelAction *next = node_stack->get_next()->get_action();
// Track the earliest divergence point seen since the last feasible execution.
245 if (next == diverge) {
246 if (earliest_diverge == NULL || *diverge < *earliest_diverge)
247 earliest_diverge = diverge;
249 Node *nextnode = next->get_node();
250 Node *prevnode = nextnode->get_parent();
251 scheduler->update_sleep_set(prevnode);
253 /* Reached divergence point */
// Try each kind of alternative at this node, in priority order:
// misc -> promise -> read-from -> future value -> release-seq break.
// Each branch replays the same thread and truncates the stack below it.
254 if (nextnode->increment_misc()) {
255 /* The next node will try to satisfy a different misc_index values. */
256 tid = next->get_tid();
257 node_stack->pop_restofstack(2);
258 } else if (nextnode->increment_promise()) {
259 /* The next node will try to satisfy a different set of promises. */
260 tid = next->get_tid();
261 node_stack->pop_restofstack(2);
262 } else if (nextnode->increment_read_from()) {
263 /* The next node will read from a different value. */
264 tid = next->get_tid();
265 node_stack->pop_restofstack(2);
266 } else if (nextnode->increment_future_value()) {
267 /* The next node will try to read from a different future value. */
268 tid = next->get_tid();
269 node_stack->pop_restofstack(2);
270 } else if (nextnode->increment_relseq_break()) {
271 /* The next node will try to resolve a release sequence differently */
272 tid = next->get_tid();
273 node_stack->pop_restofstack(2);
// No alternatives left at this node: backtrack to a different thread instead.
276 /* Make a different thread execute for next step */
277 scheduler->add_sleep(get_thread(next->get_tid()));
278 tid = prevnode->get_next_backtrack();
279 /* Make sure the backtracked thread isn't sleeping. */
280 node_stack->pop_restofstack(1);
281 if (diverge == earliest_diverge) {
282 earliest_diverge = prevnode->get_action();
285 /* The correct sleep set is in the parent node. */
288 DEBUG("*** Divergence point ***\n");
// Not yet at the divergence point: keep replaying the recorded schedule.
292 tid = next->get_tid();
294 DEBUG("*** ModelChecker chose next thread = %d ***\n", id_to_int(tid));
295 ASSERT(tid != THREAD_ID_T_NONE);
296 return thread_map->get(id_to_int(tid));
300 * We need to know what the next actions of all threads in the sleep
301 * set will be. This method computes them and stores the actions at
302 * the corresponding thread object's pending action.
305 void ModelChecker::execute_sleep_set()
306 {
307 for (unsigned int i = 0; i < get_num_threads(); i++) {
308 thread_id_t tid = int_to_id(i);
309 Thread *thr = get_thread(tid);
// Mark the pending action of every sleeping thread so later processing
// knows it was generated while the thread was in the sleep set.
310 if (scheduler->is_sleep_set(thr) && thr->get_pending()) {
311 thr->get_pending()->set_sleep_flag();
// Wake any sleeping thread whose pending action could synchronize with 'curr'
// (its action is no longer provably redundant). RMW-read halves are skipped.
316 void ModelChecker::wake_up_sleeping_actions(ModelAction *curr)
318 for (unsigned int i = 0; i < get_num_threads(); i++) {
319 Thread *thr = get_thread(int_to_id(i));
320 if (scheduler->is_sleep_set(thr)) {
321 ModelAction *pending_act = thr->get_pending();
322 if ((!curr->is_rmwr()) && pending_act->could_synchronize_with(curr))
323 //Remove this thread from sleep set
324 scheduler->remove_sleep(thr);
329 /** @brief Alert the model-checker that an incorrectly-ordered
330 * synchronization was made */
331 void ModelChecker::set_bad_synchronization()
// Sticky flag in snapshotted state; reset only by snapshot rollback.
333 priv->bad_synchronization = true;
337 * Check whether the current trace has triggered an assertion which should halt
340 * @return True, if the execution should be aborted; false otherwise
342 bool ModelChecker::has_asserted() const
344 return priv->asserted;
348 * Trigger a trace assertion which should cause this execution to be halted.
349 * This can be due to a detected bug or due to an infeasibility that should
352 void ModelChecker::set_assert()
354 priv->asserted = true;
358 * Check if we are in a deadlock. Should only be called at the end of an
359 * execution, although it should not give false positives in the middle of an
360 * execution (there should be some ENABLED thread).
362 * @return True if program is in a deadlock; false otherwise
364 bool ModelChecker::is_deadlocked() const
366 bool blocking_threads = false;
367 for (unsigned int i = 0; i < get_num_threads(); i++) {
368 thread_id_t tid = int_to_id(i);
// A non-model thread with a pending action is blocked waiting to run.
// NOTE(review): an early-exit check for enabled threads appears elided here.
371 Thread *t = get_thread(tid);
372 if (!t->is_model_thread() && t->get_pending())
373 blocking_threads = true;
375 return blocking_threads;
379 * Check if this is a complete execution. That is, have all thread completed
380 * execution (rather than exiting because sleep sets have forced a redundant
383 * @return True if the execution is complete.
385 bool ModelChecker::is_complete_execution() const
// Any still-enabled thread means the execution terminated early (not complete).
387 for (unsigned int i = 0; i < get_num_threads(); i++)
388 if (is_enabled(int_to_id(i)))
394 * @brief Assert a bug in the executing program.
396 * Use this function to assert any sort of bug in the user program. If the
397 * current trace is feasible (actually, a prefix of some feasible execution),
398 * then this execution will be aborted, printing the appropriate message. If
399 * the current trace is not yet feasible, the error message will be stashed and
400 * printed if the execution ever becomes feasible.
402 * @param msg Descriptive message for the bug (do not include newline char)
403 * @return True if bug is immediately-feasible
405 bool ModelChecker::assert_bug(const char *msg)
// Stash the report regardless of feasibility; printed later if/when feasible.
407 priv->bugs.push_back(new bug_message(msg));
409 if (isfeasibleprefix()) {
417 * @brief Assert a bug in the executing program, asserted by a user thread
418 * @see ModelChecker::assert_bug
419 * @param msg Descriptive message for the bug (do not include newline char)
421 void ModelChecker::assert_user_bug(const char *msg)
423 /* If feasible bug, bail out now */
// Yield back to the model-checker (master) context to halt this execution.
425 switch_to_master(NULL);
428 /** @return True, if any bugs have been reported for this execution */
429 bool ModelChecker::have_bug_reports() const
431 return priv->bugs.size() != 0;
434 /** @brief Print bug report listing for this execution (if any bugs exist) */
435 void ModelChecker::print_bugs() const
437 if (have_bug_reports()) {
// Header line pluralizes "bug" based on the count, then each report prints itself.
438 model_print("Bug report: %zu bug%s detected\n",
440 priv->bugs.size() > 1 ? "s" : "");
441 for (unsigned int i = 0; i < priv->bugs.size(); i++)
442 priv->bugs[i]->print();
447 * @brief Record end-of-execution stats
449 * Must be run when exiting an execution. Records various stats.
450 * @see struct execution_stats
452 void ModelChecker::record_stats()
// Classify this execution into exactly one bucket:
// infeasible > buggy > complete > redundant (sleep-set-truncated).
455 if (!isfeasibleprefix())
456 stats.num_infeasible++;
457 else if (have_bug_reports())
458 stats.num_buggy_executions++;
459 else if (is_complete_execution())
460 stats.num_complete++;
462 stats.num_redundant++;
465 /** @brief Print execution stats */
466 void ModelChecker::print_stats() const
468 model_print("Number of complete, bug-free executions: %d\n", stats.num_complete);
469 model_print("Number of redundant executions: %d\n", stats.num_redundant);
470 model_print("Number of buggy executions: %d\n", stats.num_buggy_executions);
471 model_print("Number of infeasible executions: %d\n", stats.num_infeasible);
472 model_print("Total executions: %d\n", stats.num_total);
473 model_print("Total nodes created: %d\n", node_stack->get_total_nodes());
477 * @brief End-of-exeuction print
478 * @param printbugs Should any existing bugs be printed?
480 void ModelChecker::print_execution(bool printbugs) const
482 print_program_output();
// Divergence info is only shown in verbose/debug mode.
484 if (DBG_ENABLED() || params.verbose) {
485 model_print("Earliest divergence point since last feasible execution:\n");
486 if (earliest_diverge)
487 earliest_diverge->print();
489 model_print("(Not set)\n");
495 /* Don't print invalid bugs */
504 * Queries the model-checker for more executions to explore and, if one
505 * exists, resets the model-checker state to execute a new execution.
507 * @return If there are more executions to explore, return true. Otherwise,
510 bool ModelChecker::next_execution()
513 /* Is this execution a feasible execution that's worth bug-checking? */
514 bool complete = isfeasibleprefix() && (is_complete_execution() ||
517 /* End-of-execution bug checks */
// Deadlock is only reportable on a feasible, finished execution.
520 assert_bug("Deadlock detected");
528 if (DBG_ENABLED() || params.verbose || (complete && have_bug_reports()))
529 print_execution(complete);
531 clear_program_output();
// Start tracking divergence afresh for the next execution.
534 earliest_diverge = NULL;
// No backtracking point left => exploration is finished.
536 if ((diverge = get_next_backtrack()) == NULL)
540 model_print("Next execution will diverge at:\n");
544 reset_to_initial_state();
549 * @brief Find the last fence-related backtracking conflict for a ModelAction
551 * This function performs the search for the most recent conflicting action
552 * against which we should perform backtracking, as affected by fence
553 * operations. This includes pairs of potentially-synchronizing actions which
554 * occur due to fence-acquire or fence-release, and hence should be explored in
555 * the opposite execution order.
557 * @param act The current action
558 * @return The most recent action which conflicts with act due to fences
560 ModelAction * ModelChecker::get_last_fence_conflict(ModelAction *act) const
562 /* Only perform release/acquire fence backtracking for stores */
563 if (!act->is_write())
566 /* Find a fence-release (or, act is a release) */
567 ModelAction *last_release;
568 if (act->is_release())
571 last_release = get_last_fence_release(act->get_tid());
// Per-thread candidates: the latest acquire-fence and the latest prior
// same-variable load in each thread (both needed to form a conflict pair).
575 std::vector< ModelAction *, ModelAlloc<ModelAction *> > acquire_fences(get_num_threads(), NULL);
576 std::vector< ModelAction *, ModelAlloc<ModelAction *> > prior_loads(get_num_threads(), NULL);
577 bool found_acquire_fences = false;
581 * load --sb-> fence-acquire */
582 action_list_t *list = action_trace;
583 action_list_t::reverse_iterator rit;
// Walk the whole trace backwards (most recent first).
584 for (rit = list->rbegin(); rit != list->rend(); rit++) {
585 ModelAction *prev = *rit;
// Same-thread actions cannot be backtracking conflicts for 'act'.
586 if (act->same_thread(prev))
589 int tid = id_to_int(prev->get_tid());
591 if (prev->is_read() && act->same_var(prev)) {
592 if (prev->is_acquire()) {
593 /* Found most recent load-acquire, don't need
594 * to search for more fences */
595 if (!found_acquire_fences)
598 prior_loads[tid] = prev;
// Record only the most recent acquire-fence per thread.
601 if (prev->is_acquire() && prev->is_fence() && !acquire_fences[tid]) {
602 found_acquire_fences = true;
603 acquire_fences[tid] = prev;
// Among threads that have both a fence and a prior load, pick the latest fence.
607 ModelAction *latest_backtrack = NULL;
608 for (unsigned int i = 0; i < acquire_fences.size(); i++)
609 if (acquire_fences[i] && prior_loads[i])
610 if (!latest_backtrack || *latest_backtrack < *acquire_fences[i])
611 latest_backtrack = acquire_fences[i];
612 return latest_backtrack;
616 * @brief Find the last backtracking conflict for a ModelAction
618 * This function performs the search for the most recent conflicting action
619 * against which we should perform backtracking. This primary includes pairs of
620 * synchronizing actions which should be explored in the opposite execution
623 * @param act The current action
624 * @return The most recent action which conflicts with act
626 ModelAction * ModelChecker::get_last_conflict(ModelAction *act) const
628 switch (act->get_type()) {
629 /* case ATOMIC_FENCE: fences don't directly cause backtracking */
// Atomic read/write/RMW case: latest could-synchronize action at this location,
// combined with any fence-induced conflict (whichever is more recent).
633 ModelAction *ret = NULL;
635 /* linear search: from most recent to oldest */
636 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
637 action_list_t::reverse_iterator rit;
638 for (rit = list->rbegin(); rit != list->rend(); rit++) {
639 ModelAction *prev = *rit;
640 if (prev->could_synchronize_with(act)) {
// Also consider fence-related conflicts for this action.
646 ModelAction *ret2 = get_last_fence_conflict(act);
656 case ATOMIC_TRYLOCK: {
657 /* linear search: from most recent to oldest */
658 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
659 action_list_t::reverse_iterator rit;
660 for (rit = list->rbegin(); rit != list->rend(); rit++) {
661 ModelAction *prev = *rit;
// A lock/trylock on the same mutex conflicts with this trylock.
662 if (act->is_conflicting_lock(prev))
667 case ATOMIC_UNLOCK: {
668 /* linear search: from most recent to oldest */
669 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
670 action_list_t::reverse_iterator rit;
671 for (rit = list->rbegin(); rit != list->rend(); rit++) {
672 ModelAction *prev = *rit;
// An unlock conflicts with another thread's failed trylock.
673 if (!act->same_thread(prev) && prev->is_failed_trylock())
// ATOMIC_WAIT case: conflicts with other threads' failed trylocks or notifies.
679 /* linear search: from most recent to oldest */
680 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
681 action_list_t::reverse_iterator rit;
682 for (rit = list->rbegin(); rit != list->rend(); rit++) {
683 ModelAction *prev = *rit;
684 if (!act->same_thread(prev) && prev->is_failed_trylock())
686 if (!act->same_thread(prev) && prev->is_notify())
692 case ATOMIC_NOTIFY_ALL:
693 case ATOMIC_NOTIFY_ONE: {
694 /* linear search: from most recent to oldest */
695 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
696 action_list_t::reverse_iterator rit;
697 for (rit = list->rbegin(); rit != list->rend(); rit++) {
698 ModelAction *prev = *rit;
// A notify conflicts with another thread's wait on the same condvar.
699 if (!act->same_thread(prev) && prev->is_wait())
710 /** This method finds backtracking points where we should try to
711 * reorder the parameter ModelAction against.
713 * @param the ModelAction to find backtracking points for.
715 void ModelChecker::set_backtracking(ModelAction *act)
717 Thread *t = get_thread(act);
718 ModelAction *prev = get_last_conflict(act);
// Backtracking is set on the node *before* the conflicting action.
722 Node *node = prev->get_node()->get_parent();
724 int low_tid, high_tid;
// If act's thread was enabled at that node, only that thread needs a
// backtrack mark; otherwise consider every thread.
725 if (node->enabled_status(t->get_id()) == THREAD_ENABLED) {
726 low_tid = id_to_int(act->get_tid());
727 high_tid = low_tid + 1;
730 high_tid = get_num_threads();
733 for (int i = low_tid; i < high_tid; i++) {
734 thread_id_t tid = int_to_id(i);
736 /* Make sure this thread can be enabled here. */
737 if (i >= node->get_num_threads())
740 /* Don't backtrack into a point where the thread is disabled or sleeping. */
741 if (node->enabled_status(tid) != THREAD_ENABLED)
744 /* Check if this has been explored already */
745 if (node->has_been_explored(tid))
748 /* See if fairness allows */
749 if (model->params.fairwindow != 0 && !node->has_priority(tid)) {
// Under fairness, skip tid if some enabled thread holds priority over it.
751 for (int t = 0; t < node->get_num_threads(); t++) {
752 thread_id_t tother = int_to_id(t);
753 if (node->is_enabled(tother) && node->has_priority(tother)) {
761 /* Cache the latest backtracking point */
762 set_latest_backtrack(prev);
764 /* If this is a new backtracking point, mark the tree */
765 if (!node->set_backtrack(tid))
767 DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
768 id_to_int(prev->get_tid()),
769 id_to_int(t->get_id()));
778 * @brief Cache the a backtracking point as the "most recent", if eligible
780 * Note that this does not prepare the NodeStack for this backtracking
781 * operation, it only caches the action on a per-execution basis
783 * @param act The operation at which we should explore a different next action
784 * (i.e., backtracking point)
785 * @return True, if this action is now the most recent backtracking point;
788 bool ModelChecker::set_latest_backtrack(ModelAction *act)
// Keep only the later (by sequence order) of the cached point and 'act'.
790 if (!priv->next_backtrack || *act > *priv->next_backtrack) {
791 priv->next_backtrack = act;
798 * Returns last backtracking point. The model checker will explore a different
799 * path for this point in the next execution.
800 * @return The ModelAction at which the next execution should diverge.
802 ModelAction * ModelChecker::get_next_backtrack()
// Consume (clear) the cached point so it is only returned once.
804 ModelAction *next = priv->next_backtrack;
805 priv->next_backtrack = NULL;
810 * Processes a read or rmw model action.
811 * @param curr is the read model action to process.
812 * @param second_part_of_rmw is boolean that is true is this is the second action of a rmw.
813 * @return True if processing this read updates the mo_graph.
815 bool ModelChecker::process_read(ModelAction *curr, bool second_part_of_rmw)
817 uint64_t value = VALUE_NONE;
818 bool updated = false;
// The node records which write (if any) this read should read from.
820 const ModelAction *reads_from = curr->get_node()->get_read_from();
821 if (reads_from != NULL) {
// Start a tentative mo_graph transaction so we can roll back if infeasible.
822 mo_graph->startChanges();
824 value = reads_from->get_value();
825 bool r_status = false;
827 if (!second_part_of_rmw) {
828 check_recency(curr, reads_from);
829 r_status = r_modification_order(curr, reads_from);
// Infeasible choice with alternatives remaining: undo mo edges and retry.
832 if (!second_part_of_rmw && is_infeasible() && (curr->get_node()->increment_read_from() || curr->get_node()->increment_future_value())) {
833 mo_graph->rollbackChanges();
834 priv->too_many_reads = false;
838 read_from(curr, reads_from);
839 mo_graph->commitChanges();
840 mo_check_promises(curr, true);
843 } else if (!second_part_of_rmw) {
844 /* Read from future value */
// No existing write chosen: promise to read a value a future write will supply.
845 struct future_value fv = curr->get_node()->get_future_value();
846 Promise *promise = new Promise(curr, fv);
848 curr->set_read_from_promise(promise);
849 promises->push_back(promise);
850 mo_graph->startChanges();
851 updated = r_modification_order(curr, promise);
852 mo_graph->commitChanges();
// The value read is returned to the user thread.
854 get_thread(curr)->set_return_value(value);
860 * Processes a lock, trylock, or unlock model action. @param curr is
861 * the read model action to process.
863 * The try lock operation checks whether the lock is taken. If not,
864 * it falls to the normal lock operation case. If so, it returns
867 * The lock operation has already been checked that it is enabled, so
868 * it just grabs the lock and synchronizes with the previous unlock.
870 * The unlock operation has to re-enable all of the threads that are
871 * waiting on the lock.
873 * @return True if synchronization was updated; false otherwise
875 bool ModelChecker::process_mutex(ModelAction *curr)
877 std::mutex *mutex = NULL;
878 struct std::mutex_state *state = NULL;
// Locate the mutex: it is the action's location for lock ops, but for a
// wait it is carried in the action's value field.
880 if (curr->is_trylock() || curr->is_lock() || curr->is_unlock()) {
881 mutex = (std::mutex *)curr->get_location();
882 state = mutex->get_state();
883 } else if (curr->is_wait()) {
884 mutex = (std::mutex *)curr->get_value();
885 state = mutex->get_state();
888 switch (curr->get_type()) {
889 case ATOMIC_TRYLOCK: {
// Trylock succeeds iff the mutex is currently free.
890 bool success = !state->islocked;
891 curr->set_try_lock(success);
893 get_thread(curr)->set_return_value(0);
896 get_thread(curr)->set_return_value(1);
898 //otherwise fall into the lock case
// Lock case: flag accesses that race with mutex initialization.
900 if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
901 assert_bug("Lock access before initialization");
902 state->islocked = true;
903 ModelAction *unlock = get_last_unlock(curr);
904 //synchronize with the previous unlock statement
905 if (unlock != NULL) {
906 curr->synchronize_with(unlock);
911 case ATOMIC_UNLOCK: {
913 state->islocked = false;
914 //wake up the other threads
915 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, curr->get_location());
916 //activate all the waiting threads
917 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
918 scheduler->wake(get_thread(*rit));
// ATOMIC_WAIT case: release the mutex, wake lock-waiters, then maybe sleep.
925 state->islocked = false;
926 //wake up the other threads
927 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, (void *) curr->get_value());
928 //activate all the waiting threads
929 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
930 scheduler->wake(get_thread(*rit));
933 //check whether we should go to sleep or not...simulate spurious failures
// misc == 0 means really wait; nonzero models a spurious wakeup.
934 if (curr->get_node()->get_misc() == 0) {
935 get_safe_ptr_action(condvar_waiters_map, curr->get_location())->push_back(curr);
937 scheduler->sleep(get_thread(curr));
941 case ATOMIC_NOTIFY_ALL: {
942 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
943 //activate all the waiting threads
944 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
945 scheduler->wake(get_thread(*rit));
950 case ATOMIC_NOTIFY_ONE: {
951 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
// The node's misc index selects which waiter gets woken (explored exhaustively).
952 int wakeupthread = curr->get_node()->get_misc();
953 action_list_t::iterator it = waiters->begin();
954 advance(it, wakeupthread);
955 scheduler->wake(get_thread(*it));
// Offer 'writer''s value as a future value for 'reader' to read on a later
// execution, if modification order permits it.
966 void ModelChecker::add_future_value(const ModelAction *writer, ModelAction *reader)
968 /* Do more ambitious checks now that mo is more complete */
969 if (mo_may_allow(writer, reader)) {
970 Node *node = reader->get_node();
972 /* Find an ancestor thread which exists at the time of the reader */
973 Thread *write_thread = get_thread(writer);
974 while (id_to_int(write_thread->get_id()) >= node->get_num_threads())
975 write_thread = write_thread->get_parent();
977 struct future_value fv = {
// Expiration: the write's sequence number plus the configured max delay.
979 writer->get_seq_number() + params.maxfuturedelay,
980 write_thread->get_id(),
// A newly-added future value is a fresh backtracking point.
982 if (node->add_future_value(fv))
983 set_latest_backtrack(reader);
988 * Process a write ModelAction
989 * @param curr The ModelAction to process
990 * @return True if the mo_graph was updated or promises were resolved
992 bool ModelChecker::process_write(ModelAction *curr)
994 bool updated_mod_order = w_modification_order(curr);
995 bool updated_promises = resolve_promises(curr);
// Only flush queued future values once no promises remain outstanding.
997 if (promises->size() == 0) {
998 for (unsigned int i = 0; i < futurevalues->size(); i++) {
999 struct PendingFutureValue pfv = (*futurevalues)[i];
1000 add_future_value(pfv.writer, pfv.act);
1002 futurevalues->clear();
1005 mo_graph->commitChanges();
1006 mo_check_promises(curr, false);
// Writes have no meaningful return value for the user thread.
1008 get_thread(curr)->set_return_value(VALUE_NONE);
1009 return updated_mod_order || updated_promises;
1013 * Process a fence ModelAction
1014 * @param curr The ModelAction to process
1015 * @return True if synchronization was updated
1017 bool ModelChecker::process_fence(ModelAction *curr)
1020 * fence-relaxed: no-op
1021 * fence-release: only log the occurence (not in this function), for
1022 * use in later synchronization
1023 * fence-acquire (this function): search for hypothetical release
1026 bool updated = false;
1027 if (curr->is_acquire()) {
1028 action_list_t *list = action_trace;
1029 action_list_t::reverse_iterator rit;
1030 /* Find X : is_read(X) && X --sb-> curr */
1031 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1032 ModelAction *act = *rit;
// Only same-thread (sequenced-before) actions are relevant to this fence.
1035 if (act->get_tid() != curr->get_tid())
1037 /* Stop at the beginning of the thread */
1038 if (act->is_thread_start())
1040 /* Stop once we reach a prior fence-acquire */
1041 if (act->is_fence() && act->is_acquire())
1043 if (!act->is_read())
1045 /* read-acquire will find its own release sequences */
1046 if (act->is_acquire())
1049 /* Establish hypothetical release sequences */
1050 rel_heads_list_t release_heads;
1051 get_release_seq_heads(curr, act, &release_heads);
// Synchronize the fence with each release head; a failure marks the
// execution as having bad (incorrectly-ordered) synchronization.
1052 for (unsigned int i = 0; i < release_heads.size(); i++)
1053 if (!curr->synchronize_with(release_heads[i]))
1054 set_bad_synchronization();
1055 if (release_heads.size() != 0)
1063 * @brief Process the current action for thread-related activity
1065 * Performs current-action processing for a THREAD_* ModelAction. Proccesses
1066 * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
1067 * synchronization, etc. This function is a no-op for non-THREAD actions
1068 * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
1070 * @param curr The current action
1071 * @return True if synchronization was updated or a thread completed
1073 bool ModelChecker::process_thread_action(ModelAction *curr)
1075 bool updated = false;
1077 switch (curr->get_type()) {
1078 case THREAD_CREATE: {
// Spawn the child Thread from the thrd_t / entry-function parameters
// packed into this action.
1079 thrd_t *thrd = (thrd_t *)curr->get_location();
1080 struct thread_params *params = (struct thread_params *)curr->get_value();
1081 Thread *th = new Thread(thrd, params->func, params->arg);
1083 th->set_creation(curr);
1084 /* Promises can be satisfied by children */
1085 for (unsigned int i = 0; i < promises->size(); i++) {
1086 Promise *promise = (*promises)[i];
1087 if (promise->thread_is_available(curr->get_tid()))
1088 promise->add_thread(th->get_id());
// THREAD_JOIN case: synchronize with the joined thread's last action.
1093 Thread *blocking = curr->get_thread_operand();
1094 ModelAction *act = get_last_action(blocking->get_id());
1095 curr->synchronize_with(act);
1096 updated = true; /* trigger rel-seq checks */
1099 case THREAD_FINISH: {
1100 Thread *th = get_thread(curr);
// Wake every thread blocked (joining) on this one.
1101 while (!th->wait_list_empty()) {
1102 ModelAction *act = th->pop_wait_list();
1103 scheduler->wake(get_thread(act));
1106 /* Completed thread can't satisfy promises */
1107 for (unsigned int i = 0; i < promises->size(); i++) {
1108 Promise *promise = (*promises)[i];
1109 if (promise->thread_is_available(th->get_id()))
1110 if (promise->eliminate_thread(th->get_id()))
// Eliminating the last possible writer thread fails the promise.
1111 priv->failed_promise = true;
1113 updated = true; /* trigger rel-seq checks */
1116 case THREAD_START: {
1117 check_promises(curr->get_tid(), NULL, curr->get_cv());
1128 * @brief Process the current action for release sequence fixup activity
1130 * Performs model-checker release sequence fixups for the current action,
1131 * forcing a single pending release sequence to break (with a given, potential
1132 * "loose" write) or to complete (i.e., synchronize). If a pending release
1133 * sequence forms a complete release sequence, then we must perform the fixup
1134 * synchronization, mo_graph additions, etc.
1136 * @param curr The current action; must be a release sequence fixup action
1137 * @param work_queue The work queue to which to add work items as they are
1140 void ModelChecker::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
// The node's chosen "break" write; NULL means force the sequence to complete.
1142 const ModelAction *write = curr->get_node()->get_relseq_break();
1143 struct release_seq *sequence = pending_rel_seqs->back();
1144 pending_rel_seqs->pop_back();
1146 ModelAction *acquire = sequence->acquire;
1147 const ModelAction *rf = sequence->rf;
1148 const ModelAction *release = sequence->release;
1152 ASSERT(release->same_thread(rf));
1154 if (write == NULL) {
1156 * @todo Forcing a synchronization requires that we set
1157 * modification order constraints. For instance, we can't allow
1158 * a fixup sequence in which two separate read-acquire
1159 * operations read from the same sequence, where the first one
1160 * synchronizes and the other doesn't. Essentially, we can't
1161 * allow any writes to insert themselves between 'release' and
1165 /* Must synchronize */
1166 if (!acquire->synchronize_with(release)) {
1167 set_bad_synchronization();
1170 /* Re-check all pending release sequences */
1171 work_queue->push_back(CheckRelSeqWorkEntry(NULL));
1172 /* Re-check act for mo_graph edges */
1173 work_queue->push_back(MOEdgeWorkEntry(acquire));
1175 /* propagate synchronization to later actions */
// Walk backwards from the trace tail down to 'acquire', pushing the new
// synchronization into every action that happens-after it.
1176 action_list_t::reverse_iterator rit = action_trace->rbegin();
1177 for (; (*rit) != acquire; rit++) {
1178 ModelAction *propagate = *rit;
1179 if (acquire->happens_before(propagate)) {
1180 propagate->synchronize_with(acquire);
1181 /* Re-check 'propagate' for mo_graph edges */
1182 work_queue->push_back(MOEdgeWorkEntry(propagate));
1186 /* Break release sequence with new edges:
1187 * release --mo--> write --mo--> rf */
1188 mo_graph->addEdge(release, write);
1189 mo_graph->addEdge(write, rf);
1192 /* See if we have realized a data race */
1192 /* See if we have realized a data race */
1197 * Initialize the current action by performing one or more of the following
1198 * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
1199 * in the NodeStack, manipulating backtracking sets, allocating and
1200 * initializing clock vectors, and computing the promises to fulfill.
1202 * @param curr The current action, as passed from the user context; may be
1203 * freed/invalidated after the execution of this function, with a different
1204 * action "returned" its place (pass-by-reference)
1205 * @return True if curr is a newly-explored action; false otherwise
1207 bool ModelChecker::initialize_curr_action(ModelAction **curr)
1209 ModelAction *newcurr;
/* Second part of an RMW: merge with the existing RMWR action rather than
 * exploring a new NodeStack entry */
1211 if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
1212 newcurr = process_rmw(*curr);
1215 if (newcurr->is_rmw())
1216 compute_promises(newcurr);
/* Otherwise: assign a sequence number and step the NodeStack forward,
 * which may return either a brand-new action or a replayed one */
1222 (*curr)->set_seq_number(get_next_seq_num());
1224 newcurr = node_stack->explore_action(*curr, scheduler->get_enabled_array());
1226 /* First restore type and order in case of RMW operation */
1227 if ((*curr)->is_rmwr())
1228 newcurr->copy_typeandorder(*curr);
1230 ASSERT((*curr)->get_location() == newcurr->get_location());
1231 newcurr->copy_from_new(*curr);
1233 /* Discard duplicate ModelAction; use action from NodeStack */
1236 /* Always compute new clock vector */
1237 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1240 return false; /* Action was explored previously */
1244 /* Always compute new clock vector */
1245 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1247 /* Assign most recent release fence */
1248 newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
1251 * Perform one-time actions when pushing new ModelAction onto
1254 if (newcurr->is_write())
1255 compute_promises(newcurr);
1256 else if (newcurr->is_relseq_fixup())
1257 compute_relseq_breakwrites(newcurr);
1258 else if (newcurr->is_wait())
1259 newcurr->get_node()->set_misc_max(2);
1260 else if (newcurr->is_notify_one()) {
/* notify_one may wake any one waiter; branch over each possibility */
1261 newcurr->get_node()->set_misc_max(get_safe_ptr_action(condvar_waiters_map, newcurr->get_location())->size());
1263 return true; /* This was a new ModelAction */
1268 * @brief Establish reads-from relation between two actions
1270 * Perform basic operations involved with establishing a concrete rf relation,
1271 * including setting the ModelAction data and checking for release sequences.
1273 * @param act The action that is reading (must be a read)
1274 * @param rf The action from which we are reading (must be a write)
1276 * @return True if this read established synchronization
1278 bool ModelChecker::read_from(ModelAction *act, const ModelAction *rf)
1280 act->set_read_from(rf);
/* Only an acquire-read can synchronize with the release sequence heads of
 * the write it reads from */
1281 if (rf != NULL && act->is_acquire()) {
1282 rel_heads_list_t release_heads;
1283 get_release_seq_heads(act, act, &release_heads);
1284 int num_heads = release_heads.size();
1285 for (unsigned int i = 0; i < release_heads.size(); i++)
1286 if (!act->synchronize_with(release_heads[i])) {
/* Synchronizing backwards in the trace is a modeling error */
1287 set_bad_synchronization();
1290 return num_heads > 0;
1296 * @brief Check whether a model action is enabled.
1298 * Checks whether a lock or join operation would be successful (i.e., is the
1299 * lock already locked, or is the joined thread already complete). If not, put
1300 * the action in a waiter list.
1302 * @param curr is the ModelAction to check whether it is enabled.
1303 * @return a bool that indicates whether the action is enabled.
1305 bool ModelChecker::check_action_enabled(ModelAction *curr) {
1306 if (curr->is_lock()) {
/* A lock's location *is* the std::mutex object being acquired */
1307 std::mutex *lock = (std::mutex *)curr->get_location();
1308 struct std::mutex_state *state = lock->get_state();
1309 if (state->islocked) {
1310 //Stick the action in the appropriate waiting queue
1311 get_safe_ptr_action(lock_waiters_map, curr->get_location())->push_back(curr);
1314 } else if (curr->get_type() == THREAD_JOIN) {
/* A join's location is the Thread object being joined on */
1315 Thread *blocking = (Thread *)curr->get_location();
1316 if (!blocking->is_complete()) {
/* The joining action waits on the target thread's wait list until
 * that thread completes */
1317 blocking->push_wait_list(curr);
1326 * This is the heart of the model checker routine. It performs model-checking
1327 * actions corresponding to a given "current action." Among other processes, it
1328 * calculates reads-from relationships, updates synchronization clock vectors,
1329 * forms a memory_order constraints graph, and handles replay/backtrack
1330 * execution when running permutations of previously-observed executions.
1332 * @param curr The current action to process
1333 * @return The ModelAction that is actually executed; may be different than
1334 * curr; may be NULL, if the current action is not enabled to run
1336 ModelAction * ModelChecker::check_current_action(ModelAction *curr)
/* RMWC/RMW actions are the "second half" of an RMWR and reuse its list entry */
1339 bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
1341 if (!check_action_enabled(curr)) {
1342 /* Make the execution look like we chose to run this action
1343 * much later, when a lock/join can succeed */
1344 get_thread(curr)->set_pending(curr);
1345 scheduler->sleep(get_thread(curr));
1349 bool newly_explored = initialize_curr_action(&curr);
1355 wake_up_sleeping_actions(curr);
1357 /* Add the action to lists before any other model-checking tasks */
1358 if (!second_part_of_rmw)
1359 add_action_to_lists(curr);
1361 /* Build may_read_from set for newly-created actions */
1362 if (newly_explored && curr->is_read())
1363 build_reads_from_past(curr);
1365 /* Initialize work_queue with the "current action" work */
/* Work-queue fixpoint loop: processing one item may enqueue more; run until
 * empty (or until a bug assertion halts the execution) */
1366 work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
1367 while (!work_queue.empty() && !has_asserted()) {
1368 WorkQueueEntry work = work_queue.front();
1369 work_queue.pop_front();
1371 switch (work.type) {
1372 case WORK_CHECK_CURR_ACTION: {
1373 ModelAction *act = work.action;
1374 bool update = false; /* update this location's release seq's */
1375 bool update_all = false; /* update all release seq's */
1377 if (process_thread_action(curr))
1380 if (act->is_read() && process_read(act, second_part_of_rmw))
1383 if (act->is_write() && process_write(act))
1386 if (act->is_fence() && process_fence(act))
1389 if (act->is_mutex_op() && process_mutex(act))
1392 if (act->is_relseq_fixup())
1393 process_relseq_fixup(curr, &work_queue);
/* update_all re-checks every pending release sequence; plain update
 * only re-checks sequences at this action's location */
1396 work_queue.push_back(CheckRelSeqWorkEntry(NULL));
1398 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1401 case WORK_CHECK_RELEASE_SEQ:
1402 resolve_release_sequences(work.location, &work_queue);
1404 case WORK_CHECK_MO_EDGES: {
1405 /** @todo Complete verification of work_queue */
1406 ModelAction *act = work.action;
1407 bool updated = false;
1409 if (act->is_read()) {
1410 const ModelAction *rf = act->get_reads_from();
1411 const Promise *promise = act->get_reads_from_promise();
1413 if (r_modification_order(act, rf))
1415 } else if (promise) {
1416 if (r_modification_order(act, promise))
/* Note: an RMW is both a read and a write; it can hit both branches */
1420 if (act->is_write()) {
1421 if (w_modification_order(act))
1424 mo_graph->commitChanges();
1427 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1436 check_curr_backtracking(curr);
1437 set_backtracking(curr);
/**
 * Mark the current action as the latest backtracking point if its Node (or
 * its parent Node) still has any unexplored choices: thread backtracks, misc
 * branches, reads-from alternatives, future values, promises, or release
 * sequence breaks.
 * @param curr The action just processed
 */
1441 void ModelChecker::check_curr_backtracking(ModelAction *curr)
1443 Node *currnode = curr->get_node();
1444 Node *parnode = currnode->get_parent();
1446 if ((parnode && !parnode->backtrack_empty()) ||
1447 !currnode->misc_empty() ||
1448 !currnode->read_from_empty() ||
1449 !currnode->future_value_empty() ||
1450 !currnode->promise_empty() ||
1451 !currnode->relseq_break_empty()) {
1452 set_latest_backtrack(curr);
/**
 * @brief Check whether any outstanding promise has expired
 * @return True if some promise's expiration precedes the number of sequence
 * numbers already used in this execution
 */
1456 bool ModelChecker::promises_expired() const
1458 for (unsigned int i = 0; i < promises->size(); i++) {
1459 Promise *promise = (*promises)[i];
1460 if (promise->get_expiration() < priv->used_sequence_numbers)
1467 * This is the strongest feasibility check available.
1468 * @return whether the current trace (partial or complete) must be a prefix of
1471 bool ModelChecker::isfeasibleprefix() const
/* Stronger than is_feasible_prefix_ignore_relseq(): additionally requires
 * that no release sequences remain unresolved */
1473 return pending_rel_seqs->size() == 0 && is_feasible_prefix_ignore_relseq();
1477 * Print diagnostic information about an infeasible execution
1478 * @param prefix A string to prefix the output with; if NULL, then a default
1479 * message prefix will be provided
1481 void ModelChecker::print_infeasibility(const char *prefix) const
/* Append one bracketed tag per active infeasibility condition */
1485 if (mo_graph->checkForCycles())
1486 ptr += sprintf(ptr, "[mo cycle]");
1487 if (priv->failed_promise)
1488 ptr += sprintf(ptr, "[failed promise]");
1489 if (priv->too_many_reads)
1490 ptr += sprintf(ptr, "[too many reads]");
1491 if (priv->no_valid_reads)
1492 ptr += sprintf(ptr, "[no valid reads-from]");
1493 if (priv->bad_synchronization)
1494 ptr += sprintf(ptr, "[bad sw ordering]");
1495 if (promises_expired())
1496 ptr += sprintf(ptr, "[promise expired]");
1497 if (promises->size() != 0)
1498 ptr += sprintf(ptr, "[unresolved promise]");
1500 model_print("%s: %s\n", prefix ? prefix : "Infeasible", buf);
1504 * Returns whether the current completed trace is feasible, except for pending
1505 * release sequences.
1507 bool ModelChecker::is_feasible_prefix_ignore_relseq() const
/* Feasible and no outstanding promises; pending release sequences ignored */
1509 return !is_infeasible() && promises->size() == 0;
1513 * Check if the current partial trace is infeasible. Does not check any
1514 * end-of-execution flags, which might rule out the execution. Thus, this is
1515 * useful only for ruling an execution as infeasible.
1516 * @return whether the current partial trace is infeasible.
1518 bool ModelChecker::is_infeasible() const
/* Any one of these conditions is sufficient to rule the trace out */
1520 return mo_graph->checkForCycles() ||
1521 priv->no_valid_reads ||
1522 priv->failed_promise ||
1523 priv->too_many_reads ||
1524 priv->bad_synchronization ||
1528 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
1529 ModelAction * ModelChecker::process_rmw(ModelAction *act) {
/* The immediately-preceding action in this thread is the RMWR half */
1530 ModelAction *lastread = get_last_action(act->get_tid());
1531 lastread->process_rmw(act);
1532 if (act->is_rmw()) {
/* The RMW must immediately follow (in mo) what it read from, whether that
 * was a concrete write or a still-unresolved promise */
1533 if (lastread->get_reads_from())
1534 mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
1536 mo_graph->addRMWEdge(lastread->get_reads_from_promise(), lastread);
1537 mo_graph->commitChanges();
1543 * Checks whether a thread has read from the same write for too many times
1544 * without seeing the effects of a later write.
1547 * 1) there must be a different write that we could read from that would satisfy the modification order,
1548 * 2) we must have read from the same value in excess of maxreads times, and
1549 * 3) that other write must have been in the reads_from set for maxreads times.
1551 * If so, we decide that the execution is no longer feasible.
1553 void ModelChecker::check_recency(ModelAction *curr, const ModelAction *rf)
/* maxreads == 0 disables the recency check entirely */
1555 if (params.maxreads != 0) {
/* A read with at most one reads-from choice cannot be "un-stuck" anyway */
1556 if (curr->get_node()->get_read_from_size() <= 1)
1558 //Must make sure that execution is currently feasible... We could
1559 //accidentally clear by rolling back
1560 if (is_infeasible())
1562 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1563 int tid = id_to_int(curr->get_tid());
1566 if ((int)thrd_lists->size() <= tid)
1568 action_list_t *list = &(*thrd_lists)[tid];
1570 action_list_t::reverse_iterator rit = list->rbegin();
1571 /* Skip past curr */
1572 for (; (*rit) != curr; rit++)
1574 /* go past curr now */
/* Save position so the per-candidate scan below can restart from here */
1577 action_list_t::reverse_iterator ritcopy = rit;
1578 //See if we have enough reads from the same value
1580 for (; count < params.maxreads; rit++, count++) {
1581 if (rit == list->rend())
1583 ModelAction *act = *rit;
1584 if (!act->is_read())
/* Any intervening read from a different write resets the streak */
1587 if (act->get_reads_from() != rf)
1589 if (act->get_node()->get_read_from_size() <= 1)
/* For each alternative write curr could have read, test whether every one
 * of the last maxreads reads could also have read it */
1592 for (int i = 0; i < curr->get_node()->get_read_from_size(); i++) {
1594 const ModelAction *write = curr->get_node()->get_read_from_at(i);
1596 /* Need a different write */
1600 /* Test to see whether this is a feasible write to read from */
1601 /** NOTE: all members of read-from set should be
1602 * feasible, so we no longer check it here **/
1606 bool feasiblewrite = true;
1607 //now we need to see if this write works for everyone
1609 for (int loop = count; loop > 0; loop--, rit++) {
1610 ModelAction *act = *rit;
1611 bool foundvalue = false;
1612 for (int j = 0; j < act->get_node()->get_read_from_size(); j++) {
1613 if (act->get_node()->get_read_from_at(j) == write) {
1619 feasiblewrite = false;
1623 if (feasiblewrite) {
/* A consistently-ignorable alternative exists: rule this trace out */
1624 priv->too_many_reads = true;
1632 * Updates the mo_graph with the constraints imposed from the current
1635 * Basic idea is the following: Go through each other thread and find
1636 * the last action that happened before our read. Two cases:
1638 * (1) The action is a write => that write must either occur before
1639 * the write we read from or be the write we read from.
1641 * (2) The action is a read => the write that that action read from
1642 * must occur before the write we read from or be the same write.
1644 * @param curr The current action. Must be a read.
1645 * @param rf The ModelAction or Promise that curr reads from. Must be a write.
1646 * @return True if modification order edges were added; false otherwise
1648 template <typename rf_type>
1649 bool ModelChecker::r_modification_order(ModelAction *curr, const rf_type *rf)
1651 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1654 ASSERT(curr->is_read());
1656 /* Last SC fence in the current thread */
1657 ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1659 /* Iterate over all threads */
1660 for (i = 0; i < thrd_lists->size(); i++) {
1661 /* Last SC fence in thread i */
1662 ModelAction *last_sc_fence_thread_local = NULL;
1663 if (int_to_id((int)i) != curr->get_tid())
1664 last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(i), NULL);
1666 /* Last SC fence in thread i, before last SC fence in current thread */
1667 ModelAction *last_sc_fence_thread_before = NULL;
1668 if (last_sc_fence_local)
1669 last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1671 /* Iterate over actions in thread, starting from most recent */
1672 action_list_t *list = &(*thrd_lists)[i];
1673 action_list_t::reverse_iterator rit;
1674 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1675 ModelAction *act = *rit;
/* SC-fence rules below are C++11 [atomics.order] 29.3; each adds a
 * write --mo--> rf edge when a fence separates act from curr */
1677 if (act->is_write() && !act->equals(rf) && act != curr) {
1678 /* C++, Section 29.3 statement 5 */
1679 if (curr->is_seqcst() && last_sc_fence_thread_local &&
1680 *act < *last_sc_fence_thread_local) {
1681 added = mo_graph->addEdge(act, rf) || added;
1684 /* C++, Section 29.3 statement 4 */
1685 else if (act->is_seqcst() && last_sc_fence_local &&
1686 *act < *last_sc_fence_local) {
1687 added = mo_graph->addEdge(act, rf) || added;
1690 /* C++, Section 29.3 statement 6 */
1691 else if (last_sc_fence_thread_before &&
1692 *act < *last_sc_fence_thread_before) {
1693 added = mo_graph->addEdge(act, rf) || added;
1699 * Include at most one act per-thread that "happens
1700 * before" curr. Don't consider reflexively.
1702 if (act->happens_before(curr) && act != curr) {
1703 if (act->is_write()) {
1704 if (!act->equals(rf)) {
1705 added = mo_graph->addEdge(act, rf) || added;
/* act is a read: order the write it read from before rf */
1708 const ModelAction *prevreadfrom = act->get_reads_from();
1709 //if the previous read is unresolved, keep going...
1710 if (prevreadfrom == NULL)
1713 if (!prevreadfrom->equals(rf)) {
1714 added = mo_graph->addEdge(prevreadfrom, rf) || added;
1723 * All compatible, thread-exclusive promises must be ordered after any
1724 * concrete loads from the same thread
1726 for (unsigned int i = 0; i < promises->size(); i++)
1727 if ((*promises)[i]->is_compatible_exclusive(curr))
1728 added = mo_graph->addEdge(rf, (*promises)[i]) || added;
1734 * Updates the mo_graph with the constraints imposed from the current write.
1736 * Basic idea is the following: Go through each other thread and find
1737 * the latest action that happened before our write. Two cases:
1739 * (1) The action is a write => that write must occur before
1742 * (2) The action is a read => the write that that action read from
1743 * must occur before the current write.
1745 * This method also handles two other issues:
1747 * (I) Sequential Consistency: Making sure that if the current write is
1748 * seq_cst, that it occurs after the previous seq_cst write.
1750 * (II) Sending the write back to non-synchronizing reads.
1752 * @param curr The current action. Must be a write.
1753 * @return True if modification order edges were added; false otherwise
1755 bool ModelChecker::w_modification_order(ModelAction *curr)
1757 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1760 ASSERT(curr->is_write());
1762 if (curr->is_seqcst()) {
1763 /* We have to at least see the last sequentially consistent write,
1764 so we are initialized. */
1765 ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
1766 if (last_seq_cst != NULL) {
1767 added = mo_graph->addEdge(last_seq_cst, curr) || added;
1771 /* Last SC fence in the current thread */
1772 ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1774 /* Iterate over all threads */
1775 for (i = 0; i < thrd_lists->size(); i++) {
1776 /* Last SC fence in thread i, before last SC fence in current thread */
1777 ModelAction *last_sc_fence_thread_before = NULL;
1778 if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
1779 last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1781 /* Iterate over actions in thread, starting from most recent */
1782 action_list_t *list = &(*thrd_lists)[i];
1783 action_list_t::reverse_iterator rit;
1784 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1785 ModelAction *act = *rit;
1788 * 1) If RMW and it actually read from something, then we
1789 * already have all relevant edges, so just skip to next
1792 * 2) If RMW and it didn't read from anything, we should
1793 * take whatever edge we can get to speed up convergence.
1795 * 3) If normal write, we need to look at earlier actions, so
1796 * continue processing list.
1798 if (curr->is_rmw()) {
1799 if (curr->get_reads_from() != NULL)
1807 /* C++, Section 29.3 statement 7 */
1808 if (last_sc_fence_thread_before && act->is_write() &&
1809 *act < *last_sc_fence_thread_before) {
1810 added = mo_graph->addEdge(act, curr) || added;
1815 * Include at most one act per-thread that "happens
1818 if (act->happens_before(curr)) {
1820 * Note: if act is RMW, just add edge:
1822 * The following edge should be handled elsewhere:
1823 * readfrom(act) --mo--> act
1825 if (act->is_write())
1826 added = mo_graph->addEdge(act, curr) || added;
1827 else if (act->is_read()) {
1828 //if previous read accessed a null, just keep going
1829 if (act->get_reads_from() == NULL)
1831 added = mo_graph->addEdge(act->get_reads_from(), curr) || added;
1834 } else if (act->is_read() && !act->could_synchronize_with(curr) &&
1835 !act->same_thread(curr)) {
1836 /* We have an action that:
1837 (1) did not happen before us
1838 (2) is a read and we are a write
1839 (3) cannot synchronize with us
1840 (4) is in a different thread
1842 that read could potentially read from our write. Note that
1843 these checks are overly conservative at this point, we'll
1844 do more checks before actually removing the
/* Record this write as a candidate "future value" for the earlier read,
 * subject to the thin-air constraint */
1848 if (thin_air_constraint_may_allow(curr, act)) {
1849 if (!is_infeasible())
1850 futurevalues->push_back(PendingFutureValue(curr, act));
1851 else if (curr->is_rmw() && act->is_rmw() && curr->get_reads_from() && curr->get_reads_from() == act->get_reads_from())
1852 add_future_value(curr, act);
1859 * All compatible, thread-exclusive promises must be ordered after any
1860 * concrete stores to the same thread, or else they can be merged with
1863 for (unsigned int i = 0; i < promises->size(); i++)
1864 if ((*promises)[i]->is_compatible_exclusive(curr))
1865 added = mo_graph->addEdge(curr, (*promises)[i]) || added;
1870 /** Arbitrary reads from the future are not allowed. Section 29.3
1871 * part 9 places some constraints. This method checks one result of constraint
1872 * constraint. Others require compiler support. */
1873 bool ModelChecker::thin_air_constraint_may_allow(const ModelAction *writer, const ModelAction *reader)
/* Only RMW-to-RMW chains can form the disallowed thin-air cycle checked here */
1875 if (!writer->is_rmw())
1878 if (!reader->is_rmw())
/* Walk the writer's reads-from chain backwards; if it passes through the
 * reader (or through a same-thread action that happens before the reader),
 * letting reader read from writer would create a cycle */
1881 for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
1882 if (search == reader)
1884 if (search->get_tid() == reader->get_tid() &&
1885 search->happens_before(reader))
1893 * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
1894 * some constraints. This method checks one the following constraint (others
1895 * require compiler support):
1897 * If X --hb-> Y --mo-> Z, then X should not read from Z.
1899 bool ModelChecker::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
1901 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, reader->get_location());
1903 /* Iterate over all threads */
1904 for (i = 0; i < thrd_lists->size(); i++) {
/* Earliest write in thread i that the reader happens-before (directly a
 * write, or the write some later read observed) */
1905 const ModelAction *write_after_read = NULL;
1907 /* Iterate over actions in thread, starting from most recent */
1908 action_list_t *list = &(*thrd_lists)[i];
1909 action_list_t::reverse_iterator rit;
1910 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1911 ModelAction *act = *rit;
1913 /* Don't disallow due to act == reader */
1914 if (!reader->happens_before(act) || reader == act)
1916 else if (act->is_write())
1917 write_after_read = act;
1918 else if (act->is_read() && act->get_reads_from() != NULL)
1919 write_after_read = act->get_reads_from();
/* reader --hb--> write_after_read --mo--> writer would make the read
 * from 'writer' illegal per the constraint above */
1922 if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
1929 * Finds the head(s) of the release sequence(s) containing a given ModelAction.
1930 * The ModelAction under consideration is expected to be taking part in
1931 * release/acquire synchronization as an object of the "reads from" relation.
1932 * Note that this can only provide release sequence support for RMW chains
1933 * which do not read from the future, as those actions cannot be traced until
1934 * their "promise" is fulfilled. Similarly, we may not even establish the
1935 * presence of a release sequence with certainty, as some modification order
1936 * constraints may be decided further in the future. Thus, this function
1937 * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
1938 * and a boolean representing certainty.
1940 * @param rf The action that might be part of a release sequence. Must be a
1942 * @param release_heads A pass-by-reference style return parameter. After
1943 * execution of this function, release_heads will contain the heads of all the
1944 * relevant release sequences, if any exists with certainty
1945 * @param pending A pass-by-reference style return parameter which is only used
1946 * when returning false (i.e., uncertain). Returns most information regarding
1947 * an uncertain release sequence, including any write operations that might
1948 * break the sequence.
1949 * @return true, if the ModelChecker is certain that release_heads is complete;
1952 bool ModelChecker::release_seq_heads(const ModelAction *rf,
1953 rel_heads_list_t *release_heads,
1954 struct release_seq *pending) const
1956 /* Only check for release sequences if there are no cycles */
1957 if (mo_graph->checkForCycles())
/* Walk backwards along the RMW reads-from chain, collecting heads */
1960 for ( ; rf != NULL; rf = rf->get_reads_from()) {
1961 ASSERT(rf->is_write());
1963 if (rf->is_release())
1964 release_heads->push_back(rf);
1965 else if (rf->get_last_fence_release())
1966 release_heads->push_back(rf->get_last_fence_release());
1968 break; /* End of RMW chain */
1970 /** @todo Need to be smarter here... In the linux lock
1971 * example, this will run to the beginning of the program for
1973 /** @todo The way to be smarter here is to keep going until 1
1974 * thread has a release preceded by an acquire and you've seen
1977 /* acq_rel RMW is a sufficient stopping condition */
1978 if (rf->is_acquire() && rf->is_release())
1979 return true; /* complete */
1982 /* read from future: need to settle this later */
1984 return false; /* incomplete */
1987 if (rf->is_release())
1988 return true; /* complete */
1990 /* else relaxed write
1991 * - check for fence-release in the same thread (29.8, stmt. 3)
1992 * - check modification order for contiguous subsequence
1993 * -> rf must be same thread as release */
1995 const ModelAction *fence_release = rf->get_last_fence_release();
1996 /* Synchronize with a fence-release unconditionally; we don't need to
1997 * find any more "contiguous subsequence..." for it */
1999 release_heads->push_back(fence_release);
2001 int tid = id_to_int(rf->get_tid());
2002 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, rf->get_location());
2003 action_list_t *list = &(*thrd_lists)[tid];
2004 action_list_t::const_reverse_iterator rit;
2006 /* Find rf in the thread list */
2007 rit = std::find(list->rbegin(), list->rend(), rf);
2008 ASSERT(rit != list->rend());
2010 /* Find the last {write,fence}-release */
2011 for (; rit != list->rend(); rit++) {
2012 if (fence_release && *(*rit) < *fence_release)
2014 if ((*rit)->is_release())
2017 if (rit == list->rend()) {
2018 /* No write-release in this thread */
2019 return true; /* complete */
2020 } else if (fence_release && *(*rit) < *fence_release) {
2021 /* The fence-release is more recent (and so, "stronger") than
2022 * the most recent write-release */
2023 return true; /* complete */
2024 } /* else, need to establish contiguous release sequence */
2025 ModelAction *release = *rit;
2027 ASSERT(rf->same_thread(release));
2029 pending->writes.clear();
/* Now verify no other-thread write can interpose between 'release' and 'rf'
 * in modification order; any that might is recorded in pending->writes */
2031 bool certain = true;
2032 for (unsigned int i = 0; i < thrd_lists->size(); i++) {
2033 if (id_to_int(rf->get_tid()) == (int)i)
2035 list = &(*thrd_lists)[i];
2037 /* Can we ensure no future writes from this thread may break
2038 * the release seq? */
2039 bool future_ordered = false;
2041 ModelAction *last = get_last_action(int_to_id(i));
2042 Thread *th = get_thread(int_to_id(i));
2043 if ((last && rf->happens_before(last)) ||
2046 future_ordered = true;
2048 ASSERT(!th->is_model_thread() || future_ordered);
2050 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2051 const ModelAction *act = *rit;
2052 /* Reach synchronization -> this thread is complete */
2053 if (act->happens_before(release))
2055 if (rf->happens_before(act)) {
2056 future_ordered = true;
2060 /* Only non-RMW writes can break release sequences */
2061 if (!act->is_write() || act->is_rmw())
2064 /* Check modification order */
2065 if (mo_graph->checkReachable(rf, act)) {
2066 /* rf --mo--> act */
2067 future_ordered = true;
2070 if (mo_graph->checkReachable(act, release))
2071 /* act --mo--> release */
2073 if (mo_graph->checkReachable(release, act) &&
2074 mo_graph->checkReachable(act, rf)) {
2075 /* release --mo-> act --mo--> rf */
2076 return true; /* complete */
2078 /* act may break release sequence */
2079 pending->writes.push_back(act);
2082 if (!future_ordered)
2083 certain = false; /* This thread is uncertain */
2087 release_heads->push_back(release);
2088 pending->writes.clear();
2090 pending->release = release;
2097 * An interface for getting the release sequence head(s) with which a
2098 * given ModelAction must synchronize. This function only returns a non-empty
2099 * result when it can locate a release sequence head with certainty. Otherwise,
2100 * it may mark the internal state of the ModelChecker so that it will handle
2101 * the release sequence at a later time, causing @a acquire to update its
2102 * synchronization at some later point in execution.
2104 * @param acquire The 'acquire' action that may synchronize with a release
2106 * @param read The read action that may read from a release sequence; this may
2107 * be the same as acquire, or else an earlier action in the same thread (i.e.,
2108 * when 'acquire' is a fence-acquire)
2109 * @param release_heads A pass-by-reference return parameter. Will be filled
2110 * with the head(s) of the release sequence(s), if they exist with certainty.
2111 * @see ModelChecker::release_seq_heads
2113 void ModelChecker::get_release_seq_heads(ModelAction *acquire,
2114 ModelAction *read, rel_heads_list_t *release_heads)
2116 const ModelAction *rf = read->get_reads_from();
/* snapshot_calloc zeroes the struct; remaining fields are filled in by
 * release_seq_heads() when the sequence is uncertain */
2117 struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
2118 sequence->acquire = acquire;
2119 sequence->read = read;
2121 if (!release_seq_heads(rf, release_heads, sequence)) {
2122 /* add act to 'lazy checking' list */
2123 pending_rel_seqs->push_back(sequence);
/* Certain result: the deferred record is not needed after all */
2125 snapshot_free(sequence);
2130 * Attempt to resolve all stashed operations that might synchronize with a
2131 * release sequence for a given location. This implements the "lazy" portion of
2132 * determining whether or not a release sequence was contiguous, since not all
2133 * modification order information is present at the time an action occurs.
2135 * @param location The location/object that should be checked for release
2136 * sequence resolutions. A NULL value means to check all locations.
2137 * @param work_queue The work queue to which to add work items as they are
2139 * @return True if any updates occurred (new synchronization, new mo_graph
2142 bool ModelChecker::resolve_release_sequences(void *location, work_queue_t *work_queue)
2144 bool updated = false;
/* Iterate manually so entries can be erased in place as they resolve */
2145 std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >::iterator it = pending_rel_seqs->begin();
2146 while (it != pending_rel_seqs->end()) {
2147 struct release_seq *pending = *it;
2148 ModelAction *acquire = pending->acquire;
2149 const ModelAction *read = pending->read;
2151 /* Only resolve sequences on the given location, if provided */
2152 if (location && read->get_location() != location) {
2157 const ModelAction *rf = read->get_reads_from();
2158 rel_heads_list_t release_heads;
2160 complete = release_seq_heads(rf, &release_heads, pending);
2161 for (unsigned int i = 0; i < release_heads.size(); i++) {
/* Synchronize with any newly-discovered head (skip already-known ones) */
2162 if (!acquire->has_synchronized_with(release_heads[i])) {
2163 if (acquire->synchronize_with(release_heads[i]))
2166 set_bad_synchronization();
2171 /* Re-check all pending release sequences */
2172 work_queue->push_back(CheckRelSeqWorkEntry(NULL));
2173 /* Re-check read-acquire for mo_graph edges */
2174 if (acquire->is_read())
2175 work_queue->push_back(MOEdgeWorkEntry(acquire));
2177 /* propagate synchronization to later actions */
2178 action_list_t::reverse_iterator rit = action_trace->rbegin();
2179 for (; (*rit) != acquire; rit++) {
2180 ModelAction *propagate = *rit;
2181 if (acquire->happens_before(propagate)) {
2182 propagate->synchronize_with(acquire);
2183 /* Re-check 'propagate' for mo_graph edges */
2184 work_queue->push_back(MOEdgeWorkEntry(propagate));
/* Resolved: drop the pending record and continue with the next entry */
2189 it = pending_rel_seqs->erase(it);
2190 snapshot_free(pending);
2196 // If we resolved promises or data races, see if we have realized a data race.
2203 * Performs various bookkeeping operations for the current ModelAction. For
2204 * instance, adds action to the per-object, per-thread action vector and to the
2205 * action trace list of all thread actions.
2207 * @param act is the ModelAction to add.
2209 void ModelChecker::add_action_to_lists(ModelAction *act)
2211 int tid = id_to_int(act->get_tid());
2212 ModelAction *uninit = NULL;
2214 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
/* First access to an atomic variable: synthesize an "uninitialized" write
 * so reads have something to read from */
2215 if (list->empty() && act->is_atomic_var()) {
2216 uninit = new_uninitialized_action(act->get_location());
2217 uninit_id = id_to_int(uninit->get_tid());
2218 list->push_back(uninit);
2220 list->push_back(act);
2222 action_trace->push_back(act);
/* The uninitialized write conceptually precedes everything in the trace */
2224 action_trace->push_front(uninit);
2226 std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, act->get_location());
2227 if (tid >= (int)vec->size())
2228 vec->resize(priv->next_thread_id);
2229 (*vec)[tid].push_back(act);
2231 (*vec)[uninit_id].push_front(uninit);
2233 if ((int)thrd_last_action->size() <= tid)
2234 thrd_last_action->resize(get_num_threads());
2235 (*thrd_last_action)[tid] = act;
2237 (*thrd_last_action)[uninit_id] = uninit;
2239 if (act->is_fence() && act->is_release()) {
2240 if ((int)thrd_last_fence_release->size() <= tid)
2241 thrd_last_fence_release->resize(get_num_threads());
2242 (*thrd_last_fence_release)[tid] = act;
2245 if (act->is_wait()) {
/* A wait releases its mutex, so also index it under the mutex's location;
 * the mutex address is stashed in the action's value field */
2246 void *mutex_loc = (void *) act->get_value();
2247 get_safe_ptr_action(obj_map, mutex_loc)->push_back(act);
2249 std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, mutex_loc);
2250 if (tid >= (int)vec->size())
2251 vec->resize(priv->next_thread_id);
2252 (*vec)[tid].push_back(act);
2257 * @brief Get the last action performed by a particular Thread
2258 * @param tid The thread ID of the Thread in question
2259 * @return The last action in the thread
2261 ModelAction * ModelChecker::get_last_action(thread_id_t tid) const
2263 int threadid = id_to_int(tid);
2264 if (threadid < (int)thrd_last_action->size())
2265 return (*thrd_last_action)[id_to_int(tid)];
2271 * @brief Get the last fence release performed by a particular Thread
2272 * @param tid The thread ID of the Thread in question
2273 * @return The last fence release in the thread, if one exists; NULL otherwise
2275 ModelAction * ModelChecker::get_last_fence_release(thread_id_t tid) const
2277 int threadid = id_to_int(tid);
2278 if (threadid < (int)thrd_last_fence_release->size())
2279 return (*thrd_last_fence_release)[id_to_int(tid)];
2285 * Gets the last memory_order_seq_cst write (in the total global sequence)
2286 * performed on a particular object (i.e., memory location), not including the
2288 * @param curr The current ModelAction; also denotes the object location to
2290 * @return The last seq_cst write
2292 ModelAction * ModelChecker::get_last_seq_cst_write(ModelAction *curr) const
2294 void *location = curr->get_location();
2295 action_list_t *list = get_safe_ptr_action(obj_map, location);
2296 /* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
2297 action_list_t::reverse_iterator rit;
2298 for (rit = list->rbegin(); rit != list->rend(); rit++)
2299 if ((*rit)->is_write() && (*rit)->is_seqcst() && (*rit) != curr)
2305 * Gets the last memory_order_seq_cst fence (in the total global sequence)
2306 * performed in a particular thread, prior to a particular fence.
2307 * @param tid The ID of the thread to check
2308 * @param before_fence The fence from which to begin the search; if NULL, then
2309 * search for the most recent fence in the thread.
2310 * @return The last prior seq_cst fence in the thread, if exists; otherwise, NULL
ModelAction * ModelChecker::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
	/* All fences should have NULL location */
	action_list_t *list = get_safe_ptr_action(obj_map, NULL);
	action_list_t::reverse_iterator rit = list->rbegin();

	/* Phase 1: when a starting fence is given, rewind the iterator to it
	 * before searching */
	for (; rit != list->rend(); rit++)
		if (*rit == before_fence)
	ASSERT(*rit == before_fence);

	/* Phase 2: continue backwards, looking for the most recent seq_cst
	 * fence belonging to thread tid */
	for (; rit != list->rend(); rit++)
		if ((*rit)->is_fence() && (tid == (*rit)->get_tid()) && (*rit)->is_seqcst())
2334 * Gets the last unlock operation performed on a particular mutex (i.e., memory
2335 * location). This function identifies the mutex according to the current
2336 * action, which is presumed to perform on the same mutex.
2337 * @param curr The current ModelAction; also denotes the object location to
2339 * @return The last unlock operation
2341 ModelAction * ModelChecker::get_last_unlock(ModelAction *curr) const
2343 void *location = curr->get_location();
2344 action_list_t *list = get_safe_ptr_action(obj_map, location);
2345 /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
2346 action_list_t::reverse_iterator rit;
2347 for (rit = list->rbegin(); rit != list->rend(); rit++)
2348 if ((*rit)->is_unlock() || (*rit)->is_wait())
2353 ModelAction * ModelChecker::get_parent_action(thread_id_t tid) const
2355 ModelAction *parent = get_last_action(tid);
2357 parent = get_thread(tid)->get_creation();
2362 * Returns the clock vector for a given thread.
2363 * @param tid The thread whose clock vector we want
2364 * @return Desired clock vector
2366 ClockVector * ModelChecker::get_cv(thread_id_t tid) const
2368 return get_parent_action(tid)->get_cv();
2372 * Resolve a set of Promises with a current write. The set is provided in the
2373 * Node corresponding to @a write.
2374 * @param write The ModelAction that is fulfilling Promises
2375 * @return True if promises were resolved; false otherwise
bool ModelChecker::resolve_promises(ModelAction *write)
	bool haveResolved = false;
	std::vector< ModelAction *, ModelAlloc<ModelAction *> > actions_to_check;
	promise_list_t mustResolve, resolved;

	/* The Node records (by index i) which promises this write was chosen to
	 * satisfy; promise_index tracks the shifting position in the promises
	 * vector as resolved entries are erased from it */
	for (unsigned int i = 0, promise_index = 0; promise_index < promises->size(); i++) {
		Promise *promise = (*promises)[promise_index];
		if (write->get_node()->get_promise(i)) {
			ModelAction *read = promise->get_action();
			read_from(read, write);
			//Make sure the promise's value matches the write's value
			ASSERT(promise->is_compatible(write));
			/* Resolving in the mo graph may force further promises to
			 * resolve; those are collected in mustResolve */
			mo_graph->resolvePromise(read, write, &mustResolve);

			resolved.push_back(promise);
			promises->erase(promises->begin() + promise_index);
			actions_to_check.push_back(read);

			haveResolved = true;

	/* Any promise the mo graph required us to resolve but which was not
	 * actually resolved above constitutes a failed promise */
	for (unsigned int i = 0; i < mustResolve.size(); i++) {
		if (std::find(resolved.begin(), resolved.end(), mustResolve[i])
			priv->failed_promise = true;
	for (unsigned int i = 0; i < resolved.size(); i++)

	//Check whether reading these writes has made threads unable to
	/* resolve remaining promises (re-check mo constraints per read) */
	for (unsigned int i = 0; i < actions_to_check.size(); i++) {
		ModelAction *read = actions_to_check[i];
		mo_check_promises(read, true);

	return haveResolved;
2420 * Compute the set of promises that could potentially be satisfied by this
2421 * action. Note that the set computation actually appears in the Node, not in
2423 * @param curr The ModelAction that may satisfy promises
void ModelChecker::compute_promises(ModelAction *curr)
	for (unsigned int i = 0; i < promises->size(); i++) {
		Promise *promise = (*promises)[i];
		const ModelAction *act = promise->get_action();
		/* curr is a candidate for this promise only if it is a same-location,
		 * same-value store that is not already ordered with the promised
		 * read: no happens-before, no possible synchronization, and a
		 * different thread */
		if (!act->happens_before(curr) &&
				!act->could_synchronize_with(curr) &&
				!act->same_thread(curr) &&
				act->get_location() == curr->get_location() &&
				promise->get_value() == curr->get_value()) {
			/* Record the candidate in curr's Node; whether the promised
			 * read is an RMW affects how it may be satisfied */
			curr->get_node()->set_promise(i, act->is_rmw());
/**
 * @brief Checks promises in response to a change in a thread's ClockVector
 * @param tid The thread whose clock vector was updated
 * @param old_cv The clock vector prior to the update (may be NULL)
 * @param merge_cv The clock vector after the update/merge
 */
void ModelChecker::check_promises(thread_id_t tid, ClockVector *old_cv, ClockVector *merge_cv)
	for (unsigned int i = 0; i < promises->size(); i++) {
		Promise *promise = (*promises)[i];
		const ModelAction *act = promise->get_action();
		/* The thread has newly synchronized with the promised read (it was
		 * not synchronized in old_cv but is in merge_cv), so it can no
		 * longer be the one to satisfy the promise */
		if ((old_cv == NULL || !old_cv->synchronized_since(act)) &&
				merge_cv->synchronized_since(act)) {
			if (promise->eliminate_thread(tid)) {
				//Promise has failed
				priv->failed_promise = true;
2458 void ModelChecker::check_promises_thread_disabled()
2460 for (unsigned int i = 0; i < promises->size(); i++) {
2461 Promise *promise = (*promises)[i];
2462 if (promise->has_failed()) {
2463 priv->failed_promise = true;
2470 * @brief Checks promises in response to addition to modification order for
2473 * We test whether threads are still available for satisfying promises after an
2474 * addition to our modification order constraints. Those that are unavailable
2475 * are "eliminated". Once all threads are eliminated from satisfying a promise,
2476 * that promise has failed.
2478 * @param act The ModelAction which updated the modification order
2479 * @param is_read_check Should be true if act is a read and we must check for
2480 * updates to the store from which it read (there is a distinction here for
2481 * RMW's, which are both a load and a store)
void ModelChecker::mo_check_promises(const ModelAction *act, bool is_read_check)
	/* For a read-check, examine the store this read took its value from;
	 * otherwise act itself is the store of interest */
	const ModelAction *write = is_read_check ? act->get_reads_from() : act;

	for (unsigned int i = 0; i < promises->size(); i++) {
		Promise *promise = (*promises)[i];
		const ModelAction *pread = promise->get_action();

		// Is this promise on the same location?
		if (!pread->same_var(write))

		/* The promised read happens-before act: test whether the new mo
		 * constraint invalidates the promise outright */
		if (pread->happens_before(act) && mo_graph->checkPromise(write, promise)) {
			priv->failed_promise = true;

		// Don't do any lookups twice for the same thread
		if (!promise->thread_is_available(act->get_tid()))

		/* write is reachable from the promise in the mo graph; re-check
		 * whether the promise can still be satisfied */
		if (mo_graph->checkReachable(promise, write)) {
			if (mo_graph->checkPromise(write, promise)) {
				priv->failed_promise = true;
2514 * Compute the set of writes that may break the current pending release
 * sequence. This information is extracted from previous release sequence
2518 * @param curr The current ModelAction. Must be a release sequence fixup
2521 void ModelChecker::compute_relseq_breakwrites(ModelAction *curr)
2523 if (pending_rel_seqs->empty())
2526 struct release_seq *pending = pending_rel_seqs->back();
2527 for (unsigned int i = 0; i < pending->writes.size(); i++) {
2528 const ModelAction *write = pending->writes[i];
2529 curr->get_node()->add_relseq_break(write);
2532 /* NULL means don't break the sequence; just synchronize */
2533 curr->get_node()->add_relseq_break(NULL);
2537 * Build up an initial set of all past writes that this 'read' action may read
2538 * from. This set is determined by the clock vector's "happens before"
2540 * @param curr is the current ModelAction that we are exploring; it must be a
void ModelChecker::build_reads_from_past(ModelAction *curr)
	/* Per-thread action lists for curr's memory location */
	std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
	ASSERT(curr->is_read());

	ModelAction *last_sc_write = NULL;

	/* A seq_cst read is constrained by the most recent seq_cst store */
	if (curr->is_seqcst())
		last_sc_write = get_last_seq_cst_write(curr);

	/* Iterate over all threads */
	for (i = 0; i < thrd_lists->size(); i++) {
		/* Iterate over actions in thread, starting from most recent */
		action_list_t *list = &(*thrd_lists)[i];
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *act = *rit;

			/* Only consider 'write' actions */
			if (!act->is_write() || act == curr)

			/* Don't consider more than one seq_cst write if we are a seq_cst read. */
			bool allow_read = true;

			if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
			else if (curr->get_sleep_flag() && !curr->is_seqcst() && !sleep_can_read_from(curr, act))

			/* Only add feasible reads: trial-apply the mo constraints for
			 * this rf choice, test feasibility, then roll them back */
			mo_graph->startChanges();
			r_modification_order(curr, act);
			if (!is_infeasible())
				curr->get_node()->add_read_from(act);
			mo_graph->rollbackChanges();

			/* Include at most one act per-thread that "happens before" curr */
			if (act->happens_before(curr))

	/* We may find no valid may-read-from only if the execution is doomed */
	if (!curr->get_node()->get_read_from_size()) {
		priv->no_valid_reads = true;

	if (DBG_ENABLED()) {
		model_print("Reached read action:\n");
		model_print("Printing may_read_from\n");
		curr->get_node()->print_may_read_from();
		model_print("End printing may_read_from\n");
bool ModelChecker::sleep_can_read_from(ModelAction *curr, const ModelAction *write)
	/* Walk backwards along the reads-from chain of RMW stores */
	for ( ; write != NULL; write = write->get_reads_from()) {
		/* UNINIT actions don't have a Node, and they never sleep */
		if (write->is_uninitialized())
		Node *prevnode = write->get_node()->get_parent();

		/* Was curr's thread in the sleep set when this write was explored? */
		bool thread_sleep = prevnode->enabled_status(curr->get_tid()) == THREAD_SLEEP_SET;
		if (write->is_release() && thread_sleep)
		if (!write->is_rmw())
2621 * @brief Create a new action representing an uninitialized atomic
2622 * @param location The memory location of the atomic object
2623 * @return A pointer to a new ModelAction
ModelAction * ModelChecker::new_uninitialized_action(void *location) const
	/* Allocate from the snapshotting heap and construct via placement new,
	 * so the action's storage participates in snapshot/rollback */
	ModelAction *act = (ModelAction *)snapshot_malloc(sizeof(class ModelAction));
	act = new (act) ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, location, 0, model_thread);
	/* Clock vector with no parent action */
	act->create_cv(NULL);
/** @brief Print a list of actions, plus a running hash of the trace. */
static void print_list(action_list_t *list)
	action_list_t::iterator it;

	model_print("---------------------------------------------------------------------\n");

	/* Rolling hash over all action hashes — a quick fingerprint for
	 * comparing executions */
	unsigned int hash = 0;

	for (it = list->begin(); it != list->end(); it++) {
		hash = hash^(hash<<3)^((*it)->hash());
	model_print("HASH %u\n", hash);
	model_print("---------------------------------------------------------------------\n");
2649 #if SUPPORT_MOD_ORDER_DUMP
2650 void ModelChecker::dumpGraph(char *filename) const
2653 sprintf(buffer, "%s.dot", filename);
2654 FILE *file = fopen(buffer, "w");
2655 fprintf(file, "digraph %s {\n", filename);
2656 mo_graph->dumpNodes(file);
2657 ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());
2659 for (action_list_t::iterator it = action_trace->begin(); it != action_trace->end(); it++) {
2660 ModelAction *action = *it;
2661 if (action->is_read()) {
2662 fprintf(file, "N%u [label=\"N%u, T%u\"];\n", action->get_seq_number(), action->get_seq_number(), action->get_tid());
2663 if (action->get_reads_from() != NULL)
2664 fprintf(file, "N%u -> N%u[label=\"rf\", color=red];\n", action->get_seq_number(), action->get_reads_from()->get_seq_number());
2666 if (thread_array[action->get_tid()] != NULL) {
2667 fprintf(file, "N%u -> N%u[label=\"sb\", color=blue];\n", thread_array[action->get_tid()]->get_seq_number(), action->get_seq_number());
2670 thread_array[action->get_tid()] = action;
2672 fprintf(file, "}\n");
2673 model_free(thread_array);
/** @brief Prints an execution trace summary. */
void ModelChecker::print_summary() const
#if SUPPORT_MOD_ORDER_DUMP
	/* Dump the mo graph and the rf/sb graph as dot files, one pair per
	 * execution number */
	char buffername[100];
	sprintf(buffername, "exec%04u", stats.num_total);
	mo_graph->dumpGraphToFile(buffername);
	sprintf(buffername, "graph%04u", stats.num_total);
	dumpGraph(buffername);

	/* NOTE(review): num_total is printed with %d here but %04u above —
	 * confirm its signedness and unify the format */
	model_print("Execution %d:", stats.num_total);
	if (isfeasibleprefix())
	print_infeasibility(" INFEASIBLE");
	print_list(action_trace);
2699 * Add a Thread to the system for the first time. Should only be called once
2701 * @param t The Thread to add
2703 void ModelChecker::add_thread(Thread *t)
2705 thread_map->put(id_to_int(t->get_id()), t);
2706 scheduler->add_thread(t);
2710 * Removes a thread from the scheduler.
 * @param t The Thread to remove.
void ModelChecker::remove_thread(Thread *t)
	/* Only detach from the scheduler; the Thread object is not destroyed here */
	scheduler->remove_thread(t);
2719 * @brief Get a Thread reference by its ID
2720 * @param tid The Thread's ID
2721 * @return A Thread reference
2723 Thread * ModelChecker::get_thread(thread_id_t tid) const
2725 return thread_map->get(id_to_int(tid));
2729 * @brief Get a reference to the Thread in which a ModelAction was executed
2730 * @param act The ModelAction
2731 * @return A Thread reference
2733 Thread * ModelChecker::get_thread(const ModelAction *act) const
2735 return get_thread(act->get_tid());
2739 * @brief Check if a Thread is currently enabled
2740 * @param t The Thread to check
2741 * @return True if the Thread is currently enabled
bool ModelChecker::is_enabled(Thread *t) const
	/* Thin wrapper: enabled-ness is owned by the Scheduler */
	return scheduler->is_enabled(t);
2749 * @brief Check if a Thread is currently enabled
2750 * @param tid The ID of the Thread to check
2751 * @return True if the Thread is currently enabled
bool ModelChecker::is_enabled(thread_id_t tid) const
	/* Thin wrapper: enabled-ness is owned by the Scheduler */
	return scheduler->is_enabled(tid);
2759 * Switch from a model-checker context to a user-thread context. This is the
2760 * complement of ModelChecker::switch_to_master and must be called from the
2761 * model-checker context
2763 * @param thread The user-thread to switch to
void ModelChecker::switch_from_master(Thread *thread)
	/* Mark the chosen thread current, then context-switch out of the
	 * model-checker (system) context into it */
	scheduler->set_current_thread(thread);
	Thread::swap(&system_context, thread);
2772 * Switch from a user-context to the "master thread" context (a.k.a. system
2773 * context). This switch is made with the intention of exploring a particular
2774 * model-checking action (described by a ModelAction object). Must be called
2775 * from a user-thread context.
2777 * @param act The current action that will be explored. May be NULL only if
2778 * trace is exiting via an assertion (see ModelChecker::set_assert and
2779 * ModelChecker::has_asserted).
2780 * @return Return the value returned by the current action
uint64_t ModelChecker::switch_to_master(ModelAction *act)
	Thread *old = thread_current();
	/* Stash act as this thread's pending action; the master loop consumes it */
	ASSERT(!old->get_pending());
	old->set_pending(act);
	if (Thread::swap(old, &system_context) < 0) {
		perror("swap threads");
	/* Execution resumes here once the model checker re-schedules us; the
	 * action's result was recorded on the Thread */
	return old->get_return_value();
2796 * Takes the next step in the execution, if possible.
2797 * @param curr The current step to take
2798 * @return Returns the next Thread to run, if any; NULL if this execution
Thread * ModelChecker::take_step(ModelAction *curr)
	Thread *curr_thrd = get_thread(curr);
	ASSERT(curr_thrd->get_state() == THREAD_READY);

	/* check_current_action may substitute a different ModelAction for curr —
	 * NOTE(review): confirm against check_current_action's contract */
	curr = check_current_action(curr);

	/* Infeasible -> don't take any more steps */
	if (is_infeasible())
	else if (isfeasibleprefix() && have_bug_reports()) {

	/* Enforce the user-specified sequence-number bound (0 means unbounded) */
	if (params.bound != 0 && priv->used_sequence_numbers > params.bound)

	/* Blocked or finished threads leave the scheduler */
	if (curr_thrd->is_blocked() || curr_thrd->is_complete())
		scheduler->remove_thread(curr_thrd);

	/* Choose the thread to run next */
	Thread *next_thrd = get_next_thread(curr);

	DEBUG("(%d, %d)\n", curr_thrd ? id_to_int(curr_thrd->get_id()) : -1,
			next_thrd ? id_to_int(next_thrd->get_id()) : -1);
2830 /** Wrapper to run the user's main function, with appropriate arguments */
2831 void user_main_wrapper(void *)
2833 user_main(model->params.argc, model->params.argv);
2836 /** @brief Run ModelChecker for the user program */
2837 void ModelChecker::run()
2841 Thread *t = new Thread(&user_thread, &user_main_wrapper, NULL);
2846 * Stash next pending action(s) for thread(s). There
2847 * should only need to stash one thread's action--the
2848 * thread which just took a step--plus the first step
2849 * for any newly-created thread
2851 for (unsigned int i = 0; i < get_num_threads(); i++) {
2852 thread_id_t tid = int_to_id(i);
2853 Thread *thr = get_thread(tid);
2854 if (!thr->is_model_thread() && !thr->is_complete() && !thr->get_pending()) {
2855 switch_from_master(thr);
2859 /* Catch assertions from prior take_step or from
2860 * between-ModelAction bugs (e.g., data races) */
2864 /* Consume the next action for a Thread */
2865 ModelAction *curr = t->get_pending();
2866 t->set_pending(NULL);
2867 t = take_step(curr);
2868 } while (t && !t->is_model_thread());
2871 * Launch end-of-execution release sequence fixups only when
2872 * the execution is otherwise feasible AND there are:
2874 * (1) pending release sequences
2875 * (2) pending assertions that could be invalidated by a change
2876 * in clock vectors (i.e., data races)
2877 * (3) no pending promises
2879 while (!pending_rel_seqs->empty() &&
2880 is_feasible_prefix_ignore_relseq() &&
2881 !unrealizedraces.empty()) {
2882 model_print("*** WARNING: release sequence fixup action "
2883 "(%zu pending release seuqence(s)) ***\n",
2884 pending_rel_seqs->size());
2885 ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
2886 std::memory_order_seq_cst, NULL, VALUE_NONE,
2890 } while (next_execution());
2892 model_print("******* Model-checking complete: *******\n");