10 #include "snapshot-interface.h"
12 #include "clockvector.h"
13 #include "cyclegraph.h"
16 #include "threads-model.h"
19 #define INITIAL_THREAD_ID 0
/* Fragment of the bug_message struct: holds one heap-formatted " [BUG] ..."
 * string, allocated via snapshot_malloc so it lives in snapshot-managed memory.
 * NOTE(review): the struct declaration line and closing brace are elided here. */
24 bug_message(const char *str) {
25 const char *fmt = " [BUG] %s\n";
// strlen(fmt) counts the 2-byte "%s" placeholder that gets replaced, so the
// allocation leaves room for the terminating NUL after substitution.
26 msg = (char *)snapshot_malloc(strlen(fmt) + strlen(str));
27 sprintf(msg, fmt, str);
// Release the snapshot-allocated message, if any
29 ~bug_message() { if (msg) snapshot_free(msg); }
// Emit the pre-formatted bug message
32 void print() { model_print("%s", msg); }
38 * Structure for holding small ModelChecker members that should be snapshotted
/* NOTE(review): this struct is fragmentary in this view — several member
 * initializers, the destructor body, and the closing brace are elided. */
40 struct model_snapshot_members {
41 model_snapshot_members() :
43 /* First thread created will have id INITIAL_THREAD_ID */
44 next_thread_id(INITIAL_THREAD_ID),
45 used_sequence_numbers(0),
// Feasibility/exploration flags all start cleared for a fresh execution
49 failed_promise(false),
50 too_many_reads(false),
51 bad_synchronization(false),
// Destructor frees each heap-allocated bug_message collected in 'bugs'
55 ~model_snapshot_members() {
56 for (unsigned int i = 0; i < bugs.size(); i++)
// Per-execution state; lives in snapshotted memory so it is rolled back
// together with the user program on each new execution
61 ModelAction *current_action;
62 unsigned int next_thread_id;
63 modelclock_t used_sequence_numbers;
64 ModelAction *next_backtrack;
65 std::vector< bug_message *, SnapshotAlloc<bug_message *> > bugs;
66 struct execution_stats stats;
69 /** @brief Incorrectly-ordered synchronization was made */
70 bool bad_synchronization;
76 /** @brief Constructor */
77 ModelChecker::ModelChecker(struct model_params params) :
78 /* Initialize default scheduler */
80 scheduler(new Scheduler()),
82 earliest_diverge(NULL),
// Core trace structures: the full action list plus per-location and
// per-location/per-thread lookup tables
83 action_trace(new action_list_t()),
84 thread_map(new HashTable<int, Thread *, int>()),
85 obj_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
86 lock_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
87 condvar_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
88 obj_thrd_map(new HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4 >()),
// Promise / future-value / release-sequence bookkeeping (snapshot-allocated)
89 promises(new std::vector< Promise *, SnapshotAlloc<Promise *> >()),
90 futurevalues(new std::vector< struct PendingFutureValue, SnapshotAlloc<struct PendingFutureValue> >()),
91 pending_rel_seqs(new std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >()),
92 thrd_last_action(new std::vector< ModelAction *, SnapshotAlloc<ModelAction *> >(1)),
93 thrd_last_fence_release(new std::vector< ModelAction *, SnapshotAlloc<ModelAction *> >()),
94 node_stack(new NodeStack()),
95 priv(new struct model_snapshot_members()),
96 mo_graph(new CycleGraph())
98 /* Initialize a model-checker thread, for special ModelActions */
// The model thread consumes the first id (INITIAL_THREAD_ID) from get_next_id()
99 model_thread = new Thread(get_next_id());
100 thread_map->put(id_to_int(model_thread->get_id()), model_thread);
103 /** @brief Destructor */
// Tears down all checker-owned containers. NOTE(review): several delete
// statements (e.g. for thread_map, obj_map, promises, node_stack, mo_graph)
// are elided in this view — verify against the full source.
104 ModelChecker::~ModelChecker()
// Threads are keyed by their int ids [0, get_num_threads())
106 for (unsigned int i = 0; i < get_num_threads(); i++)
107 delete thread_map->get(i);
112 delete lock_waiters_map;
113 delete condvar_waiters_map;
// Promises are owned by the checker; free each before the container
116 for (unsigned int i = 0; i < promises->size(); i++)
117 delete (*promises)[i];
120 delete pending_rel_seqs;
122 delete thrd_last_action;
123 delete thrd_last_fence_release;
// Lazily-initializing lookup: fetch the action list for 'ptr', creating (and,
// in the elided lines, presumably inserting) an empty list on first use.
// NOTE(review): the NULL check, hash->put(), and return are elided in this view.
130 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr)
132 action_list_t *tmp = hash->get(ptr);
134 tmp = new action_list_t();
// Same lazy-init pattern for the per-location vector-of-lists table
140 static std::vector<action_list_t> * get_safe_ptr_vect_action(HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4> * hash, void * ptr)
142 std::vector<action_list_t> *tmp = hash->get(ptr);
144 tmp = new std::vector<action_list_t>();
151 * Restores user program to initial state and resets all model-checker data
154 void ModelChecker::reset_to_initial_state()
156 DEBUG("+++ Resetting to initial state +++\n");
// Reset the exploration stack before rolling back memory
157 node_stack->reset_execution();
159 /* Print all model-checker output before rollback */
// Snapshot rollback: restores all snapshotted memory to step 0
162 snapshotObject->backTrackBeforeStep(0);
165 /** @return a thread ID for a new Thread */
166 thread_id_t ModelChecker::get_next_id()
// Post-increment: ids are handed out sequentially starting at INITIAL_THREAD_ID
168 return priv->next_thread_id++;
171 /** @return the number of user threads created during this execution */
172 unsigned int ModelChecker::get_num_threads() const
// next_thread_id doubles as the count of ids handed out so far
174 return priv->next_thread_id;
178 * Must be called from user-thread context (e.g., through the global
179 * thread_current() interface)
181 * @return The currently executing Thread.
183 Thread * ModelChecker::get_current_thread() const
185 return scheduler->get_current_thread();
188 /** @return a sequence number for a new ModelAction */
189 modelclock_t ModelChecker::get_next_seq_num()
// Pre-increment: sequence numbers start at 1 (0 is the initial clock value)
191 return ++priv->used_sequence_numbers;
194 Node * ModelChecker::get_curr_node() const
196 return node_stack->get_head();
200 * @brief Choose the next thread to execute.
202 * This function chooses the next thread that should execute. It can force the
203 * adjacency of read/write portions of a RMW action, force THREAD_CREATE to be
204 * followed by a THREAD_START, or it can enforce execution replay/backtracking.
205 * The model-checker may have no preference regarding the next thread (i.e.,
206 * when exploring a new execution ordering), in which case this will return
208 * @param curr The current ModelAction. This action might guide the choice of
210 * @return The next thread to run. If the model-checker has no preference, NULL.
// NOTE(review): this function is fragmentary in this view — the declaration of
// 'tid', several condition lines, and some braces are elided.
212 Thread * ModelChecker::get_next_thread(ModelAction *curr)
217 /* Do not split atomic actions. */
219 return thread_current();
220 /* The THREAD_CREATE action points to the created Thread */
221 else if (curr->get_type() == THREAD_CREATE)
222 return (Thread *)curr->get_location();
225 /* Have we completed exploring the preselected path? */
229 /* Else, we are trying to replay an execution */
230 ModelAction *next = node_stack->get_next()->get_action();
// Track the earliest divergence point seen across executions
232 if (next == diverge) {
233 if (earliest_diverge == NULL || *diverge < *earliest_diverge)
234 earliest_diverge = diverge;
236 Node *nextnode = next->get_node();
237 Node *prevnode = nextnode->get_parent();
238 scheduler->update_sleep_set(prevnode);
240 /* Reached divergence point */
// Each increment_* call tries the next unexplored alternative at this node;
// the first that succeeds keeps the same thread and truncates the stack.
241 if (nextnode->increment_misc()) {
242 /* The next node will try to satisfy a different misc_index values. */
243 tid = next->get_tid();
244 node_stack->pop_restofstack(2);
245 } else if (nextnode->increment_promise()) {
246 /* The next node will try to satisfy a different set of promises. */
247 tid = next->get_tid();
248 node_stack->pop_restofstack(2);
249 } else if (nextnode->increment_read_from()) {
250 /* The next node will read from a different value. */
251 tid = next->get_tid();
252 node_stack->pop_restofstack(2);
253 } else if (nextnode->increment_future_value()) {
254 /* The next node will try to read from a different future value. */
255 tid = next->get_tid();
256 node_stack->pop_restofstack(2);
257 } else if (nextnode->increment_relseq_break()) {
258 /* The next node will try to resolve a release sequence differently */
259 tid = next->get_tid();
260 node_stack->pop_restofstack(2);
// All alternatives at this node are exhausted: backtrack to the parent node
263 /* Make a different thread execute for next step */
264 scheduler->add_sleep(get_thread(next->get_tid()));
265 tid = prevnode->get_next_backtrack();
266 /* Make sure the backtracked thread isn't sleeping. */
267 node_stack->pop_restofstack(1);
268 if (diverge == earliest_diverge) {
269 earliest_diverge = prevnode->get_action();
272 /* The correct sleep set is in the parent node. */
275 DEBUG("*** Divergence point ***\n");
// Replay (not yet at divergence): follow the recorded thread choice
279 tid = next->get_tid();
281 DEBUG("*** ModelChecker chose next thread = %d ***\n", id_to_int(tid));
282 ASSERT(tid != THREAD_ID_T_NONE);
283 return thread_map->get(id_to_int(tid));
287 * We need to know what the next actions of all threads in the sleep
288 * set will be. This method computes them and stores the actions at
289 * the corresponding thread object's pending action.
292 void ModelChecker::execute_sleep_set()
294 for (unsigned int i = 0; i < get_num_threads(); i++) {
295 thread_id_t tid = int_to_id(i);
296 Thread *thr = get_thread(tid);
// Only compute a pending action once per sleeping thread
297 if (scheduler->is_sleep_set(thr) && thr->get_pending() == NULL) {
298 thr->set_state(THREAD_RUNNING);
299 scheduler->next_thread(thr);
// Run the user thread one step so it publishes its next action
// into priv->current_action, then stash it as 'pending'
300 Thread::swap(&system_context, thr);
301 priv->current_action->set_sleep_flag();
302 thr->set_pending(priv->current_action);
// Wake any sleeping thread whose stashed pending action could now
// synchronize with 'curr' (its next step is no longer redundant)
307 void ModelChecker::wake_up_sleeping_actions(ModelAction *curr)
309 for (unsigned int i = 0; i < get_num_threads(); i++) {
310 Thread *thr = get_thread(int_to_id(i));
311 if (scheduler->is_sleep_set(thr)) {
312 ModelAction *pending_act = thr->get_pending();
// RMW-read steps are skipped; presumably handled when the full RMW completes
313 if ((!curr->is_rmwr()) && pending_act->could_synchronize_with(curr))
314 //Remove this thread from sleep set
315 scheduler->remove_sleep(thr);
320 /** @brief Alert the model-checker that an incorrectly-ordered
321 * synchronization was made */
322 void ModelChecker::set_bad_synchronization()
// Sticky per-execution flag; lives in snapshotted 'priv' so it resets each run
324 priv->bad_synchronization = true;
// @return True if the model checker has been asserted (execution aborted)
327 bool ModelChecker::has_asserted() const
329 return priv->asserted;
// Mark the current execution as asserted, halting further exploration work
332 void ModelChecker::set_assert()
334 priv->asserted = true;
338 * Check if we are in a deadlock. Should only be called at the end of an
339 * execution, although it should not give false positives in the middle of an
340 * execution (there should be some ENABLED thread).
342 * @return True if program is in a deadlock; false otherwise
// NOTE(review): lines are elided here — the original presumably also tracks an
// enabled-thread flag; only the blocking-thread accounting is visible.
344 bool ModelChecker::is_deadlocked() const
346 bool blocking_threads = false;
347 for (unsigned int i = 0; i < get_num_threads(); i++) {
348 thread_id_t tid = int_to_id(i);
351 Thread *t = get_thread(tid);
// A user thread with a stashed pending action is blocked (waiting on lock/join)
352 if (!t->is_model_thread() && t->get_pending())
353 blocking_threads = true;
355 return blocking_threads;
359 * Check if this is a complete execution. That is, have all thread completed
360 * execution (rather than exiting because sleep sets have forced a redundant
363 * @return True if the execution is complete.
365 bool ModelChecker::is_complete_execution() const
// Incomplete if any thread is still enabled; return statements elided in view
367 for (unsigned int i = 0; i < get_num_threads(); i++)
368 if (is_enabled(int_to_id(i)))
374 * @brief Assert a bug in the executing program.
376 * Use this function to assert any sort of bug in the user program. If the
377 * current trace is feasible (actually, a prefix of some feasible execution),
378 * then this execution will be aborted, printing the appropriate message. If
379 * the current trace is not yet feasible, the error message will be stashed and
380 * printed if the execution ever becomes feasible.
382 * @param msg Descriptive message for the bug (do not include newline char)
383 * @return True if bug is immediately-feasible
385 bool ModelChecker::assert_bug(const char *msg)
// Stash the message (snapshot-allocated) regardless of feasibility
387 priv->bugs.push_back(new bug_message(msg));
389 if (isfeasibleprefix()) {
397 * @brief Assert a bug in the executing program, asserted by a user thread
398 * @see ModelChecker::assert_bug
399 * @param msg Descriptive message for the bug (do not include newline char)
401 void ModelChecker::assert_user_bug(const char *msg)
403 /* If feasible bug, bail out now */
// Yield back to the model-checker (master) context to end this execution
405 switch_to_master(NULL);
408 /** @return True, if any bugs have been reported for this execution */
409 bool ModelChecker::have_bug_reports() const
411 return priv->bugs.size() != 0;
414 /** @brief Print bug report listing for this execution (if any bugs exist) */
415 void ModelChecker::print_bugs() const
417 if (have_bug_reports()) {
418 model_print("Bug report: %zu bug%s detected\n",
420 priv->bugs.size() > 1 ? "s" : "");
421 for (unsigned int i = 0; i < priv->bugs.size(); i++)
422 priv->bugs[i]->print();
427 * @brief Record end-of-execution stats
429 * Must be run when exiting an execution. Records various stats.
430 * @see struct execution_stats
432 void ModelChecker::record_stats()
// Classify the finished execution into exactly one bucket:
// infeasible > buggy > complete > redundant (sleep-set-pruned)
435 if (!isfeasibleprefix())
436 stats.num_infeasible++;
437 else if (have_bug_reports())
438 stats.num_buggy_executions++;
439 else if (is_complete_execution())
440 stats.num_complete++;
442 stats.num_redundant++;
445 /** @brief Print execution stats */
446 void ModelChecker::print_stats() const
448 model_print("Number of complete, bug-free executions: %d\n", stats.num_complete);
449 model_print("Number of redundant executions: %d\n", stats.num_redundant);
450 model_print("Number of buggy executions: %d\n", stats.num_buggy_executions);
451 model_print("Number of infeasible executions: %d\n", stats.num_infeasible);
452 model_print("Total executions: %d\n", stats.num_total);
453 model_print("Total nodes created: %d\n", node_stack->get_total_nodes());
457 * @brief End-of-exeuction print
458 * @param printbugs Should any existing bugs be printed?
460 void ModelChecker::print_execution(bool printbugs) const
462 print_program_output();
// Verbose mode: show where this execution diverged from the last feasible one
464 if (DBG_ENABLED() || params.verbose) {
465 model_print("Earliest divergence point since last feasible execution:\n");
466 if (earliest_diverge)
467 earliest_diverge->print();
469 model_print("(Not set)\n");
475 /* Don't print invalid bugs */
475 /* Don't print invalid bugs */
484 * Queries the model-checker for more executions to explore and, if one
485 * exists, resets the model-checker state to execute a new execution.
487 * @return If there are more executions to explore, return true. Otherwise,
490 bool ModelChecker::next_execution()
493 /* Is this execution a feasible execution that's worth bug-checking? */
494 bool complete = isfeasibleprefix() && (is_complete_execution() ||
497 /* End-of-execution bug checks */
500 assert_bug("Deadlock detected");
508 if (DBG_ENABLED() || params.verbose || (complete && have_bug_reports()))
509 print_execution(complete);
511 clear_program_output();
514 earliest_diverge = NULL;
516 if ((diverge = get_next_backtrack()) == NULL)
520 model_print("Next execution will diverge at:\n");
524 reset_to_initial_state();
// Find the most recent prior action that conflicts with 'act' (the potential
// backtracking point). Each case scans the per-location action list from most
// recent to oldest. NOTE(review): case labels for the read/write group, the
// ATOMIC_WAIT case, returns, and closing braces are elided in this view.
528 ModelAction * ModelChecker::get_last_conflict(ModelAction *act)
530 switch (act->get_type()) {
535 /* Optimization: relaxed operations don't need backtracking */
536 if (act->is_relaxed())
538 /* linear search: from most recent to oldest */
539 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
540 action_list_t::reverse_iterator rit;
541 for (rit = list->rbegin(); rit != list->rend(); rit++) {
542 ModelAction *prev = *rit;
543 if (prev->could_synchronize_with(act))
549 case ATOMIC_TRYLOCK: {
550 /* linear search: from most recent to oldest */
551 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
552 action_list_t::reverse_iterator rit;
553 for (rit = list->rbegin(); rit != list->rend(); rit++) {
554 ModelAction *prev = *rit;
// Lock/trylock conflicts with another lock-family action on the same mutex
555 if (act->is_conflicting_lock(prev))
560 case ATOMIC_UNLOCK: {
561 /* linear search: from most recent to oldest */
562 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
563 action_list_t::reverse_iterator rit;
564 for (rit = list->rbegin(); rit != list->rend(); rit++) {
565 ModelAction *prev = *rit;
// Unlock conflicts with another thread's failed trylock
566 if (!act->same_thread(prev) && prev->is_failed_trylock())
572 /* linear search: from most recent to oldest */
573 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
574 action_list_t::reverse_iterator rit;
575 for (rit = list->rbegin(); rit != list->rend(); rit++) {
576 ModelAction *prev = *rit;
577 if (!act->same_thread(prev) && prev->is_failed_trylock())
579 if (!act->same_thread(prev) && prev->is_notify())
585 case ATOMIC_NOTIFY_ALL:
586 case ATOMIC_NOTIFY_ONE: {
587 /* linear search: from most recent to oldest */
588 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
589 action_list_t::reverse_iterator rit;
590 for (rit = list->rbegin(); rit != list->rend(); rit++) {
591 ModelAction *prev = *rit;
// Notify conflicts with another thread's wait on the same condvar
592 if (!act->same_thread(prev) && prev->is_wait())
603 /** This method finds backtracking points where we should try to
604 * reorder the parameter ModelAction against.
606 * @param the ModelAction to find backtracking points for.
608 void ModelChecker::set_backtracking(ModelAction *act)
610 Thread *t = get_thread(act);
// Nothing to do when no prior conflicting action exists (guard elided in view)
611 ModelAction *prev = get_last_conflict(act);
// Backtracking is recorded at the node *before* the conflicting action
615 Node *node = prev->get_node()->get_parent();
617 int low_tid, high_tid;
// If act's own thread was enabled at 'node', only that thread needs a
// backtrack point; otherwise consider every thread (else-branch partly elided)
618 if (node->is_enabled(t)) {
619 low_tid = id_to_int(act->get_tid());
620 high_tid = low_tid + 1;
623 high_tid = get_num_threads();
626 for (int i = low_tid; i < high_tid; i++) {
627 thread_id_t tid = int_to_id(i);
629 /* Make sure this thread can be enabled here. */
630 if (i >= node->get_num_threads())
633 /* Don't backtrack into a point where the thread is disabled or sleeping. */
634 if (node->enabled_status(tid) != THREAD_ENABLED)
637 /* Check if this has been explored already */
638 if (node->has_been_explored(tid))
641 /* See if fairness allows */
642 if (model->params.fairwindow != 0 && !node->has_priority(tid)) {
// Under the fairness window, skip tid if some other enabled
// thread currently holds priority (flag handling elided in view)
644 for (int t = 0; t < node->get_num_threads(); t++) {
645 thread_id_t tother = int_to_id(t);
646 if (node->is_enabled(tother) && node->has_priority(tother)) {
654 /* Cache the latest backtracking point */
655 set_latest_backtrack(prev);
657 /* If this is a new backtracking point, mark the tree */
658 if (!node->set_backtrack(tid))
660 DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
661 id_to_int(prev->get_tid()),
662 id_to_int(t->get_id()));
671 * @brief Cache the a backtracking point as the "most recent", if eligible
673 * Note that this does not prepare the NodeStack for this backtracking
674 * operation, it only caches the action on a per-execution basis
676 * @param act The operation at which we should explore a different next action
677 * (i.e., backtracking point)
678 * @return True, if this action is now the most recent backtracking point;
681 bool ModelChecker::set_latest_backtrack(ModelAction *act)
// "Latest" = greatest by the ModelAction ordering (operator>)
683 if (!priv->next_backtrack || *act > *priv->next_backtrack) {
684 priv->next_backtrack = act;
691 * Returns last backtracking point. The model checker will explore a different
692 * path for this point in the next execution.
693 * @return The ModelAction at which the next execution should diverge.
695 ModelAction * ModelChecker::get_next_backtrack()
// Consume-once semantics: clear the cached point after reading it
697 ModelAction *next = priv->next_backtrack;
698 priv->next_backtrack = NULL;
703 * Processes a read or rmw model action.
704 * @param curr is the read model action to process.
705 * @param second_part_of_rmw is boolean that is true is this is the second action of a rmw.
706 * @return True if processing this read updates the mo_graph.
708 bool ModelChecker::process_read(ModelAction *curr, bool second_part_of_rmw)
710 uint64_t value = VALUE_NONE;
711 bool updated = false;
// The Node records which prior write this read should read-from on this path
713 const ModelAction *reads_from = curr->get_node()->get_read_from();
714 if (reads_from != NULL) {
// Speculative mo_graph edits: commit only if the choice stays feasible
715 mo_graph->startChanges();
717 value = reads_from->get_value();
718 bool r_status = false;
// The RMW's write half repeats these checks; skip them on the second pass
720 if (!second_part_of_rmw) {
721 check_recency(curr, reads_from);
722 r_status = r_modification_order(curr, reads_from);
// Infeasible choice: roll back and retry with the node's next
// read-from / future-value alternative (retry plumbing elided in view)
726 if (!second_part_of_rmw && is_infeasible() && (curr->get_node()->increment_read_from() || curr->get_node()->increment_future_value())) {
727 mo_graph->rollbackChanges();
728 priv->too_many_reads = false;
732 read_from(curr, reads_from);
733 mo_graph->commitChanges();
734 mo_check_promises(curr->get_tid(), reads_from);
737 } else if (!second_part_of_rmw) {
738 /* Read from future value */
// Speculate on a value some not-yet-ordered write will produce; a Promise
// obligates a later write to supply it before 'expiration'
739 value = curr->get_node()->get_future_value();
740 modelclock_t expiration = curr->get_node()->get_future_value_expiration();
741 curr->set_read_from(NULL);
742 Promise *valuepromise = new Promise(curr, value, expiration);
743 promises->push_back(valuepromise);
// Deliver the read value back to the user thread
745 get_thread(curr)->set_return_value(value);
751 * Processes a lock, trylock, or unlock model action. @param curr is
752 * the read model action to process.
754 * The try lock operation checks whether the lock is taken. If not,
755 * it falls to the normal lock operation case. If so, it returns
758 * The lock operation has already been checked that it is enabled, so
759 * it just grabs the lock and synchronizes with the previous unlock.
761 * The unlock operation has to re-enable all of the threads that are
762 * waiting on the lock.
764 * @return True if synchronization was updated; false otherwise
766 bool ModelChecker::process_mutex(ModelAction *curr)
768 std::mutex *mutex = NULL;
769 struct std::mutex_state *state = NULL;
// For lock-family actions the mutex is the action's location; for wait the
// mutex pointer travels in the action's value field
771 if (curr->is_trylock() || curr->is_lock() || curr->is_unlock()) {
772 mutex = (std::mutex *)curr->get_location();
773 state = mutex->get_state();
774 } else if (curr->is_wait()) {
775 mutex = (std::mutex *)curr->get_value();
776 state = mutex->get_state();
779 switch (curr->get_type()) {
780 case ATOMIC_TRYLOCK: {
781 bool success = !state->islocked;
782 curr->set_try_lock(success);
// Failed trylock returns 0 to the user thread; success returns 1
784 get_thread(curr)->set_return_value(0);
787 get_thread(curr)->set_return_value(1);
789 //otherwise fall into the lock case
// Flag uses of the mutex that are not ordered after its initialization
791 if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
792 assert_bug("Lock access before initialization");
793 state->islocked = true;
794 ModelAction *unlock = get_last_unlock(curr);
795 //synchronize with the previous unlock statement
796 if (unlock != NULL) {
797 curr->synchronize_with(unlock);
802 case ATOMIC_UNLOCK: {
804 state->islocked = false;
805 //wake up the other threads
806 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, curr->get_location());
807 //activate all the waiting threads
808 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
809 scheduler->wake(get_thread(*rit));
// ATOMIC_WAIT (case label elided in view): release the mutex, wake the
// lock-waiters, then maybe block on the condvar
816 state->islocked = false;
817 //wake up the other threads
818 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, (void *) curr->get_value());
819 //activate all the waiting threads
820 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
821 scheduler->wake(get_thread(*rit));
824 //check whether we should go to sleep or not...simulate spurious failures
// misc==0 selects the "actually sleep" branch; other misc values model a
// spurious wakeup (set_misc_max(2) is configured for wait actions elsewhere)
825 if (curr->get_node()->get_misc() == 0) {
826 get_safe_ptr_action(condvar_waiters_map, curr->get_location())->push_back(curr);
828 scheduler->sleep(get_thread(curr));
832 case ATOMIC_NOTIFY_ALL: {
833 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
834 //activate all the waiting threads
835 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
836 scheduler->wake(get_thread(*rit));
841 case ATOMIC_NOTIFY_ONE: {
842 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
// The node's misc index chooses which waiter is woken, so all choices
// get explored across executions
843 int wakeupthread = curr->get_node()->get_misc();
844 action_list_t::iterator it = waiters->begin();
845 advance(it, wakeupthread);
846 scheduler->wake(get_thread(*it));
858 * Process a write ModelAction
859 * @param curr The ModelAction to process
860 * @return True if the mo_graph was updated or promises were resolved
862 bool ModelChecker::process_write(ModelAction *curr)
864 bool updated_mod_order = w_modification_order(curr);
865 bool updated_promises = resolve_promises(curr);
// Only once all promises are resolved: re-check stashed future values against
// the (now more complete) modification order and set backtracks where allowed
867 if (promises->size() == 0) {
868 for (unsigned int i = 0; i < futurevalues->size(); i++) {
869 struct PendingFutureValue pfv = (*futurevalues)[i];
870 //Do more ambitious checks now that mo is more complete
871 if (mo_may_allow(pfv.writer, pfv.act) &&
872 pfv.act->get_node()->add_future_value(pfv.writer->get_value(), pfv.writer->get_seq_number() + params.maxfuturedelay))
873 set_latest_backtrack(pfv.act);
875 futurevalues->resize(0);
878 mo_graph->commitChanges();
879 mo_check_promises(curr->get_tid(), curr);
// Writes return no meaningful value to the user thread
881 get_thread(curr)->set_return_value(VALUE_NONE);
882 return updated_mod_order || updated_promises;
886 * Process a fence ModelAction
887 * @param curr The ModelAction to process
888 * @return True if synchronization was updated
890 bool ModelChecker::process_fence(ModelAction *curr)
893 * fence-relaxed: no-op
894 * fence-release: only log the occurence (not in this function), for
895 * use in later synchronization
896 * fence-acquire (this function): search for hypothetical release
899 bool updated = false;
900 if (curr->is_acquire()) {
901 action_list_t *list = action_trace;
902 action_list_t::reverse_iterator rit;
903 /* Find X : is_read(X) && X --sb-> curr */
// Walk backwards over the whole trace, restricted to curr's own thread
904 for (rit = list->rbegin(); rit != list->rend(); rit++) {
905 ModelAction *act = *rit;
908 if (act->get_tid() != curr->get_tid())
910 /* Stop at the beginning of the thread */
911 if (act->is_thread_start())
913 /* Stop once we reach a prior fence-acquire */
914 if (act->is_fence() && act->is_acquire())
918 /* read-acquire will find its own release sequences */
919 if (act->is_acquire())
922 /* Establish hypothetical release sequences */
923 rel_heads_list_t release_heads;
924 get_release_seq_heads(curr, act, &release_heads);
// Synchronize the fence with each release head; a failed
// synchronize_with indicates incorrectly-ordered synchronization
925 for (unsigned int i = 0; i < release_heads.size(); i++)
926 if (!curr->synchronize_with(release_heads[i]))
927 set_bad_synchronization();
928 if (release_heads.size() != 0)
936 * @brief Process the current action for thread-related activity
938 * Performs current-action processing for a THREAD_* ModelAction. Proccesses
939 * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
940 * synchronization, etc. This function is a no-op for non-THREAD actions
941 * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
943 * @param curr The current action
944 * @return True if synchronization was updated or a thread completed
946 bool ModelChecker::process_thread_action(ModelAction *curr)
948 bool updated = false;
950 switch (curr->get_type()) {
951 case THREAD_CREATE: {
// The created Thread travels in the action's location field
952 Thread *th = (Thread *)curr->get_location();
953 th->set_creation(curr);
// THREAD_JOIN (case label elided in view): synchronize with the last
// action of the joined ("blocking") thread
957 Thread *blocking = (Thread *)curr->get_location();
958 ModelAction *act = get_last_action(blocking->get_id());
959 curr->synchronize_with(act);
960 updated = true; /* trigger rel-seq checks */
963 case THREAD_FINISH: {
964 Thread *th = get_thread(curr);
// Wake every thread blocked joining on this one
965 while (!th->wait_list_empty()) {
966 ModelAction *act = th->pop_wait_list();
967 scheduler->wake(get_thread(act));
970 updated = true; /* trigger rel-seq checks */
// THREAD_START (label elided in view): check promises against the new thread
974 check_promises(curr->get_tid(), NULL, curr->get_cv());
985 * @brief Process the current action for release sequence fixup activity
987 * Performs model-checker release sequence fixups for the current action,
988 * forcing a single pending release sequence to break (with a given, potential
989 * "loose" write) or to complete (i.e., synchronize). If a pending release
990 * sequence forms a complete release sequence, then we must perform the fixup
991 * synchronization, mo_graph additions, etc.
993 * @param curr The current action; must be a release sequence fixup action
994 * @param work_queue The work queue to which to add work items as they are
997 void ModelChecker::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
// The node encodes which write (if any) breaks the sequence; NULL => complete
999 const ModelAction *write = curr->get_node()->get_relseq_break();
1000 struct release_seq *sequence = pending_rel_seqs->back();
1001 pending_rel_seqs->pop_back();
1003 ModelAction *acquire = sequence->acquire;
1004 const ModelAction *rf = sequence->rf;
1005 const ModelAction *release = sequence->release;
1009 ASSERT(release->same_thread(rf));
1011 if (write == NULL) {
1013 * @todo Forcing a synchronization requires that we set
1014 * modification order constraints. For instance, we can't allow
1015 * a fixup sequence in which two separate read-acquire
1016 * operations read from the same sequence, where the first one
1017 * synchronizes and the other doesn't. Essentially, we can't
1018 * allow any writes to insert themselves between 'release' and
1022 /* Must synchronize */
1023 if (!acquire->synchronize_with(release)) {
1024 set_bad_synchronization();
1027 /* Re-check all pending release sequences */
1028 work_queue->push_back(CheckRelSeqWorkEntry(NULL));
1029 /* Re-check act for mo_graph edges */
1030 work_queue->push_back(MOEdgeWorkEntry(acquire));
1032 /* propagate synchronization to later actions */
// Walk the trace backwards from the end down to 'acquire', pushing the
// new synchronization into every action that happens-after it
1033 action_list_t::reverse_iterator rit = action_trace->rbegin();
1034 for (; (*rit) != acquire; rit++) {
1035 ModelAction *propagate = *rit;
1036 if (acquire->happens_before(propagate)) {
1037 propagate->synchronize_with(acquire);
1038 /* Re-check 'propagate' for mo_graph edges */
1039 work_queue->push_back(MOEdgeWorkEntry(propagate));
1043 /* Break release sequence with new edges:
1044 * release --mo--> write --mo--> rf */
// Inserting 'write' between release and rf in modification order
// prevents the release sequence from completing
1045 mo_graph->addEdge(release, write);
1046 mo_graph->addEdge(write, rf);
1049 /* See if we have realized a data race */
1049 /* See if we have realized a data race */
1054 * Initialize the current action by performing one or more of the following
1055 * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
1056 * in the NodeStack, manipulating backtracking sets, allocating and
1057 * initializing clock vectors, and computing the promises to fulfill.
1059 * @param curr The current action, as passed from the user context; may be
1060 * freed/invalidated after the execution of this function, with a different
1061 * action "returned" its place (pass-by-reference)
1062 * @return True if curr is a newly-explored action; false otherwise
1064 bool ModelChecker::initialize_curr_action(ModelAction **curr)
1066 ModelAction *newcurr;
1068 if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
1069 newcurr = process_rmw(*curr);
1072 if (newcurr->is_rmw())
1073 compute_promises(newcurr);
1079 (*curr)->set_seq_number(get_next_seq_num());
1081 newcurr = node_stack->explore_action(*curr, scheduler->get_enabled_array());
1083 /* First restore type and order in case of RMW operation */
1084 if ((*curr)->is_rmwr())
1085 newcurr->copy_typeandorder(*curr);
1087 ASSERT((*curr)->get_location() == newcurr->get_location());
1088 newcurr->copy_from_new(*curr);
1090 /* Discard duplicate ModelAction; use action from NodeStack */
1093 /* Always compute new clock vector */
1094 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1097 return false; /* Action was explored previously */
1101 /* Always compute new clock vector */
1102 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1104 /* Assign most recent release fence */
1105 newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
1108 * Perform one-time actions when pushing new ModelAction onto
1111 if (newcurr->is_write())
1112 compute_promises(newcurr);
1113 else if (newcurr->is_relseq_fixup())
1114 compute_relseq_breakwrites(newcurr);
1115 else if (newcurr->is_wait())
1116 newcurr->get_node()->set_misc_max(2);
1117 else if (newcurr->is_notify_one()) {
1118 newcurr->get_node()->set_misc_max(get_safe_ptr_action(condvar_waiters_map, newcurr->get_location())->size());
1120 return true; /* This was a new ModelAction */
1125 * @brief Establish reads-from relation between two actions
1127 * Perform basic operations involved with establishing a concrete rf relation,
1128 * including setting the ModelAction data and checking for release sequences.
1130 * @param act The action that is reading (must be a read)
1131 * @param rf The action from which we are reading (must be a write)
1133 * @return True if this read established synchronization
1135 bool ModelChecker::read_from(ModelAction *act, const ModelAction *rf)
1137 act->set_read_from(rf);
// Acquire reads additionally synchronize with each release-sequence head
1138 if (rf != NULL && act->is_acquire()) {
1139 rel_heads_list_t release_heads;
1140 get_release_seq_heads(act, act, &release_heads);
1141 int num_heads = release_heads.size();
1142 for (unsigned int i = 0; i < release_heads.size(); i++)
1143 if (!act->synchronize_with(release_heads[i])) {
// A failed synchronize_with marks incorrectly-ordered synchronization
1144 set_bad_synchronization();
1147 return num_heads > 0;
1153 * @brief Check whether a model action is enabled.
1155 * Checks whether a lock or join operation would be successful (i.e., is the
1156 * lock already locked, or is the joined thread already complete). If not, put
1157 * the action in a waiter list.
1159 * @param curr is the ModelAction to check whether it is enabled.
1160 * @return a bool that indicates whether the action is enabled.
// NOTE(review): the return statements are elided in this view; only the
// disabling branches (enqueue on a waiter list) are visible.
1162 bool ModelChecker::check_action_enabled(ModelAction *curr) {
1163 if (curr->is_lock()) {
1164 std::mutex *lock = (std::mutex *)curr->get_location();
1165 struct std::mutex_state *state = lock->get_state();
// A held lock disables this action until the corresponding unlock wakes it
1166 if (state->islocked) {
1167 //Stick the action in the appropriate waiting queue
1168 get_safe_ptr_action(lock_waiters_map, curr->get_location())->push_back(curr);
1171 } else if (curr->get_type() == THREAD_JOIN) {
1172 Thread *blocking = (Thread *)curr->get_location();
// Joining an incomplete thread blocks until THREAD_FINISH drains its wait list
1173 if (!blocking->is_complete()) {
1174 blocking->push_wait_list(curr);
1183 * Stores the ModelAction for the current thread action. Call this
1184 * immediately before switching from user- to system-context to pass
1185 * data between them.
1186 * @param act The ModelAction created by the user-thread action
1188 void ModelChecker::set_current_action(ModelAction *act) {
/* Stash the user-thread action in snapshotted state (priv) so the
 * system context can retrieve it after the context switch */
1189 priv->current_action = act;
1193 * This is the heart of the model checker routine. It performs model-checking
1194 * actions corresponding to a given "current action." Among other processes, it
1195 * calculates reads-from relationships, updates synchronization clock vectors,
1196 * forms a memory_order constraints graph, and handles replay/backtrack
1197 * execution when running permutations of previously-observed executions.
1199 * @param curr The current action to process
1200 * @return The ModelAction that is actually executed; may be different than
1201 * curr; may be NULL, if the current action is not enabled to run
1203 ModelAction * ModelChecker::check_current_action(ModelAction *curr)
/* An RMW is modeled as a read part (RMWR) followed by a write part
 * (RMW/RMWC); the second part must not be re-added to the action lists */
1206 bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
1208 if (!check_action_enabled(curr)) {
1209 /* Make the execution look like we chose to run this action
1210 * much later, when a lock/join can succeed */
1211 get_thread(curr)->set_pending(curr);
1212 scheduler->sleep(get_thread(curr));
1216 bool newly_explored = initialize_curr_action(&curr);
1222 wake_up_sleeping_actions(curr);
1224 /* Add the action to lists before any other model-checking tasks */
1225 if (!second_part_of_rmw)
1226 add_action_to_lists(curr);
1228 /* Build may_read_from set for newly-created actions */
1229 if (newly_explored && curr->is_read())
1230 build_reads_from_past(curr);
1232 /* Initialize work_queue with the "current action" work */
1233 work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
/* Drain the work queue to a fixed point (or until a bug asserts);
 * processing one entry may enqueue more work */
1234 while (!work_queue.empty() && !has_asserted()) {
1235 WorkQueueEntry work = work_queue.front();
1236 work_queue.pop_front();
1238 switch (work.type) {
1239 case WORK_CHECK_CURR_ACTION: {
1240 ModelAction *act = work.action;
1241 bool update = false; /* update this location's release seq's */
1242 bool update_all = false; /* update all release seq's */
/* Dispatch to the per-kind processors.
 * NOTE(review): the statements that set update/update_all after each
 * call are elided from this excerpt — confirm against full source */
1244 if (process_thread_action(curr))
1247 if (act->is_read() && process_read(act, second_part_of_rmw))
1250 if (act->is_write() && process_write(act))
1253 if (act->is_fence() && process_fence(act))
1256 if (act->is_mutex_op() && process_mutex(act))
1259 if (act->is_relseq_fixup())
1260 process_relseq_fixup(curr, &work_queue);
/* A NULL location means: re-check release sequences everywhere */
1263 work_queue.push_back(CheckRelSeqWorkEntry(NULL));
1265 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1268 case WORK_CHECK_RELEASE_SEQ:
1269 resolve_release_sequences(work.location, &work_queue);
1271 case WORK_CHECK_MO_EDGES: {
1272 /** @todo Complete verification of work_queue */
1273 ModelAction *act = work.action;
1274 bool updated = false;
1276 if (act->is_read()) {
1277 const ModelAction *rf = act->get_reads_from();
1278 if (rf != NULL && r_modification_order(act, rf))
1281 if (act->is_write()) {
1282 if (w_modification_order(act))
1285 mo_graph->commitChanges();
/* New mo edges may allow a pending release sequence to resolve */
1288 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
/* All work drained: record backtracking points created by this action */
1297 check_curr_backtracking(curr);
1298 set_backtracking(curr);
1302 void ModelChecker::check_curr_backtracking(ModelAction *curr)
1304 Node *currnode = curr->get_node();
1305 Node *parnode = currnode->get_parent();
/* If any unexplored alternative remains on this node (reads-from,
 * future value, promise, relseq break, misc) or a backtracking point
 * exists on the parent node, remember curr as the latest point to
 * backtrack to */
1307 if ((parnode && !parnode->backtrack_empty()) ||
1308 !currnode->misc_empty() ||
1309 !currnode->read_from_empty() ||
1310 !currnode->future_value_empty() ||
1311 !currnode->promise_empty() ||
1312 !currnode->relseq_break_empty()) {
1313 set_latest_backtrack(curr);
1317 bool ModelChecker::promises_expired() const
/* A promise is expired when execution has consumed sequence numbers
 * beyond its expiration without the promised write materializing */
1319 for (unsigned int i = 0; i < promises->size(); i++) {
1320 Promise *promise = (*promises)[i];
1321 if (promise->get_expiration() < priv->used_sequence_numbers)
1328 * This is the strongest feasibility check available.
1329 * @return whether the current trace (partial or complete) must be a prefix of
1332 bool ModelChecker::isfeasibleprefix() const
1334 return pending_rel_seqs->size() == 0 && is_feasible_prefix_ignore_relseq();
1338 * Returns whether the current completed trace is feasible, except for pending
1339 * release sequences.
1341 bool ModelChecker::is_feasible_prefix_ignore_relseq() const
1343 if (DBG_ENABLED() && promises->size() != 0)
1344 DEBUG("Infeasible: unrevolved promises\n");
1346 return !is_infeasible() && promises->size() == 0;
1350 * Check if the current partial trace is infeasible. Does not check any
1351 * end-of-execution flags, which might rule out the execution. Thus, this is
1352 * useful only for ruling an execution as infeasible.
1353 * @return whether the current partial trace is infeasible.
1355 bool ModelChecker::is_infeasible() const
/* Debug-only duplicate query: report the RMW violation explicitly
 * before folding the same check into the return expression */
1357 if (DBG_ENABLED() && mo_graph->checkForRMWViolation())
1358 DEBUG("Infeasible: RMW violation\n");
1360 return mo_graph->checkForRMWViolation() || is_infeasible_ignoreRMW();
1364 * Check If the current partial trace is infeasible, while ignoring
1365 * infeasibility related to 2 RMW's reading from the same store. It does not
1366 * check end-of-execution feasibility.
1367 * @see ModelChecker::is_infeasible
1368 * @return whether the current partial trace is infeasible, ignoring multiple
1369 * RMWs reading from the same store.
1371 bool ModelChecker::is_infeasible_ignoreRMW() const
/* The debug block mirrors the return expression below so each
 * infeasibility cause gets reported individually */
1373 if (DBG_ENABLED()) {
1374 if (mo_graph->checkForCycles())
1375 DEBUG("Infeasible: modification order cycles\n");
1376 if (priv->failed_promise)
1377 DEBUG("Infeasible: failed promise\n");
1378 if (priv->too_many_reads)
1379 DEBUG("Infeasible: too many reads\n");
1380 if (priv->bad_synchronization)
1381 DEBUG("Infeasible: bad synchronization ordering\n");
1382 if (promises_expired())
1383 DEBUG("Infeasible: promises expired\n");
1385 return mo_graph->checkForCycles() || priv->failed_promise ||
1386 priv->too_many_reads || priv->bad_synchronization ||
/* NOTE(review): the final disjunct (promises_expired()) is on a line
 * elided from this excerpt */
1390 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
1391 ModelAction * ModelChecker::process_rmw(ModelAction *act) {
1392 ModelAction *lastread = get_last_action(act->get_tid());
1393 lastread->process_rmw(act);
/* A completed RMW must be modification-ordered immediately after the
 * write it read from (atomicity of the RMW) */
1394 if (act->is_rmw() && lastread->get_reads_from() != NULL) {
1395 mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
1396 mo_graph->commitChanges();
1402 * Checks whether a thread has read from the same write for too many times
1403 * without seeing the effects of a later write.
1406 * 1) there must be a different write that we could read from that would satisfy the modification order,
1407 * 2) we must have read from the same value in excess of maxreads times, and
1408 * 3) that other write must have been in the reads_from set for maxreads times.
1410 * If so, we decide that the execution is no longer feasible.
1412 void ModelChecker::check_recency(ModelAction *curr, const ModelAction *rf)
/* maxreads == 0 disables the liveness/recency check entirely */
1414 if (params.maxreads != 0) {
/* With <= 1 possible reads-from choice there is no alternative write */
1415 if (curr->get_node()->get_read_from_size() <= 1)
1417 //Must make sure that execution is currently feasible... We could
1418 //accidentally clear by rolling back
1419 if (is_infeasible())
1421 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1422 int tid = id_to_int(curr->get_tid());
1425 if ((int)thrd_lists->size() <= tid)
1427 action_list_t *list = &(*thrd_lists)[tid];
1429 action_list_t::reverse_iterator rit = list->rbegin();
1430 /* Skip past curr */
1431 for (; (*rit) != curr; rit++)
1433 /* go past curr now */
/* Keep a copy so the second pass below can re-walk the same window */
1436 action_list_t::reverse_iterator ritcopy = rit;
1437 //See if we have enough reads from the same value
/* NOTE(review): the declaration/initialization of 'count' is on a line
 * elided from this excerpt */
1439 for (; count < params.maxreads; rit++, count++) {
1440 if (rit == list->rend())
1442 ModelAction *act = *rit;
1443 if (!act->is_read())
1446 if (act->get_reads_from() != rf)
1448 if (act->get_node()->get_read_from_size() <= 1)
/* Look for a different write that all maxreads reads could have read
 * from instead; if one exists, this execution is too stale */
1451 for (int i = 0; i < curr->get_node()->get_read_from_size(); i++) {
1453 const ModelAction *write = curr->get_node()->get_read_from_at(i);
1455 /* Need a different write */
1459 /* Test to see whether this is a feasible write to read from */
/* Trial-add mo edges for the candidate, test feasibility, then roll
 * the graph back — the candidate is not actually chosen here */
1460 mo_graph->startChanges();
1461 r_modification_order(curr, write);
1462 bool feasiblereadfrom = !is_infeasible();
1463 mo_graph->rollbackChanges();
1465 if (!feasiblereadfrom)
1469 bool feasiblewrite = true;
1470 //now we need to see if this write works for everyone
1472 for (int loop = count; loop > 0; loop--, rit++) {
1473 ModelAction *act = *rit;
1474 bool foundvalue = false;
1475 for (int j = 0; j < act->get_node()->get_read_from_size(); j++) {
1476 if (act->get_node()->get_read_from_at(j) == write) {
1482 feasiblewrite = false;
/* All maxreads reads could have seen 'write': rule this trace out */
1486 if (feasiblewrite) {
1487 priv->too_many_reads = true;
1495 * Updates the mo_graph with the constraints imposed from the current
1498 * Basic idea is the following: Go through each other thread and find
1499 the latest action that happened before our read. Two cases:
1501 * (1) The action is a write => that write must either occur before
1502 * the write we read from or be the write we read from.
1504 * (2) The action is a read => the write that that action read from
1505 * must occur before the write we read from or be the same write.
1507 * @param curr The current action. Must be a read.
1508 * @param rf The action that curr reads from. Must be a write.
1509 * @return True if modification order edges were added; false otherwise
1511 bool ModelChecker::r_modification_order(ModelAction *curr, const ModelAction *rf)
1513 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1516 ASSERT(curr->is_read());
1518 /* Last SC fence in the current thread */
1519 ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1521 /* Iterate over all threads */
/* NOTE(review): declaration of loop index 'i' and the 'added' result
 * flag are on lines elided from this excerpt */
1522 for (i = 0; i < thrd_lists->size(); i++) {
1523 /* Last SC fence in thread i */
1524 ModelAction *last_sc_fence_thread_local = NULL;
1525 if (int_to_id((int)i) != curr->get_tid())
1526 last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(i), NULL);
1528 /* Last SC fence in thread i, before last SC fence in current thread */
1529 ModelAction *last_sc_fence_thread_before = NULL;
1530 if (last_sc_fence_local)
1531 last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1533 /* Iterate over actions in thread, starting from most recent */
1534 action_list_t *list = &(*thrd_lists)[i];
1535 action_list_t::reverse_iterator rit;
1536 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1537 ModelAction *act = *rit;
/* Writes other than rf that are ordered (by SC rules or
 * happens-before) before curr must be mo-ordered before rf */
1539 if (act->is_write() && act != rf && act != curr) {
1540 /* C++, Section 29.3 statement 5 */
1541 if (curr->is_seqcst() && last_sc_fence_thread_local &&
1542 *act < *last_sc_fence_thread_local) {
1543 mo_graph->addEdge(act, rf);
1547 /* C++, Section 29.3 statement 4 */
1548 else if (act->is_seqcst() && last_sc_fence_local &&
1549 *act < *last_sc_fence_local) {
1550 mo_graph->addEdge(act, rf);
1554 /* C++, Section 29.3 statement 6 */
1555 else if (last_sc_fence_thread_before &&
1556 *act < *last_sc_fence_thread_before) {
1557 mo_graph->addEdge(act, rf);
1564 * Include at most one act per-thread that "happens
1565 * before" curr. Don't consider reflexively.
1567 if (act->happens_before(curr) && act != curr) {
1568 if (act->is_write()) {
1570 mo_graph->addEdge(act, rf);
/* A prior read constrains rf via the write it read from */
1574 const ModelAction *prevreadfrom = act->get_reads_from();
1575 //if the previous read is unresolved, keep going...
1576 if (prevreadfrom == NULL)
1579 if (rf != prevreadfrom) {
1580 mo_graph->addEdge(prevreadfrom, rf);
1592 /** This method fixes up the modification order when we resolve a
1593 * promises. The basic problem is that actions that occur after the
1594 * read curr could not properly add items to the modification order
1597 * So for each thread, we find the earliest item that happens after
1598 * the read curr. This is the item we have to fix up with additional
1599 * constraints. If that action is write, we add a MO edge between
1600 * the Action rf and that action. If the action is a read, we add a
1601 * MO edge between the Action rf, and whatever the read accessed.
1603 * @param curr is the read ModelAction that we are fixing up MO edges for.
1604 * @param rf is the write ModelAction that curr reads from.
1607 void ModelChecker::post_r_modification_order(ModelAction *curr, const ModelAction *rf)
1609 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1611 ASSERT(curr->is_read());
1613 /* Iterate over all threads */
/* NOTE(review): declaration of loop index 'i' is on a line elided from
 * this excerpt */
1614 for (i = 0; i < thrd_lists->size(); i++) {
1615 /* Iterate over actions in thread, starting from most recent */
1616 action_list_t *list = &(*thrd_lists)[i];
1617 action_list_t::reverse_iterator rit;
1618 ModelAction *lastact = NULL;
1620 /* Find last action that happens after curr that is either not curr or a rmw */
1621 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1622 ModelAction *act = *rit;
1623 if (curr->happens_before(act) && (curr != act || curr->is_rmw())) {
1629 /* Include at most one act per-thread that "happens before" curr */
1630 if (lastact != NULL) {
1631 if (lastact == curr) {
1632 //Case 1: The resolved read is a RMW, and we need to make sure
1633 //that the write portion of the RMW mod order after rf
1635 mo_graph->addEdge(rf, lastact);
1636 } else if (lastact->is_read()) {
1637 //Case 2: The resolved read is a normal read and the next
1638 //operation is a read, and we need to make sure the value read
1639 //is mod ordered after rf
1641 const ModelAction *postreadfrom = lastact->get_reads_from();
1642 if (postreadfrom != NULL && rf != postreadfrom)
1643 mo_graph->addEdge(rf, postreadfrom);
1645 //Case 3: The resolved read is a normal read and the next
1646 //operation is a write, and we need to make sure that the
1647 //write is mod ordered after rf
1649 mo_graph->addEdge(rf, lastact);
1657 * Updates the mo_graph with the constraints imposed from the current write.
1659 * Basic idea is the following: Go through each other thread and find
1660 * the latest action that happened before our write. Two cases:
1662 * (1) The action is a write => that write must occur before
1665 * (2) The action is a read => the write that that action read from
1666 * must occur before the current write.
1668 * This method also handles two other issues:
1670 * (I) Sequential Consistency: Making sure that if the current write is
1671 * seq_cst, that it occurs after the previous seq_cst write.
1673 * (II) Sending the write back to non-synchronizing reads.
1675 * @param curr The current action. Must be a write.
1676 * @return True if modification order edges were added; false otherwise
1678 bool ModelChecker::w_modification_order(ModelAction *curr)
1680 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1683 ASSERT(curr->is_write());
1685 if (curr->is_seqcst()) {
1686 /* We have to at least see the last sequentially consistent write,
1687 so we are initialized. */
1688 ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
1689 if (last_seq_cst != NULL) {
1690 mo_graph->addEdge(last_seq_cst, curr);
1695 /* Last SC fence in the current thread */
1696 ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1698 /* Iterate over all threads */
/* NOTE(review): declaration of loop index 'i' and the 'added' result
 * flag are on lines elided from this excerpt */
1699 for (i = 0; i < thrd_lists->size(); i++) {
1700 /* Last SC fence in thread i, before last SC fence in current thread */
1701 ModelAction *last_sc_fence_thread_before = NULL;
1702 if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
1703 last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1705 /* Iterate over actions in thread, starting from most recent */
1706 action_list_t *list = &(*thrd_lists)[i];
1707 action_list_t::reverse_iterator rit;
1708 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1709 ModelAction *act = *rit;
1712 * 1) If RMW and it actually read from something, then we
1713 * already have all relevant edges, so just skip to next
1716 * 2) If RMW and it didn't read from anything, we should add
1717 * whatever edge we can get to speed up convergence.
1719 * 3) If normal write, we need to look at earlier actions, so
1720 * continue processing list.
1722 if (curr->is_rmw()) {
1723 if (curr->get_reads_from() != NULL)
1731 /* C++, Section 29.3 statement 7 */
1732 if (last_sc_fence_thread_before && act->is_write() &&
1733 *act < *last_sc_fence_thread_before) {
1734 mo_graph->addEdge(act, curr);
1740 * Include at most one act per-thread that "happens
1743 if (act->happens_before(curr)) {
1745 * Note: if act is RMW, just add edge:
1747 * The following edge should be handled elsewhere:
1748 * readfrom(act) --mo--> act
1750 if (act->is_write())
1751 mo_graph->addEdge(act, curr);
1752 else if (act->is_read()) {
1753 //if previous read accessed a null, just keep going
1754 if (act->get_reads_from() == NULL)
1756 mo_graph->addEdge(act->get_reads_from(), curr);
1760 } else if (act->is_read() && !act->could_synchronize_with(curr) &&
1761 !act->same_thread(curr)) {
1762 /* We have an action that:
1763 (1) did not happen before us
1764 (2) is a read and we are a write
1765 (3) cannot synchronize with us
1766 (4) is in a different thread
1768 that read could potentially read from our write. Note that
1769 these checks are overly conservative at this point, we'll
1770 do more checks before actually removing the
/* Record a pending future value: 'act' may later read from 'curr' */
1774 if (thin_air_constraint_may_allow(curr, act)) {
1775 if (!is_infeasible() ||
1776 (curr->is_rmw() && act->is_rmw() && curr->get_reads_from() == act->get_reads_from() && !is_infeasible_ignoreRMW())) {
1777 struct PendingFutureValue pfv = {curr, act};
1778 futurevalues->push_back(pfv);
1788 /** Arbitrary reads from the future are not allowed. Section 29.3
1789 * part 9 places some constraints. This method checks one result of this
1790 * constraint. Others require compiler support. */
1791 bool ModelChecker::thin_air_constraint_may_allow(const ModelAction *writer, const ModelAction *reader)
/* The forbidden pattern only involves RMW chains; plain reads/writes
 * cannot form it */
1793 if (!writer->is_rmw())
1796 if (!reader->is_rmw())
/* Walk the reads-from chain backwards from 'writer'; if it reaches
 * 'reader' itself, or passes through reader's thread after reader, the
 * proposed future read would be disallowed */
1799 for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
1800 if (search == reader)
1802 if (search->get_tid() == reader->get_tid() &&
1803 search->happens_before(reader))
1811 * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
1812 * some constraints. This method checks one of the following constraints (others
1813 * require compiler support):
1815 * If X --hb-> Y --mo-> Z, then X should not read from Z.
1817 bool ModelChecker::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
1819 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, reader->get_location());
1821 /* Iterate over all threads */
/* NOTE(review): declaration of loop index 'i' is on a line elided from
 * this excerpt */
1822 for (i = 0; i < thrd_lists->size(); i++) {
/* Earliest write (per thread) that is mo-relevant after 'reader':
 * either a write that happens after the read, or the write a later
 * read observed */
1823 const ModelAction *write_after_read = NULL;
1825 /* Iterate over actions in thread, starting from most recent */
1826 action_list_t *list = &(*thrd_lists)[i];
1827 action_list_t::reverse_iterator rit;
1828 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1829 ModelAction *act = *rit;
1831 /* Don't disallow due to act == reader */
1832 if (!reader->happens_before(act) || reader == act)
1834 else if (act->is_write())
1835 write_after_read = act;
1836 else if (act->is_read() && act->get_reads_from() != NULL)
1837 write_after_read = act->get_reads_from();
/* If reader --hb--> write_after_read --mo--> writer, then reading
 * from 'writer' would violate "X --hb-> Y --mo-> Z => X not rf Z" */
1840 if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
1847 * Finds the head(s) of the release sequence(s) containing a given ModelAction.
1848 * The ModelAction under consideration is expected to be taking part in
1849 * release/acquire synchronization as an object of the "reads from" relation.
1850 * Note that this can only provide release sequence support for RMW chains
1851 * which do not read from the future, as those actions cannot be traced until
1852 * their "promise" is fulfilled. Similarly, we may not even establish the
1853 * presence of a release sequence with certainty, as some modification order
1854 * constraints may be decided further in the future. Thus, this function
1855 * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
1856 * and a boolean representing certainty.
1858 * @param rf The action that might be part of a release sequence. Must be a
1860 * @param release_heads A pass-by-reference style return parameter. After
1861 * execution of this function, release_heads will contain the heads of all the
1862 * relevant release sequences, if any exists with certainty
1863 * @param pending A pass-by-reference style return parameter which is only used
1864 * when returning false (i.e., uncertain). Returns most information regarding
1865 * an uncertain release sequence, including any write operations that might
1866 * break the sequence.
1867 * @return true, if the ModelChecker is certain that release_heads is complete;
1870 bool ModelChecker::release_seq_heads(const ModelAction *rf,
1871 rel_heads_list_t *release_heads,
1872 struct release_seq *pending) const
1874 /* Only check for release sequences if there are no cycles */
1875 if (mo_graph->checkForCycles())
1879 ASSERT(rf->is_write());
/* Phase 1: walk backwards through the RMW chain that rf belongs to,
 * collecting release heads along the way.
 * NOTE(review): the loop construct enclosing lines 1881-1899 is elided
 * from this excerpt — confirm structure against full source */
1881 if (rf->is_release())
1882 release_heads->push_back(rf);
1883 else if (rf->get_last_fence_release())
1884 release_heads->push_back(rf->get_last_fence_release());
1886 break; /* End of RMW chain */
1888 /** @todo Need to be smarter here... In the linux lock
1889 * example, this will run to the beginning of the program for
1891 /** @todo The way to be smarter here is to keep going until 1
1892 * thread has a release preceded by an acquire and you've seen
1895 /* acq_rel RMW is a sufficient stopping condition */
1896 if (rf->is_acquire() && rf->is_release())
1897 return true; /* complete */
1899 rf = rf->get_reads_from();
1902 /* read from future: need to settle this later */
1904 return false; /* incomplete */
1907 if (rf->is_release())
1908 return true; /* complete */
1910 /* else relaxed write
1911 * - check for fence-release in the same thread (29.8, stmt. 3)
1912 * - check modification order for contiguous subsequence
1913 * -> rf must be same thread as release */
1915 const ModelAction *fence_release = rf->get_last_fence_release();
1916 /* Synchronize with a fence-release unconditionally; we don't need to
1917 * find any more "contiguous subsequence..." for it */
1919 release_heads->push_back(fence_release);
1921 int tid = id_to_int(rf->get_tid());
1922 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, rf->get_location());
1923 action_list_t *list = &(*thrd_lists)[tid];
1924 action_list_t::const_reverse_iterator rit;
1926 /* Find rf in the thread list */
1927 rit = std::find(list->rbegin(), list->rend(), rf);
1928 ASSERT(rit != list->rend());
1930 /* Find the last {write,fence}-release */
1931 for (; rit != list->rend(); rit++) {
1932 if (fence_release && *(*rit) < *fence_release)
1934 if ((*rit)->is_release())
1937 if (rit == list->rend()) {
1938 /* No write-release in this thread */
1939 return true; /* complete */
1940 } else if (fence_release && *(*rit) < *fence_release) {
1941 /* The fence-release is more recent (and so, "stronger") than
1942 * the most recent write-release */
1943 return true; /* complete */
1944 } /* else, need to establish contiguous release sequence */
1945 ModelAction *release = *rit;
1947 ASSERT(rf->same_thread(release));
1949 pending->writes.clear();
/* Phase 2: check every other thread for writes that could break the
 * contiguous modification-order subsequence between release and rf */
1951 bool certain = true;
1952 for (unsigned int i = 0; i < thrd_lists->size(); i++) {
1953 if (id_to_int(rf->get_tid()) == (int)i)
1955 list = &(*thrd_lists)[i];
1957 /* Can we ensure no future writes from this thread may break
1958 * the release seq? */
1959 bool future_ordered = false;
1961 ModelAction *last = get_last_action(int_to_id(i));
1962 Thread *th = get_thread(int_to_id(i));
1963 if ((last && rf->happens_before(last)) ||
1966 future_ordered = true;
1968 ASSERT(!th->is_model_thread() || future_ordered);
1970 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1971 const ModelAction *act = *rit;
1972 /* Reach synchronization -> this thread is complete */
1973 if (act->happens_before(release))
1975 if (rf->happens_before(act)) {
1976 future_ordered = true;
1980 /* Only non-RMW writes can break release sequences */
1981 if (!act->is_write() || act->is_rmw())
1984 /* Check modification order */
1985 if (mo_graph->checkReachable(rf, act)) {
1986 /* rf --mo--> act */
1987 future_ordered = true;
1990 if (mo_graph->checkReachable(act, release))
1991 /* act --mo--> release */
1993 if (mo_graph->checkReachable(release, act) &&
1994 mo_graph->checkReachable(act, rf)) {
1995 /* release --mo-> act --mo--> rf */
1996 return true; /* complete */
1998 /* act may break release sequence */
1999 pending->writes.push_back(act);
2002 if (!future_ordered)
2003 certain = false; /* This thread is uncertain */
/* NOTE(review): the 'if (certain)' guard around lines 2007-2008 is
 * elided from this excerpt */
2007 release_heads->push_back(release);
2008 pending->writes.clear();
2010 pending->release = release;
2017 * An interface for getting the release sequence head(s) with which a
2018 * given ModelAction must synchronize. This function only returns a non-empty
2019 * result when it can locate a release sequence head with certainty. Otherwise,
2020 * it may mark the internal state of the ModelChecker so that it will handle
2021 * the release sequence at a later time, causing @a acquire to update its
2022 * synchronization at some later point in execution.
2024 * @param acquire The 'acquire' action that may synchronize with a release
2026 * @param read The read action that may read from a release sequence; this may
2027 * be the same as acquire, or else an earlier action in the same thread (i.e.,
2028 * when 'acquire' is a fence-acquire)
2029 * @param release_heads A pass-by-reference return parameter. Will be filled
2030 * with the head(s) of the release sequence(s), if they exists with certainty.
2031 * @see ModelChecker::release_seq_heads
2033 void ModelChecker::get_release_seq_heads(ModelAction *acquire,
2034 ModelAction *read, rel_heads_list_t *release_heads)
2036 const ModelAction *rf = read->get_reads_from();
/* snapshot_calloc: the pending struct lives in snapshotted memory so it
 * rolls back correctly with the rest of the checker state */
2037 struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
2038 sequence->acquire = acquire;
2039 sequence->read = read;
/* Uncertain result: stash the sequence for lazy re-checking as more
 * modification-order information arrives */
2041 if (!release_seq_heads(rf, release_heads, sequence)) {
2042 /* add act to 'lazy checking' list */
2043 pending_rel_seqs->push_back(sequence);
/* Certain result: the scratch struct is no longer needed
 * NOTE(review): the 'else' branch enclosing this free is elided from
 * this excerpt */
2045 snapshot_free(sequence);
2050 * Attempt to resolve all stashed operations that might synchronize with a
2051 * release sequence for a given location. This implements the "lazy" portion of
2052 * determining whether or not a release sequence was contiguous, since not all
2053 * modification order information is present at the time an action occurs.
2055 * @param location The location/object that should be checked for release
2056 * sequence resolutions. A NULL value means to check all locations.
2057 * @param work_queue The work queue to which to add work items as they are
2059 * @return True if any updates occurred (new synchronization, new mo_graph
2062 bool ModelChecker::resolve_release_sequences(void *location, work_queue_t *work_queue)
2064 bool updated = false;
2065 std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >::iterator it = pending_rel_seqs->begin();
2066 while (it != pending_rel_seqs->end()) {
2067 struct release_seq *pending = *it;
2068 ModelAction *acquire = pending->acquire;
2069 const ModelAction *read = pending->read;
2071 /* Only resolve sequences on the given location, if provided */
2072 if (location && read->get_location() != location) {
2077 const ModelAction *rf = read->get_reads_from();
2078 rel_heads_list_t release_heads;
/* NOTE(review): the declaration of 'complete' is on a line elided from
 * this excerpt */
2080 complete = release_seq_heads(rf, &release_heads, pending);
2081 for (unsigned int i = 0; i < release_heads.size(); i++) {
2082 if (!acquire->has_synchronized_with(release_heads[i])) {
2083 if (acquire->synchronize_with(release_heads[i]))
/* Synchronization failed: ordering contradiction */
2086 set_bad_synchronization();
/* New synchronization may unblock other pending sequences and create
 * new mo edges, so re-enqueue the relevant checks */
2091 /* Re-check all pending release sequences */
2092 work_queue->push_back(CheckRelSeqWorkEntry(NULL));
2093 /* Re-check read-acquire for mo_graph edges */
2094 if (acquire->is_read())
2095 work_queue->push_back(MOEdgeWorkEntry(acquire));
2097 /* propagate synchronization to later actions */
2098 action_list_t::reverse_iterator rit = action_trace->rbegin();
2099 for (; (*rit) != acquire; rit++) {
2100 ModelAction *propagate = *rit;
2101 if (acquire->happens_before(propagate)) {
2102 propagate->synchronize_with(acquire);
2103 /* Re-check 'propagate' for mo_graph edges */
2104 work_queue->push_back(MOEdgeWorkEntry(propagate));
/* Completed sequences are removed and freed; erase() advances 'it' */
2109 it = pending_rel_seqs->erase(it);
2110 snapshot_free(pending);
2116 // If we resolved promises or data races, see if we have realized a data race.
2123 * Performs various bookkeeping operations for the current ModelAction. For
2124 * instance, adds action to the per-object, per-thread action vector and to the
2125 * action trace list of all thread actions.
2127 * @param act is the ModelAction to add.
2129 void ModelChecker::add_action_to_lists(ModelAction *act)
2131 int tid = id_to_int(act->get_tid());
2132 ModelAction *uninit = NULL;
/* First access to an atomic variable: synthesize an "uninitialized"
 * action so reads have something to read from.
 * NOTE(review): the declaration of 'uninit_id' is on a line elided from
 * this excerpt, as are the 'if (uninit)' guards before the push_front
 * calls below */
2134 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
2135 if (list->empty() && act->is_atomic_var()) {
2136 uninit = new_uninitialized_action(act->get_location());
2137 uninit_id = id_to_int(uninit->get_tid());
2138 list->push_back(uninit);
2140 list->push_back(act);
2142 action_trace->push_back(act);
/* The uninitialized action is logically the very first in the trace */
2144 action_trace->push_front(uninit);
2146 std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, act->get_location());
2147 if (tid >= (int)vec->size())
2148 vec->resize(priv->next_thread_id);
2149 (*vec)[tid].push_back(act);
2151 (*vec)[uninit_id].push_front(uninit);
2153 if ((int)thrd_last_action->size() <= tid)
2154 thrd_last_action->resize(get_num_threads());
2155 (*thrd_last_action)[tid] = act;
2157 (*thrd_last_action)[uninit_id] = uninit;
2159 if (act->is_fence() && act->is_release()) {
2160 if ((int)thrd_last_fence_release->size() <= tid)
2161 thrd_last_fence_release->resize(get_num_threads());
2162 (*thrd_last_fence_release)[tid] = act;
/* A wait is also indexed under its associated mutex, so mutex-based
 * queries (e.g. get_last_unlock) can see it */
2165 if (act->is_wait()) {
2166 void *mutex_loc = (void *) act->get_value();
2167 get_safe_ptr_action(obj_map, mutex_loc)->push_back(act);
2169 std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, mutex_loc);
2170 if (tid >= (int)vec->size())
2171 vec->resize(priv->next_thread_id);
2172 (*vec)[tid].push_back(act);
2177 * @brief Get the last action performed by a particular Thread
2178 * @param tid The thread ID of the Thread in question
2179 * @return The last action in the thread
2181 ModelAction * ModelChecker::get_last_action(thread_id_t tid) const
2183 int threadid = id_to_int(tid);
2184 if (threadid < (int)thrd_last_action->size())
2185 return (*thrd_last_action)[id_to_int(tid)];
2191 * @brief Get the last fence release performed by a particular Thread
2192 * @param tid The thread ID of the Thread in question
2193 * @return The last fence release in the thread, if one exists; NULL otherwise
2195 ModelAction * ModelChecker::get_last_fence_release(thread_id_t tid) const
2197 int threadid = id_to_int(tid);
2198 if (threadid < (int)thrd_last_fence_release->size())
2199 return (*thrd_last_fence_release)[id_to_int(tid)];
2205 * Gets the last memory_order_seq_cst write (in the total global sequence)
2206 * performed on a particular object (i.e., memory location), not including the
2208 * @param curr The current ModelAction; also denotes the object location to
2210 * @return The last seq_cst write
2212 ModelAction * ModelChecker::get_last_seq_cst_write(ModelAction *curr) const
2214 void *location = curr->get_location();
2215 action_list_t *list = get_safe_ptr_action(obj_map, location);
2216 /* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
/* Reverse scan: first match is the most recent qualifying write;
 * curr itself is excluded */
2217 action_list_t::reverse_iterator rit;
2218 for (rit = list->rbegin(); rit != list->rend(); rit++)
2219 if ((*rit)->is_write() && (*rit)->is_seqcst() && (*rit) != curr)
2225 * Gets the last memory_order_seq_cst fence (in the total global sequence)
2226 * performed in a particular thread, prior to a particular fence.
2227 * @param tid The ID of the thread to check
2228 * @param before_fence The fence from which to begin the search; if NULL, then
2229 * search for the most recent fence in the thread.
2230 * @return The last prior seq_cst fence in the thread, if exists; otherwise, NULL
2232 ModelAction * ModelChecker::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
2234 /* All fences should have NULL location */
2235 action_list_t *list = get_safe_ptr_action(obj_map, NULL);
2236 action_list_t::reverse_iterator rit = list->rbegin();
2239 for (; rit != list->rend(); rit++)
2240 if (*rit == before_fence)
2243 ASSERT(*rit == before_fence);
2247 for (; rit != list->rend(); rit++)
2248 if ((*rit)->is_fence() && (tid == (*rit)->get_tid()) && (*rit)->is_seqcst())
2254 * Gets the last unlock operation performed on a particular mutex (i.e., memory
2255 * location). This function identifies the mutex according to the current
2256 * action, which is presumed to perform on the same mutex.
2257 * @param curr The current ModelAction; also denotes the object location to
2259 * @return The last unlock operation
2261 ModelAction * ModelChecker::get_last_unlock(ModelAction *curr) const
2263 void *location = curr->get_location();
2264 action_list_t *list = get_safe_ptr_action(obj_map, location);
2265 /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
2266 action_list_t::reverse_iterator rit;
2267 for (rit = list->rbegin(); rit != list->rend(); rit++)
2268 if ((*rit)->is_unlock() || (*rit)->is_wait())
2273 ModelAction * ModelChecker::get_parent_action(thread_id_t tid) const
2275 ModelAction *parent = get_last_action(tid);
2277 parent = get_thread(tid)->get_creation();
2282 * Returns the clock vector for a given thread.
2283 * @param tid The thread whose clock vector we want
2284 * @return Desired clock vector
2286 ClockVector * ModelChecker::get_cv(thread_id_t tid) const
2288 return get_parent_action(tid)->get_cv();
2292 * Resolve a set of Promises with a current write. The set is provided in the
2293 * Node corresponding to @a write.
2294 * @param write The ModelAction that is fulfilling Promises
2295 * @return True if promises were resolved; false otherwise
2297 bool ModelChecker::resolve_promises(ModelAction *write)
2299 bool resolved = false;
2300 std::vector< thread_id_t, ModelAlloc<thread_id_t> > threads_to_check;
2302 for (unsigned int i = 0, promise_index = 0; promise_index < promises->size(); i++) {
2303 Promise *promise = (*promises)[promise_index];
2304 if (write->get_node()->get_promise(i)) {
2305 ModelAction *read = promise->get_action();
2306 if (read->is_rmw()) {
2307 mo_graph->addRMWEdge(write, read);
2309 read_from(read, write);
2310 //First fix up the modification order for actions that happened
2312 r_modification_order(read, write);
2313 //Next fix up the modification order for actions that happened
2315 post_r_modification_order(read, write);
2316 //Make sure the promise's value matches the write's value
2317 ASSERT(promise->get_value() == write->get_value());
2320 promises->erase(promises->begin() + promise_index);
2321 threads_to_check.push_back(read->get_tid());
2328 //Check whether reading these writes has made threads unable to
2331 for (unsigned int i = 0; i < threads_to_check.size(); i++)
2332 mo_check_promises(threads_to_check[i], write);
2338 * Compute the set of promises that could potentially be satisfied by this
2339 * action. Note that the set computation actually appears in the Node, not in
2341 * @param curr The ModelAction that may satisfy promises
2343 void ModelChecker::compute_promises(ModelAction *curr)
2345 for (unsigned int i = 0; i < promises->size(); i++) {
2346 Promise *promise = (*promises)[i];
2347 const ModelAction *act = promise->get_action();
2348 if (!act->happens_before(curr) &&
2350 !act->could_synchronize_with(curr) &&
2351 !act->same_thread(curr) &&
2352 act->get_location() == curr->get_location() &&
2353 promise->get_value() == curr->get_value()) {
2354 curr->get_node()->set_promise(i, act->is_rmw());
2359 /** Checks promises in response to change in ClockVector Threads. */
2360 void ModelChecker::check_promises(thread_id_t tid, ClockVector *old_cv, ClockVector *merge_cv)
2362 for (unsigned int i = 0; i < promises->size(); i++) {
2363 Promise *promise = (*promises)[i];
2364 const ModelAction *act = promise->get_action();
2365 if ((old_cv == NULL || !old_cv->synchronized_since(act)) &&
2366 merge_cv->synchronized_since(act)) {
2367 if (promise->increment_threads(tid)) {
2368 //Promise has failed
2369 priv->failed_promise = true;
2376 void ModelChecker::check_promises_thread_disabled() {
2377 for (unsigned int i = 0; i < promises->size(); i++) {
2378 Promise *promise = (*promises)[i];
2379 if (promise->check_promise()) {
2380 priv->failed_promise = true;
2386 /** Checks promises in response to addition to modification order for threads.
2388 * pthread is the thread that performed the read that created the promise
2390 * pread is the read that created the promise
2392 * pwrite is either the first write to same location as pread by
2393 * pthread that is sequenced after pread or the value read by the
2394 * first read to the same location as pread by pthread that is
2395 * sequenced after pread..
2397 * 1. If tid=pthread, then we check what other threads are reachable
2398 * through the modification order starting with pwrite. Those threads cannot
2399 * perform a write that will resolve the promise due to modification
2400 * order constraints.
2402 * 2. If the tid is not pthread, we check whether pwrite can reach the
2403 * action write through the modification order. If so, that thread
2404 * cannot perform a future write that will resolve the promise due to
2405 * modification order constraints.
2407 * @param tid The thread that either read from the model action
2408 * write, or actually did the model action write.
2410 * @param write The ModelAction representing the relevant write.
2412 void ModelChecker::mo_check_promises(thread_id_t tid, const ModelAction *write)
2414 void *location = write->get_location();
2415 for (unsigned int i = 0; i < promises->size(); i++) {
2416 Promise *promise = (*promises)[i];
2417 const ModelAction *act = promise->get_action();
2419 //Is this promise on the same location?
2420 if (act->get_location() != location)
2423 //same thread as the promise
2424 if (act->get_tid() == tid) {
2426 //do we have a pwrite for the promise, if not, set it
2427 if (promise->get_write() == NULL) {
2428 promise->set_write(write);
2429 //The pwrite cannot happen before the promise
2430 if (write->happens_before(act) && (write != act)) {
2431 priv->failed_promise = true;
2435 if (mo_graph->checkPromise(write, promise)) {
2436 priv->failed_promise = true;
2441 //Don't do any lookups twice for the same thread
2442 if (promise->has_sync_thread(tid))
2445 if (promise->get_write() && mo_graph->checkReachable(promise->get_write(), write)) {
2446 if (promise->increment_threads(tid)) {
2447 priv->failed_promise = true;
2455 * Compute the set of writes that may break the current pending release
2456 * sequence. This information is extracted from previous release sequence
2459 * @param curr The current ModelAction. Must be a release sequence fixup
2462 void ModelChecker::compute_relseq_breakwrites(ModelAction *curr)
2464 if (pending_rel_seqs->empty())
2467 struct release_seq *pending = pending_rel_seqs->back();
2468 for (unsigned int i = 0; i < pending->writes.size(); i++) {
2469 const ModelAction *write = pending->writes[i];
2470 curr->get_node()->add_relseq_break(write);
2473 /* NULL means don't break the sequence; just synchronize */
2474 curr->get_node()->add_relseq_break(NULL);
2478 * Build up an initial set of all past writes that this 'read' action may read
2479 * from. This set is determined by the clock vector's "happens before"
2481 * @param curr is the current ModelAction that we are exploring; it must be a
2484 void ModelChecker::build_reads_from_past(ModelAction *curr)
2486 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
2488 ASSERT(curr->is_read());
2490 ModelAction *last_sc_write = NULL;
2492 if (curr->is_seqcst())
2493 last_sc_write = get_last_seq_cst_write(curr);
2495 /* Iterate over all threads */
2496 for (i = 0; i < thrd_lists->size(); i++) {
2497 /* Iterate over actions in thread, starting from most recent */
2498 action_list_t *list = &(*thrd_lists)[i];
2499 action_list_t::reverse_iterator rit;
2500 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2501 ModelAction *act = *rit;
2503 /* Only consider 'write' actions */
2504 if (!act->is_write() || act == curr)
2507 /* Don't consider more than one seq_cst write if we are a seq_cst read. */
2508 bool allow_read = true;
2510 if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
2512 else if (curr->get_sleep_flag() && !curr->is_seqcst() && !sleep_can_read_from(curr, act))
2516 curr->get_node()->add_read_from(act);
2518 /* Include at most one act per-thread that "happens before" curr */
2519 if (act->happens_before(curr))
2524 if (DBG_ENABLED()) {
2525 model_print("Reached read action:\n");
2527 model_print("Printing may_read_from\n");
2528 curr->get_node()->print_may_read_from();
2529 model_print("End printing may_read_from\n");
2533 bool ModelChecker::sleep_can_read_from(ModelAction *curr, const ModelAction *write)
2536 /* UNINIT actions don't have a Node, and they never sleep */
2537 if (write->is_uninitialized())
2539 Node *prevnode = write->get_node()->get_parent();
2541 bool thread_sleep = prevnode->enabled_status(curr->get_tid()) == THREAD_SLEEP_SET;
2542 if (write->is_release() && thread_sleep)
2544 if (!write->is_rmw()) {
2547 if (write->get_reads_from() == NULL)
2549 write = write->get_reads_from();
2554 * @brief Create a new action representing an uninitialized atomic
2555 * @param location The memory location of the atomic object
2556 * @return A pointer to a new ModelAction
2558 ModelAction * ModelChecker::new_uninitialized_action(void *location) const
2560 ModelAction *act = (ModelAction *)snapshot_malloc(sizeof(class ModelAction));
2561 act = new (act) ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, location, 0, model_thread);
2562 act->create_cv(NULL);
2566 static void print_list(action_list_t *list, int exec_num = -1)
2568 action_list_t::iterator it;
2570 model_print("---------------------------------------------------------------------\n");
2572 model_print("Execution %d:\n", exec_num);
2574 unsigned int hash = 0;
2576 for (it = list->begin(); it != list->end(); it++) {
2578 hash = hash^(hash<<3)^((*it)->hash());
2580 model_print("HASH %u\n", hash);
2581 model_print("---------------------------------------------------------------------\n");
2584 #if SUPPORT_MOD_ORDER_DUMP
2585 void ModelChecker::dumpGraph(char *filename) const
2588 sprintf(buffer, "%s.dot", filename);
2589 FILE *file = fopen(buffer, "w");
2590 fprintf(file, "digraph %s {\n", filename);
2591 mo_graph->dumpNodes(file);
2592 ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());
2594 for (action_list_t::iterator it = action_trace->begin(); it != action_trace->end(); it++) {
2595 ModelAction *action = *it;
2596 if (action->is_read()) {
2597 fprintf(file, "N%u [label=\"%u, T%u\"];\n", action->get_seq_number(), action->get_seq_number(), action->get_tid());
2598 if (action->get_reads_from() != NULL)
2599 fprintf(file, "N%u -> N%u[label=\"rf\", color=red];\n", action->get_seq_number(), action->get_reads_from()->get_seq_number());
2601 if (thread_array[action->get_tid()] != NULL) {
2602 fprintf(file, "N%u -> N%u[label=\"sb\", color=blue];\n", thread_array[action->get_tid()]->get_seq_number(), action->get_seq_number());
2605 thread_array[action->get_tid()] = action;
2607 fprintf(file, "}\n");
2608 model_free(thread_array);
2613 /** @brief Prints an execution trace summary. */
2614 void ModelChecker::print_summary() const
2616 #if SUPPORT_MOD_ORDER_DUMP
2618 char buffername[100];
2619 sprintf(buffername, "exec%04u", stats.num_total);
2620 mo_graph->dumpGraphToFile(buffername);
2621 sprintf(buffername, "graph%04u", stats.num_total);
2622 dumpGraph(buffername);
2625 if (!isfeasibleprefix())
2626 model_print("INFEASIBLE EXECUTION!\n");
2627 print_list(action_trace, stats.num_total);
2632 * Add a Thread to the system for the first time. Should only be called once
2634 * @param t The Thread to add
2636 void ModelChecker::add_thread(Thread *t)
2638 thread_map->put(id_to_int(t->get_id()), t);
2639 scheduler->add_thread(t);
2643 * Removes a thread from the scheduler.
2644 * @param t The Thread to remove.
2646 void ModelChecker::remove_thread(Thread *t)
2648 scheduler->remove_thread(t);
2652 * @brief Get a Thread reference by its ID
2653 * @param tid The Thread's ID
2654 * @return A Thread reference
2656 Thread * ModelChecker::get_thread(thread_id_t tid) const
2658 return thread_map->get(id_to_int(tid));
2662 * @brief Get a reference to the Thread in which a ModelAction was executed
2663 * @param act The ModelAction
2664 * @return A Thread reference
2666 Thread * ModelChecker::get_thread(ModelAction *act) const
2668 return get_thread(act->get_tid());
2672 * @brief Check if a Thread is currently enabled
2673 * @param t The Thread to check
2674 * @return True if the Thread is currently enabled
2676 bool ModelChecker::is_enabled(Thread *t) const
2678 return scheduler->is_enabled(t);
2682 * @brief Check if a Thread is currently enabled
2683 * @param tid The ID of the Thread to check
2684 * @return True if the Thread is currently enabled
2686 bool ModelChecker::is_enabled(thread_id_t tid) const
2688 return scheduler->is_enabled(tid);
2692 * Switch from a user-context to the "master thread" context (a.k.a. system
2693 * context). This switch is made with the intention of exploring a particular
2694 * model-checking action (described by a ModelAction object). Must be called
2695 * from a user-thread context.
2697 * @param act The current action that will be explored. May be NULL only if
2698 * trace is exiting via an assertion (see ModelChecker::set_assert and
2699 * ModelChecker::has_asserted).
2700 * @return Return the value returned by the current action
2702 uint64_t ModelChecker::switch_to_master(ModelAction *act)
2705 Thread *old = thread_current();
2706 set_current_action(act);
2707 old->set_state(THREAD_READY);
2708 if (Thread::swap(old, &system_context) < 0) {
2709 perror("swap threads");
2712 return old->get_return_value();
2716 * Takes the next step in the execution, if possible.
2717 * @param curr The current step to take
2718 * @return Returns true (success) if a step was taken and false otherwise.
2720 bool ModelChecker::take_step(ModelAction *curr)
2725 Thread *curr_thrd = get_thread(curr);
2726 ASSERT(curr_thrd->get_state() == THREAD_READY);
2728 curr = check_current_action(curr);
2730 /* Infeasible -> don't take any more steps */
2731 if (is_infeasible())
2733 else if (isfeasibleprefix() && have_bug_reports()) {
2738 if (params.bound != 0)
2739 if (priv->used_sequence_numbers > params.bound)
2742 if (curr_thrd->is_blocked() || curr_thrd->is_complete())
2743 scheduler->remove_thread(curr_thrd);
2745 Thread *next_thrd = get_next_thread(curr);
2746 next_thrd = scheduler->next_thread(next_thrd);
2748 DEBUG("(%d, %d)\n", curr_thrd ? id_to_int(curr_thrd->get_id()) : -1,
2749 next_thrd ? id_to_int(next_thrd->get_id()) : -1);
2752 * Launch end-of-execution release sequence fixups only when there are:
2754 * (1) no more user threads to run (or when execution replay chooses
2755 * the 'model_thread')
2756 * (2) pending release sequences
2757 * (3) pending assertions (i.e., data races)
2758 * (4) no pending promises
2760 if (!pending_rel_seqs->empty() && (!next_thrd || next_thrd->is_model_thread()) &&
2761 is_feasible_prefix_ignore_relseq() && !unrealizedraces.empty()) {
2762 model_print("*** WARNING: release sequence fixup action (%zu pending release seuqences) ***\n",
2763 pending_rel_seqs->size());
2764 ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
2765 std::memory_order_seq_cst, NULL, VALUE_NONE,
2767 set_current_action(fixup);
2771 /* next_thrd == NULL -> don't take any more steps */
2775 next_thrd->set_state(THREAD_RUNNING);
2777 if (next_thrd->get_pending() != NULL) {
2778 /* restart a pending action */
2779 set_current_action(next_thrd->get_pending());
2780 next_thrd->set_pending(NULL);
2781 next_thrd->set_state(THREAD_READY);
2785 /* Return false only if swap fails with an error */
2786 return (Thread::swap(&system_context, next_thrd) == 0);
2789 /** Wrapper to run the user's main function, with appropriate arguments */
2790 void user_main_wrapper(void *)
2792 user_main(model->params.argc, model->params.argv);
2795 /** @brief Run ModelChecker for the user program */
/* NOTE(review): this definition is truncated in the current view — the
 * opening brace, the `do {` loop header, the declaration of `user_thread`,
 * and the function's closing lines are not visible; comments below cover
 * only the visible statements. */
2796 void ModelChecker::run()
/* One iteration per explored execution: spawn the user program's thread */
2800 Thread *t = new Thread(&user_thread, &user_main_wrapper, NULL);
2804 /* Run user thread up to its first action */
2805 scheduler->next_thread(t);
2806 Thread::swap(&system_context, t);
2808 /* Wait for all threads to complete */
2809 while (take_step(priv->current_action));
/* Repeat until model checking has explored all executions */
2810 } while (next_execution());