10 #include "snapshot-interface.h"
12 #include "clockvector.h"
13 #include "cyclegraph.h"
16 #include "threads-model.h"
19 #define INITIAL_THREAD_ID 0
/*
 * Fragment of the bug_message helper (its struct declaration line is
 * elided from this excerpt): owns a heap-formatted " [BUG] <msg>\n"
 * string, allocated with the snapshotting allocator.
 */
24 bug_message(const char *str) {
25 const char *fmt = " [BUG] %s\n";
/* The "%s" (2 bytes) in fmt is replaced by str, so the formatted
 * string plus NUL needs strlen(fmt) + strlen(str) - 1 bytes; the
 * allocation below is therefore sufficient. */
26 msg = (char *)snapshot_malloc(strlen(fmt) + strlen(str));
27 sprintf(msg, fmt, str);
/* Free the formatted message, if one was allocated */
29 ~bug_message() { if (msg) snapshot_free(msg); }
/* Print the stored message verbatim */
32 void print() { model_print("%s", msg); }
38 * Structure for holding small ModelChecker members that should be snapshotted
40 struct model_snapshot_members {
41 model_snapshot_members() :
43 /* First thread created will have id INITIAL_THREAD_ID */
44 next_thread_id(INITIAL_THREAD_ID),
45 used_sequence_numbers(0),
49 failed_promise(false),
50 too_many_reads(false),
51 bad_synchronization(false),
/* Destructor: release each stashed bug_message (loop body elided in
 * this excerpt) */
55 ~model_snapshot_members() {
56 for (unsigned int i = 0; i < bugs.size(); i++)
/* The ModelAction currently being processed (set via set_current_action) */
61 ModelAction *current_action;
/* Next thread ID to hand out; also serves as the thread count */
62 unsigned int next_thread_id;
/* Highest sequence number assigned so far (see get_next_seq_num) */
63 modelclock_t used_sequence_numbers;
/* Most recent cached backtracking point for this execution */
64 ModelAction *next_backtrack;
/* Bug reports collected during this execution (snapshot-allocated) */
65 std::vector< bug_message *, SnapshotAlloc<bug_message *> > bugs;
/* Execution statistics (see record_stats / print_stats) */
66 struct execution_stats stats;
69 /** @brief Incorrectly-ordered synchronization was made */
70 bool bad_synchronization;
76 /** @brief Constructor */
77 ModelChecker::ModelChecker(struct model_params params) :
78 /* Initialize default scheduler */
80 scheduler(new Scheduler()),
/* No divergence observed yet in this execution sequence */
82 earliest_diverge(NULL),
/* Master list of all actions, in execution order */
83 action_trace(new action_list_t()),
/* Maps thread IDs to their Thread objects */
84 thread_map(new HashTable<int, Thread *, int>()),
/* Per-memory-location action lists and waiter queues */
85 obj_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
86 lock_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
87 condvar_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
/* Per-location, per-thread action lists */
88 obj_thrd_map(new HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4 >()),
89 promises(new std::vector< Promise *, SnapshotAlloc<Promise *> >()),
90 futurevalues(new std::vector< struct PendingFutureValue, SnapshotAlloc<struct PendingFutureValue> >()),
/* Release sequences not yet resolved (see process_relseq_fixup) */
91 pending_rel_seqs(new std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >()),
/* Sized 1 so the model-checker thread's slot exists up front */
92 thrd_last_action(new std::vector< ModelAction *, SnapshotAlloc<ModelAction *> >(1)),
93 thrd_last_fence_release(new std::vector< ModelAction *, SnapshotAlloc<ModelAction *> >()),
94 node_stack(new NodeStack()),
/* Small snapshotted members live behind one pointer (see struct above) */
95 priv(new struct model_snapshot_members()),
96 mo_graph(new CycleGraph())
98 /* Initialize a model-checker thread, for special ModelActions */
99 model_thread = new Thread(get_next_id());
100 thread_map->put(id_to_int(model_thread->get_id()), model_thread);
103 /** @brief Destructor */
104 ModelChecker::~ModelChecker()
/* Delete every Thread registered in thread_map */
106 for (unsigned int i = 0; i < get_num_threads(); i++)
107 delete thread_map->get(i);
/* Tear down owned containers; deletes for several other members
 * (e.g. obj_map, action_trace) are elided from this excerpt. */
112 delete lock_waiters_map;
113 delete condvar_waiters_map;
/* Promises are owned by this object; free each before the vector */
116 for (unsigned int i = 0; i < promises->size(); i++)
117 delete (*promises)[i];
120 delete pending_rel_seqs;
122 delete thrd_last_action;
123 delete thrd_last_fence_release;
/* Fetch the action list for a location, lazily creating one if absent
 * (the NULL-check and hash->put lines are elided from this excerpt). */
130 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr)
132 action_list_t *tmp = hash->get(ptr);
134 tmp = new action_list_t();
/* Same lazy-create pattern as get_safe_ptr_action, but for the
 * per-location vector of per-thread action lists. */
140 static std::vector<action_list_t> * get_safe_ptr_vect_action(HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4> * hash, void * ptr)
142 std::vector<action_list_t> *tmp = hash->get(ptr);
144 tmp = new std::vector<action_list_t>();
151 * Restores user program to initial state and resets all model-checker data
154 void ModelChecker::reset_to_initial_state()
156 DEBUG("+++ Resetting to initial state +++\n");
/* Rewind the exploration stack so replay starts from the root */
157 node_stack->reset_execution();
159 /* Print all model-checker output before rollback */
/* Roll the user program's memory back to the initial snapshot */
162 snapshotObject->backTrackBeforeStep(0);
165 /** @return a thread ID for a new Thread */
166 thread_id_t ModelChecker::get_next_id()
168 return priv->next_thread_id++;
171 /** @return the number of user threads created during this execution */
172 unsigned int ModelChecker::get_num_threads() const
174 return priv->next_thread_id;
177 /** @return The currently executing Thread. */
178 Thread * ModelChecker::get_current_thread() const
180 return scheduler->get_current_thread();
183 /** @return a sequence number for a new ModelAction */
184 modelclock_t ModelChecker::get_next_seq_num()
186 return ++priv->used_sequence_numbers;
189 Node * ModelChecker::get_curr_node() const
191 return node_stack->get_head();
195 * @brief Choose the next thread to execute.
197 * This function chooses the next thread that should execute. It can force the
198 * adjacency of read/write portions of a RMW action, force THREAD_CREATE to be
199 * followed by a THREAD_START, or it can enforce execution replay/backtracking.
200 * The model-checker may have no preference regarding the next thread (i.e.,
201 * when exploring a new execution ordering), in which case this will return
203 * @param curr The current ModelAction. This action might guide the choice of
205 * @return The next thread to run. If the model-checker has no preference, NULL.
207 Thread * ModelChecker::get_next_thread(ModelAction *curr)
212 /* Do not split atomic actions. */
214 return thread_current();
215 /* The THREAD_CREATE action points to the created Thread */
216 else if (curr->get_type() == THREAD_CREATE)
217 return (Thread *)curr->get_location();
220 /* Have we completed exploring the preselected path? */
224 /* Else, we are trying to replay an execution */
225 ModelAction *next = node_stack->get_next()->get_action();
/* Track the earliest divergence since the last feasible execution,
 * for the diagnostic output in print_execution() */
227 if (next == diverge) {
228 if (earliest_diverge == NULL || *diverge < *earliest_diverge)
229 earliest_diverge = diverge;
231 Node *nextnode = next->get_node();
232 Node *prevnode = nextnode->get_parent();
233 scheduler->update_sleep_set(prevnode);
235 /* Reached divergence point */
/* Try the node's unexplored choices in fixed priority order: misc
 * index, promise set, read-from value, future value, release-seq
 * break. The first increment_*() that succeeds determines how this
 * execution diverges, and the stack is popped for replay. */
236 if (nextnode->increment_misc()) {
237 /* The next node will try to satisfy a different misc_index values. */
238 tid = next->get_tid();
239 node_stack->pop_restofstack(2);
240 } else if (nextnode->increment_promise()) {
241 /* The next node will try to satisfy a different set of promises. */
242 tid = next->get_tid();
243 node_stack->pop_restofstack(2);
244 } else if (nextnode->increment_read_from()) {
245 /* The next node will read from a different value. */
246 tid = next->get_tid();
247 node_stack->pop_restofstack(2);
248 } else if (nextnode->increment_future_value()) {
249 /* The next node will try to read from a different future value. */
250 tid = next->get_tid();
251 node_stack->pop_restofstack(2);
252 } else if (nextnode->increment_relseq_break()) {
253 /* The next node will try to resolve a release sequence differently */
254 tid = next->get_tid();
255 node_stack->pop_restofstack(2);
/* No local choice remains at this node: fall back to the parent's
 * backtracking set and put this thread to sleep */
257 /* Make a different thread execute for next step */
258 scheduler->add_sleep(thread_map->get(id_to_int(next->get_tid())));
259 tid = prevnode->get_next_backtrack();
260 /* Make sure the backtracked thread isn't sleeping. */
261 node_stack->pop_restofstack(1);
262 if (diverge == earliest_diverge) {
263 earliest_diverge = prevnode->get_action();
266 /* The correct sleep set is in the parent node. */
269 DEBUG("*** Divergence point ***\n");
/* Not at the divergence point: replay the recorded thread choice */
273 tid = next->get_tid();
275 DEBUG("*** ModelChecker chose next thread = %d ***\n", id_to_int(tid));
276 ASSERT(tid != THREAD_ID_T_NONE);
277 return thread_map->get(id_to_int(tid));
281 * We need to know what the next actions of all threads in the sleep
282 * set will be. This method computes them and stores the actions at
283 * the corresponding thread object's pending action.
286 void ModelChecker::execute_sleep_set()
288 for (unsigned int i = 0; i < get_num_threads(); i++) {
289 thread_id_t tid = int_to_id(i);
290 Thread *thr = get_thread(tid);
/* Only compute for sleeping threads that have no pending action yet */
291 if (scheduler->is_sleep_set(thr) && thr->get_pending() == NULL) {
292 thr->set_state(THREAD_RUNNING);
293 scheduler->next_thread(thr);
/* Run the sleeping thread one step so it publishes its next action
 * (via set_current_action), then record that action as pending */
294 Thread::swap(&system_context, thr);
295 priv->current_action->set_sleep_flag();
296 thr->set_pending(priv->current_action);
/* Wake any sleeping thread whose pending action could synchronize with
 * curr; RMW-read steps (is_rmwr) never trigger a wakeup here. */
301 void ModelChecker::wake_up_sleeping_actions(ModelAction *curr)
303 for (unsigned int i = 0; i < get_num_threads(); i++) {
304 Thread *thr = get_thread(int_to_id(i));
305 if (scheduler->is_sleep_set(thr)) {
306 ModelAction *pending_act = thr->get_pending();
307 if ((!curr->is_rmwr()) && pending_act->could_synchronize_with(curr))
308 //Remove this thread from sleep set
309 scheduler->remove_sleep(thr);
314 /** @brief Alert the model-checker that an incorrectly-ordered
315 * synchronization was made */
316 void ModelChecker::set_bad_synchronization()
318 priv->bad_synchronization = true;
321 bool ModelChecker::has_asserted() const
323 return priv->asserted;
326 void ModelChecker::set_assert()
328 priv->asserted = true;
332 * Check if we are in a deadlock. Should only be called at the end of an
333 * execution, although it should not give false positives in the middle of an
334 * execution (there should be some ENABLED thread).
336 * @return True if program is in a deadlock; false otherwise
338 bool ModelChecker::is_deadlocked() const
340 bool blocking_threads = false;
341 for (unsigned int i = 0; i < get_num_threads(); i++) {
342 thread_id_t tid = int_to_id(i);
345 Thread *t = get_thread(tid);
/* A user thread with a stashed pending action is blocked waiting */
346 if (!t->is_model_thread() && t->get_pending())
347 blocking_threads = true;
349 return blocking_threads;
353 * Check if this is a complete execution. That is, have all thread completed
354 * execution (rather than exiting because sleep sets have forced a redundant
357 * @return True if the execution is complete.
359 bool ModelChecker::is_complete_execution() const
/* Any still-enabled thread means the execution did not run to the end
 * (the early-return and final return lines are elided in this excerpt) */
361 for (unsigned int i = 0; i < get_num_threads(); i++)
362 if (is_enabled(int_to_id(i)))
368 * @brief Assert a bug in the executing program.
370 * Use this function to assert any sort of bug in the user program. If the
371 * current trace is feasible (actually, a prefix of some feasible execution),
372 * then this execution will be aborted, printing the appropriate message. If
373 * the current trace is not yet feasible, the error message will be stashed and
374 * printed if the execution ever becomes feasible.
376 * @param msg Descriptive message for the bug (do not include newline char)
377 * @return True if bug is immediately-feasible
379 bool ModelChecker::assert_bug(const char *msg)
/* Stash the report; it is printed later by print_bugs() */
381 priv->bugs.push_back(new bug_message(msg));
383 if (isfeasibleprefix()) {
391 * @brief Assert a bug in the executing program, asserted by a user thread
392 * @see ModelChecker::assert_bug
393 * @param msg Descriptive message for the bug (do not include newline char)
395 void ModelChecker::assert_user_bug(const char *msg)
397 /* If feasible bug, bail out now */
/* Yield back to the model-checker (system) context immediately */
399 switch_to_master(NULL);
402 /** @return True, if any bugs have been reported for this execution */
403 bool ModelChecker::have_bug_reports() const
405 return priv->bugs.size() != 0;
408 /** @brief Print bug report listing for this execution (if any bugs exist) */
409 void ModelChecker::print_bugs() const
411 if (have_bug_reports()) {
/* Header line pluralizes "bug" based on the count */
412 model_print("Bug report: %zu bug%s detected\n",
414 priv->bugs.size() > 1 ? "s" : "");
415 for (unsigned int i = 0; i < priv->bugs.size(); i++)
416 priv->bugs[i]->print();
421 * @brief Record end-of-execution stats
423 * Must be run when exiting an execution. Records various stats.
424 * @see struct execution_stats
426 void ModelChecker::record_stats()
/* Classify the execution into exactly one bucket: infeasible, buggy,
 * complete, or redundant (sleep-set-pruned). */
429 if (!isfeasibleprefix())
430 stats.num_infeasible++;
431 else if (have_bug_reports())
432 stats.num_buggy_executions++;
433 else if (is_complete_execution())
434 stats.num_complete++;
436 stats.num_redundant++;
439 /** @brief Print execution stats */
440 void ModelChecker::print_stats() const
442 model_print("Number of complete, bug-free executions: %d\n", stats.num_complete);
443 model_print("Number of redundant executions: %d\n", stats.num_redundant);
444 model_print("Number of buggy executions: %d\n", stats.num_buggy_executions);
445 model_print("Number of infeasible executions: %d\n", stats.num_infeasible);
446 model_print("Total executions: %d\n", stats.num_total);
447 model_print("Total nodes created: %d\n", node_stack->get_total_nodes());
451 * @brief End-of-execution print
452 * @param printbugs Should any existing bugs be printed?
454 void ModelChecker::print_execution(bool printbugs) const
456 print_program_output();
/* Divergence diagnostics only in verbose/debug mode */
458 if (DBG_ENABLED() || params.verbose) {
459 model_print("Earliest divergence point since last feasible execution:\n");
460 if (earliest_diverge)
461 earliest_diverge->print();
463 model_print("(Not set)\n");
469 /* Don't print invalid bugs */
469 /* Don't print invalid bugs */
478 * Queries the model-checker for more executions to explore and, if one
479 * exists, resets the model-checker state to execute a new execution.
481 * @return If there are more executions to explore, return true. Otherwise,
484 bool ModelChecker::next_execution()
487 /* Is this execution a feasible execution that's worth bug-checking? */
488 bool complete = isfeasibleprefix() && (is_complete_execution() ||
491 /* End-of-execution bug checks */
494 assert_bug("Deadlock detected");
502 if (DBG_ENABLED() || params.verbose || (complete && have_bug_reports()))
503 print_execution(complete);
505 clear_program_output();
508 earliest_diverge = NULL;
510 if ((diverge = get_next_backtrack()) == NULL)
514 model_print("Next execution will diverge at:\n");
518 reset_to_initial_state();
/* Find the most recent prior action (at the same location) that
 * conflicts with act, for use as a backtracking point. Each case scans
 * the per-location action list newest-to-oldest; the return statements
 * inside the loops are elided from this excerpt. */
522 ModelAction * ModelChecker::get_last_conflict(ModelAction *act)
524 switch (act->get_type()) {
529 /* Optimization: relaxed operations don't need backtracking */
530 if (act->is_relaxed())
532 /* linear search: from most recent to oldest */
533 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
534 action_list_t::reverse_iterator rit;
535 for (rit = list->rbegin(); rit != list->rend(); rit++) {
536 ModelAction *prev = *rit;
537 if (prev->could_synchronize_with(act))
543 case ATOMIC_TRYLOCK: {
544 /* linear search: from most recent to oldest */
545 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
546 action_list_t::reverse_iterator rit;
547 for (rit = list->rbegin(); rit != list->rend(); rit++) {
548 ModelAction *prev = *rit;
549 if (act->is_conflicting_lock(prev))
554 case ATOMIC_UNLOCK: {
555 /* linear search: from most recent to oldest */
556 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
557 action_list_t::reverse_iterator rit;
558 for (rit = list->rbegin(); rit != list->rend(); rit++) {
559 ModelAction *prev = *rit;
/* An unlock conflicts with another thread's failed trylock */
560 if (!act->same_thread(prev) && prev->is_failed_trylock())
566 /* linear search: from most recent to oldest */
567 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
568 action_list_t::reverse_iterator rit;
569 for (rit = list->rbegin(); rit != list->rend(); rit++) {
570 ModelAction *prev = *rit;
/* A wait conflicts with other threads' failed trylocks and notifies */
571 if (!act->same_thread(prev) && prev->is_failed_trylock())
573 if (!act->same_thread(prev) && prev->is_notify())
579 case ATOMIC_NOTIFY_ALL:
580 case ATOMIC_NOTIFY_ONE: {
581 /* linear search: from most recent to oldest */
582 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
583 action_list_t::reverse_iterator rit;
584 for (rit = list->rbegin(); rit != list->rend(); rit++) {
585 ModelAction *prev = *rit;
/* A notify conflicts with other threads' waits */
586 if (!act->same_thread(prev) && prev->is_wait())
597 /** This method finds backtracking points where we should try to
598 * reorder the parameter ModelAction against.
600 * @param the ModelAction to find backtracking points for.
602 void ModelChecker::set_backtracking(ModelAction *act)
604 Thread *t = get_thread(act);
/* No conflicting prior action => nothing to backtrack over */
605 ModelAction *prev = get_last_conflict(act);
609 Node *node = prev->get_node()->get_parent();
/* If act's thread was enabled at the conflict node, only that thread
 * needs a backtrack mark; otherwise consider every thread. */
611 int low_tid, high_tid;
612 if (node->is_enabled(t)) {
613 low_tid = id_to_int(act->get_tid());
614 high_tid = low_tid + 1;
617 high_tid = get_num_threads();
620 for (int i = low_tid; i < high_tid; i++) {
621 thread_id_t tid = int_to_id(i);
623 /* Make sure this thread can be enabled here. */
624 if (i >= node->get_num_threads())
627 /* Don't backtrack into a point where the thread is disabled or sleeping. */
628 if (node->enabled_status(tid) != THREAD_ENABLED)
631 /* Check if this has been explored already */
632 if (node->has_been_explored(tid))
635 /* See if fairness allows */
636 if (model->params.fairwindow != 0 && !node->has_priority(tid)) {
/* Skip tid if some other enabled thread holds fairness priority
 * (the skip logic after this loop is elided in this excerpt) */
638 for (int t = 0; t < node->get_num_threads(); t++) {
639 thread_id_t tother = int_to_id(t);
640 if (node->is_enabled(tother) && node->has_priority(tother)) {
648 /* Cache the latest backtracking point */
649 set_latest_backtrack(prev);
651 /* If this is a new backtracking point, mark the tree */
652 if (!node->set_backtrack(tid))
654 DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
655 id_to_int(prev->get_tid()),
656 id_to_int(t->get_id()));
665 * @brief Cache a backtracking point as the "most recent", if eligible
667 * Note that this does not prepare the NodeStack for this backtracking
668 * operation, it only caches the action on a per-execution basis
670 * @param act The operation at which we should explore a different next action
671 * (i.e., backtracking point)
672 * @return True, if this action is now the most recent backtracking point;
675 bool ModelChecker::set_latest_backtrack(ModelAction *act)
/* Replace the cached point only if act is later in the trace (or no
 * point is cached yet); the return statements are elided here. */
677 if (!priv->next_backtrack || *act > *priv->next_backtrack) {
678 priv->next_backtrack = act;
685 * Returns last backtracking point. The model checker will explore a different
686 * path for this point in the next execution.
687 * @return The ModelAction at which the next execution should diverge.
689 ModelAction * ModelChecker::get_next_backtrack()
/* Consume the cached point: clear it so each backtrack is used once */
691 ModelAction *next = priv->next_backtrack;
692 priv->next_backtrack = NULL;
697 * Processes a read or rmw model action.
698 * @param curr is the read model action to process.
699 * @param second_part_of_rmw is boolean that is true is this is the second action of a rmw.
700 * @return True if processing this read updates the mo_graph.
702 bool ModelChecker::process_read(ModelAction *curr, bool second_part_of_rmw)
704 uint64_t value = VALUE_NONE;
705 bool updated = false;
707 const ModelAction *reads_from = curr->get_node()->get_read_from();
708 if (reads_from != NULL) {
/* Stage mo_graph edits so they can be rolled back if infeasible */
709 mo_graph->startChanges();
711 value = reads_from->get_value();
712 bool r_status = false;
714 if (!second_part_of_rmw) {
715 check_recency(curr, reads_from);
716 r_status = r_modification_order(curr, reads_from);
/* If this choice made the execution infeasible and another read-from
 * or future value remains to try, undo and retry */
720 if (!second_part_of_rmw && is_infeasible() && (curr->get_node()->increment_read_from() || curr->get_node()->increment_future_value())) {
721 mo_graph->rollbackChanges();
722 priv->too_many_reads = false;
/* Commit the read: establish rf, keep the mo edits, recheck promises */
726 read_from(curr, reads_from);
727 mo_graph->commitChanges();
728 mo_check_promises(curr->get_tid(), reads_from);
731 } else if (!second_part_of_rmw) {
732 /* Read from future value */
733 value = curr->get_node()->get_future_value();
734 modelclock_t expiration = curr->get_node()->get_future_value_expiration();
735 curr->set_read_from(NULL);
/* Promise: some later write must supply this value before expiration */
736 Promise *valuepromise = new Promise(curr, value, expiration);
737 promises->push_back(valuepromise);
739 get_thread(curr)->set_return_value(value);
745 * Processes a lock, trylock, or unlock model action. @param curr is
746 * the read model action to process.
748 * The try lock operation checks whether the lock is taken. If not,
749 * it falls to the normal lock operation case. If so, it returns
752 * The lock operation has already been checked that it is enabled, so
753 * it just grabs the lock and synchronizes with the previous unlock.
755 * The unlock operation has to re-enable all of the threads that are
756 * waiting on the lock.
758 * @return True if synchronization was updated; false otherwise
760 bool ModelChecker::process_mutex(ModelAction *curr)
762 std::mutex *mutex = NULL;
763 struct std::mutex_state *state = NULL;
/* For wait actions the mutex is passed through the action's value,
 * not its location (the location is the condition variable) */
765 if (curr->is_trylock() || curr->is_lock() || curr->is_unlock()) {
766 mutex = (std::mutex *)curr->get_location();
767 state = mutex->get_state();
768 } else if (curr->is_wait()) {
769 mutex = (std::mutex *)curr->get_value();
770 state = mutex->get_state();
773 switch (curr->get_type()) {
774 case ATOMIC_TRYLOCK: {
/* Trylock succeeds iff the mutex is currently free */
775 bool success = !state->islocked;
776 curr->set_try_lock(success);
778 get_thread(curr)->set_return_value(0);
781 get_thread(curr)->set_return_value(1);
783 //otherwise fall into the lock case
/* Flag accesses to a mutex that predate its initialization */
785 if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
786 assert_bug("Lock access before initialization");
787 state->islocked = true;
788 ModelAction *unlock = get_last_unlock(curr);
789 //synchronize with the previous unlock statement
790 if (unlock != NULL) {
791 curr->synchronize_with(unlock);
796 case ATOMIC_UNLOCK: {
798 state->islocked = false;
799 //wake up the other threads
800 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, curr->get_location());
801 //activate all the waiting threads
802 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
803 scheduler->wake(get_thread(*rit));
/* ATOMIC_WAIT: release the mutex (named by the action's value)... */
810 state->islocked = false;
811 //wake up the other threads
812 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, (void *) curr->get_value());
813 //activate all the waiting threads
814 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
815 scheduler->wake(get_thread(*rit));
818 //check whether we should go to sleep or not...simulate spurious failures
/* misc == 0 selects the "really wait" branch; other misc values model
 * a spurious wakeup (no sleep) */
819 if (curr->get_node()->get_misc() == 0) {
820 get_safe_ptr_action(condvar_waiters_map, curr->get_location())->push_back(curr);
822 scheduler->sleep(get_current_thread());
826 case ATOMIC_NOTIFY_ALL: {
827 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
828 //activate all the waiting threads
829 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
830 scheduler->wake(get_thread(*rit));
835 case ATOMIC_NOTIFY_ONE: {
836 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
/* misc index chooses WHICH waiter to wake, so all choices explored */
837 int wakeupthread = curr->get_node()->get_misc();
838 action_list_t::iterator it = waiters->begin();
839 advance(it, wakeupthread);
840 scheduler->wake(get_thread(*it));
852 * Process a write ModelAction
853 * @param curr The ModelAction to process
854 * @return True if the mo_graph was updated or promises were resolved
856 bool ModelChecker::process_write(ModelAction *curr)
858 bool updated_mod_order = w_modification_order(curr);
859 bool updated_promises = resolve_promises(curr);
/* Once no promises remain outstanding, revisit stashed future values
 * against the (now more complete) modification order */
861 if (promises->size() == 0) {
862 for (unsigned int i = 0; i < futurevalues->size(); i++) {
863 struct PendingFutureValue pfv = (*futurevalues)[i];
864 //Do more ambitious checks now that mo is more complete
865 if (mo_may_allow(pfv.writer, pfv.act) &&
866 pfv.act->get_node()->add_future_value(pfv.writer->get_value(), pfv.writer->get_seq_number() + params.maxfuturedelay))
867 set_latest_backtrack(pfv.act);
869 futurevalues->resize(0);
872 mo_graph->commitChanges();
873 mo_check_promises(curr->get_tid(), curr);
875 get_thread(curr)->set_return_value(VALUE_NONE);
876 return updated_mod_order || updated_promises;
880 * Process a fence ModelAction
881 * @param curr The ModelAction to process
882 * @return True if synchronization was updated
884 bool ModelChecker::process_fence(ModelAction *curr)
887 * fence-relaxed: no-op
888 * fence-release: only log the occurrence (not in this function), for
889 * use in later synchronization
890 * fence-acquire (this function): search for hypothetical release
893 bool updated = false;
894 if (curr->is_acquire()) {
895 action_list_t *list = action_trace;
896 action_list_t::reverse_iterator rit;
897 /* Find X : is_read(X) && X --sb-> curr */
898 for (rit = list->rbegin(); rit != list->rend(); rit++) {
899 ModelAction *act = *rit;
/* Only actions sequenced-before curr in the same thread matter */
902 if (act->get_tid() != curr->get_tid())
904 /* Stop at the beginning of the thread */
905 if (act->is_thread_start())
907 /* Stop once we reach a prior fence-acquire */
908 if (act->is_fence() && act->is_acquire())
912 /* read-acquire will find its own release sequences */
913 if (act->is_acquire())
916 /* Establish hypothetical release sequences */
917 rel_heads_list_t release_heads;
918 get_release_seq_heads(curr, act, &release_heads);
919 for (unsigned int i = 0; i < release_heads.size(); i++)
920 if (!curr->synchronize_with(release_heads[i]))
921 set_bad_synchronization();
922 if (release_heads.size() != 0)
930 * @brief Process the current action for thread-related activity
932 * Performs current-action processing for a THREAD_* ModelAction. Processes
933 * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
934 * synchronization, etc. This function is a no-op for non-THREAD actions
935 * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
937 * @param curr The current action
938 * @return True if synchronization was updated or a thread completed
940 bool ModelChecker::process_thread_action(ModelAction *curr)
942 bool updated = false;
944 switch (curr->get_type()) {
945 case THREAD_CREATE: {
946 Thread *th = (Thread *)curr->get_location();
947 th->set_creation(curr);
/* THREAD_JOIN: synchronize with the joined thread's last action */
951 Thread *blocking = (Thread *)curr->get_location();
952 ModelAction *act = get_last_action(blocking->get_id());
953 curr->synchronize_with(act);
954 updated = true; /* trigger rel-seq checks */
957 case THREAD_FINISH: {
958 Thread *th = get_thread(curr);
/* Wake every thread blocked in a join on this thread */
959 while (!th->wait_list_empty()) {
960 ModelAction *act = th->pop_wait_list();
961 scheduler->wake(get_thread(act));
964 updated = true; /* trigger rel-seq checks */
/* THREAD_START (presumably): check promises against the new clock */
968 check_promises(curr->get_tid(), NULL, curr->get_cv());
979 * @brief Process the current action for release sequence fixup activity
981 * Performs model-checker release sequence fixups for the current action,
982 * forcing a single pending release sequence to break (with a given, potential
983 * "loose" write) or to complete (i.e., synchronize). If a pending release
984 * sequence forms a complete release sequence, then we must perform the fixup
985 * synchronization, mo_graph additions, etc.
987 * @param curr The current action; must be a release sequence fixup action
988 * @param work_queue The work queue to which to add work items as they are
991 void ModelChecker::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
/* The node's chosen "break write"; NULL means "complete the sequence" */
993 const ModelAction *write = curr->get_node()->get_relseq_break();
994 struct release_seq *sequence = pending_rel_seqs->back();
995 pending_rel_seqs->pop_back();
997 ModelAction *acquire = sequence->acquire;
998 const ModelAction *rf = sequence->rf;
999 const ModelAction *release = sequence->release;
1003 ASSERT(release->same_thread(rf));
1005 if (write == NULL) {
1007 * @todo Forcing a synchronization requires that we set
1008 * modification order constraints. For instance, we can't allow
1009 * a fixup sequence in which two separate read-acquire
1010 * operations read from the same sequence, where the first one
1011 * synchronizes and the other doesn't. Essentially, we can't
1012 * allow any writes to insert themselves between 'release' and
1016 /* Must synchronize */
1017 if (!acquire->synchronize_with(release)) {
1018 set_bad_synchronization();
1021 /* Re-check all pending release sequences */
1022 work_queue->push_back(CheckRelSeqWorkEntry(NULL));
1023 /* Re-check act for mo_graph edges */
1024 work_queue->push_back(MOEdgeWorkEntry(acquire));
1026 /* propagate synchronization to later actions */
1027 action_list_t::reverse_iterator rit = action_trace->rbegin();
1028 for (; (*rit) != acquire; rit++) {
1029 ModelAction *propagate = *rit;
1030 if (acquire->happens_before(propagate)) {
1031 propagate->synchronize_with(acquire);
1032 /* Re-check 'propagate' for mo_graph edges */
1033 work_queue->push_back(MOEdgeWorkEntry(propagate));
1037 /* Break release sequence with new edges:
1038 * release --mo--> write --mo--> rf */
1039 mo_graph->addEdge(release, write);
1040 mo_graph->addEdge(write, rf);
1043 /* See if we have realized a data race */
1048 * Initialize the current action by performing one or more of the following
1049 * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
1050 * in the NodeStack, manipulating backtracking sets, allocating and
1051 * initializing clock vectors, and computing the promises to fulfill.
1053 * @param curr The current action, as passed from the user context; may be
1054 * freed/invalidated after the execution of this function, with a different
1055 * action "returned" its place (pass-by-reference)
1056 * @return True if curr is a newly-explored action; false otherwise
1058 bool ModelChecker::initialize_curr_action(ModelAction **curr)
1060 ModelAction *newcurr;
/* Second half of an RMW: merge it into the earlier RMWR action */
1062 if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
1063 newcurr = process_rmw(*curr);
1066 if (newcurr->is_rmw())
1067 compute_promises(newcurr);
/* Otherwise: assign a sequence number and step the NodeStack */
1073 (*curr)->set_seq_number(get_next_seq_num());
1075 newcurr = node_stack->explore_action(*curr, scheduler->get_enabled_array());
1077 /* First restore type and order in case of RMW operation */
1078 if ((*curr)->is_rmwr())
1079 newcurr->copy_typeandorder(*curr);
1081 ASSERT((*curr)->get_location() == newcurr->get_location());
1082 newcurr->copy_from_new(*curr);
1084 /* Discard duplicate ModelAction; use action from NodeStack */
1087 /* Always compute new clock vector */
1088 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1091 return false; /* Action was explored previously */
1095 /* Always compute new clock vector */
1096 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1098 /* Assign most recent release fence */
1099 newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
1102 * Perform one-time actions when pushing new ModelAction onto
1105 if (newcurr->is_write())
1106 compute_promises(newcurr);
1107 else if (newcurr->is_relseq_fixup())
1108 compute_relseq_breakwrites(newcurr);
/* wait: misc in {0,1} models real wait vs. spurious wakeup */
1109 else if (newcurr->is_wait())
1110 newcurr->get_node()->set_misc_max(2);
/* notify_one: misc selects which waiter to wake */
1111 else if (newcurr->is_notify_one()) {
1112 newcurr->get_node()->set_misc_max(get_safe_ptr_action(condvar_waiters_map, newcurr->get_location())->size());
1114 return true; /* This was a new ModelAction */
1119 * @brief Establish reads-from relation between two actions
1121 * Perform basic operations involved with establishing a concrete rf relation,
1122 * including setting the ModelAction data and checking for release sequences.
1124 * @param act The action that is reading (must be a read)
1125 * @param rf The action from which we are reading (must be a write)
1127 * @return True if this read established synchronization
1129 bool ModelChecker::read_from(ModelAction *act, const ModelAction *rf)
1131 act->set_read_from(rf);
/* An acquire read synchronizes with the heads of any release
 * sequences that rf belongs to */
1132 if (rf != NULL && act->is_acquire()) {
1133 rel_heads_list_t release_heads;
1134 get_release_seq_heads(act, act, &release_heads);
1135 int num_heads = release_heads.size();
1136 for (unsigned int i = 0; i < release_heads.size(); i++)
1137 if (!act->synchronize_with(release_heads[i])) {
1138 set_bad_synchronization();
1141 return num_heads > 0;
1147 * @brief Check whether a model action is enabled.
1149 * Checks whether a lock or join operation would be successful (i.e., is the
1150 * lock already locked, or is the joined thread already complete). If not, put
1151 * the action in a waiter list.
1153 * @param curr is the ModelAction to check whether it is enabled.
1154 * @return a bool that indicates whether the action is enabled.
1156 bool ModelChecker::check_action_enabled(ModelAction *curr) {
1157 if (curr->is_lock()) {
1158 std::mutex *lock = (std::mutex *)curr->get_location();
1159 struct std::mutex_state *state = lock->get_state();
/* Lock held => block: queue the action until the unlock wakes us */
1160 if (state->islocked) {
1161 //Stick the action in the appropriate waiting queue
1162 get_safe_ptr_action(lock_waiters_map, curr->get_location())->push_back(curr);
1165 } else if (curr->get_type() == THREAD_JOIN) {
1166 Thread *blocking = (Thread *)curr->get_location();
/* Joined thread still running => block on its wait list */
1167 if (!blocking->is_complete()) {
1168 blocking->push_wait_list(curr);
1177 * Stores the ModelAction for the current thread action. Call this
1178 * immediately before switching from user- to system-context to pass
1179 * data between them.
1180 * @param act The ModelAction created by the user-thread action
1182 void ModelChecker::set_current_action(ModelAction *act) {
1183 priv->current_action = act;
1187 * This is the heart of the model checker routine. It performs model-checking
1188 * actions corresponding to a given "current action." Among other processes, it
1189 * calculates reads-from relationships, updates synchronization clock vectors,
1190 * forms a memory_order constraints graph, and handles replay/backtrack
1191 * execution when running permutations of previously-observed executions.
1193 * @param curr The current action to process
1194 * @return The next Thread that must be executed. May be NULL if ModelChecker
1195 * makes no choice (e.g., according to replay execution, combining RMW actions,
1198 Thread * ModelChecker::check_current_action(ModelAction *curr)
1201 bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
1203 if (!check_action_enabled(curr)) {
1204 /* Make the execution look like we chose to run this action
1205 * much later, when a lock/join can succeed */
1206 get_current_thread()->set_pending(curr);
1207 scheduler->sleep(get_current_thread());
1208 return get_next_thread(NULL);
1211 bool newly_explored = initialize_curr_action(&curr);
1213 wake_up_sleeping_actions(curr);
1215 /* Add the action to lists before any other model-checking tasks */
1216 if (!second_part_of_rmw)
1217 add_action_to_lists(curr);
1219 /* Build may_read_from set for newly-created actions */
1220 if (newly_explored && curr->is_read())
1221 build_reads_from_past(curr);
1223 /* Initialize work_queue with the "current action" work */
1224 work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
1225 while (!work_queue.empty() && !has_asserted()) {
1226 WorkQueueEntry work = work_queue.front();
1227 work_queue.pop_front();
1229 switch (work.type) {
1230 case WORK_CHECK_CURR_ACTION: {
1231 ModelAction *act = work.action;
1232 bool update = false; /* update this location's release seq's */
1233 bool update_all = false; /* update all release seq's */
1235 if (process_thread_action(curr))
1238 if (act->is_read() && process_read(act, second_part_of_rmw))
1241 if (act->is_write() && process_write(act))
1244 if (act->is_fence() && process_fence(act))
1247 if (act->is_mutex_op() && process_mutex(act))
1250 if (act->is_relseq_fixup())
1251 process_relseq_fixup(curr, &work_queue);
1254 work_queue.push_back(CheckRelSeqWorkEntry(NULL));
1256 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1259 case WORK_CHECK_RELEASE_SEQ:
1260 resolve_release_sequences(work.location, &work_queue);
1262 case WORK_CHECK_MO_EDGES: {
1263 /** @todo Complete verification of work_queue */
1264 ModelAction *act = work.action;
1265 bool updated = false;
1267 if (act->is_read()) {
1268 const ModelAction *rf = act->get_reads_from();
1269 if (rf != NULL && r_modification_order(act, rf))
1272 if (act->is_write()) {
1273 if (w_modification_order(act))
1276 mo_graph->commitChanges();
1279 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1288 check_curr_backtracking(curr);
1289 set_backtracking(curr);
1290 return get_next_thread(curr);
1293 void ModelChecker::check_curr_backtracking(ModelAction *curr)
1295 Node *currnode = curr->get_node();
1296 Node *parnode = currnode->get_parent();
1298 if (!parnode->backtrack_empty() ||
1299 !currnode->misc_empty() ||
1300 !currnode->read_from_empty() ||
1301 !currnode->future_value_empty() ||
1302 !currnode->promise_empty() ||
1303 !currnode->relseq_break_empty()) {
1304 set_latest_backtrack(curr);
1308 bool ModelChecker::promises_expired() const
1310 for (unsigned int i = 0; i < promises->size(); i++) {
1311 Promise *promise = (*promises)[i];
1312 if (promise->get_expiration() < priv->used_sequence_numbers)
1319 * This is the strongest feasibility check available.
1320 * @return whether the current trace (partial or complete) must be a prefix of
1323 bool ModelChecker::isfeasibleprefix() const
1325 return pending_rel_seqs->size() == 0 && is_feasible_prefix_ignore_relseq();
1329 * Returns whether the current completed trace is feasible, except for pending
1330 * release sequences.
1332 bool ModelChecker::is_feasible_prefix_ignore_relseq() const
1334 if (DBG_ENABLED() && promises->size() != 0)
1335 DEBUG("Infeasible: unrevolved promises\n");
1337 return !is_infeasible() && promises->size() == 0;
1341 * Check if the current partial trace is infeasible. Does not check any
1342 * end-of-execution flags, which might rule out the execution. Thus, this is
1343 * useful only for ruling an execution as infeasible.
1344 * @return whether the current partial trace is infeasible.
1346 bool ModelChecker::is_infeasible() const
1348 if (DBG_ENABLED() && mo_graph->checkForRMWViolation())
1349 DEBUG("Infeasible: RMW violation\n");
1351 return mo_graph->checkForRMWViolation() || is_infeasible_ignoreRMW();
1355 * Check If the current partial trace is infeasible, while ignoring
1356 * infeasibility related to 2 RMW's reading from the same store. It does not
1357 * check end-of-execution feasibility.
1358 * @see ModelChecker::is_infeasible
1359 * @return whether the current partial trace is infeasible, ignoring multiple
1360 * RMWs reading from the same store.
1362 bool ModelChecker::is_infeasible_ignoreRMW() const
1364 if (DBG_ENABLED()) {
1365 if (mo_graph->checkForCycles())
1366 DEBUG("Infeasible: modification order cycles\n");
1367 if (priv->failed_promise)
1368 DEBUG("Infeasible: failed promise\n");
1369 if (priv->too_many_reads)
1370 DEBUG("Infeasible: too many reads\n");
1371 if (priv->bad_synchronization)
1372 DEBUG("Infeasible: bad synchronization ordering\n");
1373 if (promises_expired())
1374 DEBUG("Infeasible: promises expired\n");
1376 return mo_graph->checkForCycles() || priv->failed_promise ||
1377 priv->too_many_reads || priv->bad_synchronization ||
1381 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
1382 ModelAction * ModelChecker::process_rmw(ModelAction *act) {
1383 ModelAction *lastread = get_last_action(act->get_tid());
1384 lastread->process_rmw(act);
1385 if (act->is_rmw() && lastread->get_reads_from() != NULL) {
1386 mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
1387 mo_graph->commitChanges();
1393 * Checks whether a thread has read from the same write for too many times
1394 * without seeing the effects of a later write.
1397 * 1) there must a different write that we could read from that would satisfy the modification order,
1398 * 2) we must have read from the same value in excess of maxreads times, and
1399 * 3) that other write must have been in the reads_from set for maxreads times.
1401 * If so, we decide that the execution is no longer feasible.
1403 void ModelChecker::check_recency(ModelAction *curr, const ModelAction *rf)
1405 if (params.maxreads != 0) {
1406 if (curr->get_node()->get_read_from_size() <= 1)
1408 //Must make sure that execution is currently feasible... We could
1409 //accidentally clear by rolling back
1410 if (is_infeasible())
1412 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1413 int tid = id_to_int(curr->get_tid());
1416 if ((int)thrd_lists->size() <= tid)
1418 action_list_t *list = &(*thrd_lists)[tid];
1420 action_list_t::reverse_iterator rit = list->rbegin();
1421 /* Skip past curr */
1422 for (; (*rit) != curr; rit++)
1424 /* go past curr now */
1427 action_list_t::reverse_iterator ritcopy = rit;
1428 //See if we have enough reads from the same value
1430 for (; count < params.maxreads; rit++, count++) {
1431 if (rit == list->rend())
1433 ModelAction *act = *rit;
1434 if (!act->is_read())
1437 if (act->get_reads_from() != rf)
1439 if (act->get_node()->get_read_from_size() <= 1)
1442 for (int i = 0; i < curr->get_node()->get_read_from_size(); i++) {
1444 const ModelAction *write = curr->get_node()->get_read_from_at(i);
1446 /* Need a different write */
1450 /* Test to see whether this is a feasible write to read from */
1451 mo_graph->startChanges();
1452 r_modification_order(curr, write);
1453 bool feasiblereadfrom = !is_infeasible();
1454 mo_graph->rollbackChanges();
1456 if (!feasiblereadfrom)
1460 bool feasiblewrite = true;
1461 //new we need to see if this write works for everyone
1463 for (int loop = count; loop > 0; loop--, rit++) {
1464 ModelAction *act = *rit;
1465 bool foundvalue = false;
1466 for (int j = 0; j < act->get_node()->get_read_from_size(); j++) {
1467 if (act->get_node()->get_read_from_at(j) == write) {
1473 feasiblewrite = false;
1477 if (feasiblewrite) {
1478 priv->too_many_reads = true;
1486 * Updates the mo_graph with the constraints imposed from the current
1489 * Basic idea is the following: Go through each other thread and find
1490 * the lastest action that happened before our read. Two cases:
1492 * (1) The action is a write => that write must either occur before
1493 * the write we read from or be the write we read from.
1495 * (2) The action is a read => the write that that action read from
1496 * must occur before the write we read from or be the same write.
1498 * @param curr The current action. Must be a read.
1499 * @param rf The action that curr reads from. Must be a write.
1500 * @return True if modification order edges were added; false otherwise
1502 bool ModelChecker::r_modification_order(ModelAction *curr, const ModelAction *rf)
1504 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1507 ASSERT(curr->is_read());
1509 /* Last SC fence in the current thread */
1510 ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1512 /* Iterate over all threads */
1513 for (i = 0; i < thrd_lists->size(); i++) {
1514 /* Last SC fence in thread i */
1515 ModelAction *last_sc_fence_thread_local = NULL;
1516 if (int_to_id((int)i) != curr->get_tid())
1517 last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(i), NULL);
1519 /* Last SC fence in thread i, before last SC fence in current thread */
1520 ModelAction *last_sc_fence_thread_before = NULL;
1521 if (last_sc_fence_local)
1522 last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1524 /* Iterate over actions in thread, starting from most recent */
1525 action_list_t *list = &(*thrd_lists)[i];
1526 action_list_t::reverse_iterator rit;
1527 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1528 ModelAction *act = *rit;
1530 if (act->is_write() && act != rf && act != curr) {
1531 /* C++, Section 29.3 statement 5 */
1532 if (curr->is_seqcst() && last_sc_fence_thread_local &&
1533 *act < *last_sc_fence_thread_local) {
1534 mo_graph->addEdge(act, rf);
1538 /* C++, Section 29.3 statement 4 */
1539 else if (act->is_seqcst() && last_sc_fence_local &&
1540 *act < *last_sc_fence_local) {
1541 mo_graph->addEdge(act, rf);
1545 /* C++, Section 29.3 statement 6 */
1546 else if (last_sc_fence_thread_before &&
1547 *act < *last_sc_fence_thread_before) {
1548 mo_graph->addEdge(act, rf);
1555 * Include at most one act per-thread that "happens
1556 * before" curr. Don't consider reflexively.
1558 if (act->happens_before(curr) && act != curr) {
1559 if (act->is_write()) {
1561 mo_graph->addEdge(act, rf);
1565 const ModelAction *prevreadfrom = act->get_reads_from();
1566 //if the previous read is unresolved, keep going...
1567 if (prevreadfrom == NULL)
1570 if (rf != prevreadfrom) {
1571 mo_graph->addEdge(prevreadfrom, rf);
1583 /** This method fixes up the modification order when we resolve a
1584 * promises. The basic problem is that actions that occur after the
1585 * read curr could not property add items to the modification order
1588 * So for each thread, we find the earliest item that happens after
1589 * the read curr. This is the item we have to fix up with additional
1590 * constraints. If that action is write, we add a MO edge between
1591 * the Action rf and that action. If the action is a read, we add a
1592 * MO edge between the Action rf, and whatever the read accessed.
1594 * @param curr is the read ModelAction that we are fixing up MO edges for.
1595 * @param rf is the write ModelAction that curr reads from.
1598 void ModelChecker::post_r_modification_order(ModelAction *curr, const ModelAction *rf)
1600 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1602 ASSERT(curr->is_read());
1604 /* Iterate over all threads */
1605 for (i = 0; i < thrd_lists->size(); i++) {
1606 /* Iterate over actions in thread, starting from most recent */
1607 action_list_t *list = &(*thrd_lists)[i];
1608 action_list_t::reverse_iterator rit;
1609 ModelAction *lastact = NULL;
1611 /* Find last action that happens after curr that is either not curr or a rmw */
1612 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1613 ModelAction *act = *rit;
1614 if (curr->happens_before(act) && (curr != act || curr->is_rmw())) {
1620 /* Include at most one act per-thread that "happens before" curr */
1621 if (lastact != NULL) {
1622 if (lastact == curr) {
1623 //Case 1: The resolved read is a RMW, and we need to make sure
1624 //that the write portion of the RMW mod order after rf
1626 mo_graph->addEdge(rf, lastact);
1627 } else if (lastact->is_read()) {
1628 //Case 2: The resolved read is a normal read and the next
1629 //operation is a read, and we need to make sure the value read
1630 //is mod ordered after rf
1632 const ModelAction *postreadfrom = lastact->get_reads_from();
1633 if (postreadfrom != NULL && rf != postreadfrom)
1634 mo_graph->addEdge(rf, postreadfrom);
1636 //Case 3: The resolved read is a normal read and the next
1637 //operation is a write, and we need to make sure that the
1638 //write is mod ordered after rf
1640 mo_graph->addEdge(rf, lastact);
1648 * Updates the mo_graph with the constraints imposed from the current write.
1650 * Basic idea is the following: Go through each other thread and find
1651 * the lastest action that happened before our write. Two cases:
1653 * (1) The action is a write => that write must occur before
1656 * (2) The action is a read => the write that that action read from
1657 * must occur before the current write.
1659 * This method also handles two other issues:
1661 * (I) Sequential Consistency: Making sure that if the current write is
1662 * seq_cst, that it occurs after the previous seq_cst write.
1664 * (II) Sending the write back to non-synchronizing reads.
1666 * @param curr The current action. Must be a write.
1667 * @return True if modification order edges were added; false otherwise
1669 bool ModelChecker::w_modification_order(ModelAction *curr)
1671 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1674 ASSERT(curr->is_write());
1676 if (curr->is_seqcst()) {
1677 /* We have to at least see the last sequentially consistent write,
1678 so we are initialized. */
1679 ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
1680 if (last_seq_cst != NULL) {
1681 mo_graph->addEdge(last_seq_cst, curr);
1686 /* Last SC fence in the current thread */
1687 ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1689 /* Iterate over all threads */
1690 for (i = 0; i < thrd_lists->size(); i++) {
1691 /* Last SC fence in thread i, before last SC fence in current thread */
1692 ModelAction *last_sc_fence_thread_before = NULL;
1693 if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
1694 last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1696 /* Iterate over actions in thread, starting from most recent */
1697 action_list_t *list = &(*thrd_lists)[i];
1698 action_list_t::reverse_iterator rit;
1699 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1700 ModelAction *act = *rit;
1703 * 1) If RMW and it actually read from something, then we
1704 * already have all relevant edges, so just skip to next
1707 * 2) If RMW and it didn't read from anything, we should
1708 * whatever edge we can get to speed up convergence.
1710 * 3) If normal write, we need to look at earlier actions, so
1711 * continue processing list.
1713 if (curr->is_rmw()) {
1714 if (curr->get_reads_from() != NULL)
1722 /* C++, Section 29.3 statement 7 */
1723 if (last_sc_fence_thread_before && act->is_write() &&
1724 *act < *last_sc_fence_thread_before) {
1725 mo_graph->addEdge(act, curr);
1731 * Include at most one act per-thread that "happens
1734 if (act->happens_before(curr)) {
1736 * Note: if act is RMW, just add edge:
1738 * The following edge should be handled elsewhere:
1739 * readfrom(act) --mo--> act
1741 if (act->is_write())
1742 mo_graph->addEdge(act, curr);
1743 else if (act->is_read()) {
1744 //if previous read accessed a null, just keep going
1745 if (act->get_reads_from() == NULL)
1747 mo_graph->addEdge(act->get_reads_from(), curr);
1751 } else if (act->is_read() && !act->could_synchronize_with(curr) &&
1752 !act->same_thread(curr)) {
1753 /* We have an action that:
1754 (1) did not happen before us
1755 (2) is a read and we are a write
1756 (3) cannot synchronize with us
1757 (4) is in a different thread
1759 that read could potentially read from our write. Note that
1760 these checks are overly conservative at this point, we'll
1761 do more checks before actually removing the
1765 if (thin_air_constraint_may_allow(curr, act)) {
1766 if (!is_infeasible() ||
1767 (curr->is_rmw() && act->is_rmw() && curr->get_reads_from() == act->get_reads_from() && !is_infeasible_ignoreRMW())) {
1768 struct PendingFutureValue pfv = {curr, act};
1769 futurevalues->push_back(pfv);
1779 /** Arbitrary reads from the future are not allowed. Section 29.3
1780 * part 9 places some constraints. This method checks one result of constraint
1781 * constraint. Others require compiler support. */
1782 bool ModelChecker::thin_air_constraint_may_allow(const ModelAction *writer, const ModelAction *reader)
1784 if (!writer->is_rmw())
1787 if (!reader->is_rmw())
1790 for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
1791 if (search == reader)
1793 if (search->get_tid() == reader->get_tid() &&
1794 search->happens_before(reader))
1802 * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
1803 * some constraints. This method checks one the following constraint (others
1804 * require compiler support):
1806 * If X --hb-> Y --mo-> Z, then X should not read from Z.
1808 bool ModelChecker::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
1810 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, reader->get_location());
1812 /* Iterate over all threads */
1813 for (i = 0; i < thrd_lists->size(); i++) {
1814 const ModelAction *write_after_read = NULL;
1816 /* Iterate over actions in thread, starting from most recent */
1817 action_list_t *list = &(*thrd_lists)[i];
1818 action_list_t::reverse_iterator rit;
1819 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1820 ModelAction *act = *rit;
1822 /* Don't disallow due to act == reader */
1823 if (!reader->happens_before(act) || reader == act)
1825 else if (act->is_write())
1826 write_after_read = act;
1827 else if (act->is_read() && act->get_reads_from() != NULL)
1828 write_after_read = act->get_reads_from();
1831 if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
1838 * Finds the head(s) of the release sequence(s) containing a given ModelAction.
1839 * The ModelAction under consideration is expected to be taking part in
1840 * release/acquire synchronization as an object of the "reads from" relation.
1841 * Note that this can only provide release sequence support for RMW chains
1842 * which do not read from the future, as those actions cannot be traced until
1843 * their "promise" is fulfilled. Similarly, we may not even establish the
1844 * presence of a release sequence with certainty, as some modification order
1845 * constraints may be decided further in the future. Thus, this function
1846 * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
1847 * and a boolean representing certainty.
1849 * @param rf The action that might be part of a release sequence. Must be a
1851 * @param release_heads A pass-by-reference style return parameter. After
1852 * execution of this function, release_heads will contain the heads of all the
1853 * relevant release sequences, if any exists with certainty
1854 * @param pending A pass-by-reference style return parameter which is only used
1855 * when returning false (i.e., uncertain). Returns most information regarding
1856 * an uncertain release sequence, including any write operations that might
1857 * break the sequence.
1858 * @return true, if the ModelChecker is certain that release_heads is complete;
1861 bool ModelChecker::release_seq_heads(const ModelAction *rf,
1862 rel_heads_list_t *release_heads,
1863 struct release_seq *pending) const
1865 /* Only check for release sequences if there are no cycles */
1866 if (mo_graph->checkForCycles())
1870 ASSERT(rf->is_write());
1872 if (rf->is_release())
1873 release_heads->push_back(rf);
1874 else if (rf->get_last_fence_release())
1875 release_heads->push_back(rf->get_last_fence_release());
1877 break; /* End of RMW chain */
1879 /** @todo Need to be smarter here... In the linux lock
1880 * example, this will run to the beginning of the program for
1882 /** @todo The way to be smarter here is to keep going until 1
1883 * thread has a release preceded by an acquire and you've seen
1886 /* acq_rel RMW is a sufficient stopping condition */
1887 if (rf->is_acquire() && rf->is_release())
1888 return true; /* complete */
1890 rf = rf->get_reads_from();
1893 /* read from future: need to settle this later */
1895 return false; /* incomplete */
1898 if (rf->is_release())
1899 return true; /* complete */
1901 /* else relaxed write
1902 * - check for fence-release in the same thread (29.8, stmt. 3)
1903 * - check modification order for contiguous subsequence
1904 * -> rf must be same thread as release */
1906 const ModelAction *fence_release = rf->get_last_fence_release();
1907 /* Synchronize with a fence-release unconditionally; we don't need to
1908 * find any more "contiguous subsequence..." for it */
1910 release_heads->push_back(fence_release);
1912 int tid = id_to_int(rf->get_tid());
1913 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, rf->get_location());
1914 action_list_t *list = &(*thrd_lists)[tid];
1915 action_list_t::const_reverse_iterator rit;
1917 /* Find rf in the thread list */
1918 rit = std::find(list->rbegin(), list->rend(), rf);
1919 ASSERT(rit != list->rend());
1921 /* Find the last {write,fence}-release */
1922 for (; rit != list->rend(); rit++) {
1923 if (fence_release && *(*rit) < *fence_release)
1925 if ((*rit)->is_release())
1928 if (rit == list->rend()) {
1929 /* No write-release in this thread */
1930 return true; /* complete */
1931 } else if (fence_release && *(*rit) < *fence_release) {
1932 /* The fence-release is more recent (and so, "stronger") than
1933 * the most recent write-release */
1934 return true; /* complete */
1935 } /* else, need to establish contiguous release sequence */
1936 ModelAction *release = *rit;
1938 ASSERT(rf->same_thread(release));
1940 pending->writes.clear();
1942 bool certain = true;
1943 for (unsigned int i = 0; i < thrd_lists->size(); i++) {
1944 if (id_to_int(rf->get_tid()) == (int)i)
1946 list = &(*thrd_lists)[i];
1948 /* Can we ensure no future writes from this thread may break
1949 * the release seq? */
1950 bool future_ordered = false;
1952 ModelAction *last = get_last_action(int_to_id(i));
1953 Thread *th = get_thread(int_to_id(i));
1954 if ((last && rf->happens_before(last)) ||
1957 future_ordered = true;
1959 ASSERT(!th->is_model_thread() || future_ordered);
1961 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1962 const ModelAction *act = *rit;
1963 /* Reach synchronization -> this thread is complete */
1964 if (act->happens_before(release))
1966 if (rf->happens_before(act)) {
1967 future_ordered = true;
1971 /* Only non-RMW writes can break release sequences */
1972 if (!act->is_write() || act->is_rmw())
1975 /* Check modification order */
1976 if (mo_graph->checkReachable(rf, act)) {
1977 /* rf --mo--> act */
1978 future_ordered = true;
1981 if (mo_graph->checkReachable(act, release))
1982 /* act --mo--> release */
1984 if (mo_graph->checkReachable(release, act) &&
1985 mo_graph->checkReachable(act, rf)) {
1986 /* release --mo-> act --mo--> rf */
1987 return true; /* complete */
1989 /* act may break release sequence */
1990 pending->writes.push_back(act);
1993 if (!future_ordered)
1994 certain = false; /* This thread is uncertain */
1998 release_heads->push_back(release);
1999 pending->writes.clear();
2001 pending->release = release;
2008 * An interface for getting the release sequence head(s) with which a
2009 * given ModelAction must synchronize. This function only returns a non-empty
2010 * result when it can locate a release sequence head with certainty. Otherwise,
2011 * it may mark the internal state of the ModelChecker so that it will handle
2012 * the release sequence at a later time, causing @a acquire to update its
2013 * synchronization at some later point in execution.
2015 * @param acquire The 'acquire' action that may synchronize with a release
2017 * @param read The read action that may read from a release sequence; this may
2018 * be the same as acquire, or else an earlier action in the same thread (i.e.,
2019 * when 'acquire' is a fence-acquire)
2020 * @param release_heads A pass-by-reference return parameter. Will be filled
2021 * with the head(s) of the release sequence(s), if they exists with certainty.
2022 * @see ModelChecker::release_seq_heads
2024 void ModelChecker::get_release_seq_heads(ModelAction *acquire,
2025 ModelAction *read, rel_heads_list_t *release_heads)
2027 const ModelAction *rf = read->get_reads_from();
2028 struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
2029 sequence->acquire = acquire;
2030 sequence->read = read;
2032 if (!release_seq_heads(rf, release_heads, sequence)) {
2033 /* add act to 'lazy checking' list */
2034 pending_rel_seqs->push_back(sequence);
2036 snapshot_free(sequence);
2041 * Attempt to resolve all stashed operations that might synchronize with a
2042 * release sequence for a given location. This implements the "lazy" portion of
2043 * determining whether or not a release sequence was contiguous, since not all
2044 * modification order information is present at the time an action occurs.
2046 * @param location The location/object that should be checked for release
2047 * sequence resolutions. A NULL value means to check all locations.
2048 * @param work_queue The work queue to which to add work items as they are
2050 * @return True if any updates occurred (new synchronization, new mo_graph
2053 bool ModelChecker::resolve_release_sequences(void *location, work_queue_t *work_queue)
2055 bool updated = false;
2056 std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >::iterator it = pending_rel_seqs->begin();
2057 while (it != pending_rel_seqs->end()) {
2058 struct release_seq *pending = *it;
2059 ModelAction *acquire = pending->acquire;
2060 const ModelAction *read = pending->read;
2062 /* Only resolve sequences on the given location, if provided */
2063 if (location && read->get_location() != location) {
2068 const ModelAction *rf = read->get_reads_from();
2069 rel_heads_list_t release_heads;
2071 complete = release_seq_heads(rf, &release_heads, pending);
2072 for (unsigned int i = 0; i < release_heads.size(); i++) {
2073 if (!acquire->has_synchronized_with(release_heads[i])) {
2074 if (acquire->synchronize_with(release_heads[i]))
2077 set_bad_synchronization();
2082 /* Re-check all pending release sequences */
2083 work_queue->push_back(CheckRelSeqWorkEntry(NULL));
2084 /* Re-check read-acquire for mo_graph edges */
2085 if (acquire->is_read())
2086 work_queue->push_back(MOEdgeWorkEntry(acquire));
2088 /* propagate synchronization to later actions */
2089 action_list_t::reverse_iterator rit = action_trace->rbegin();
2090 for (; (*rit) != acquire; rit++) {
2091 ModelAction *propagate = *rit;
2092 if (acquire->happens_before(propagate)) {
2093 propagate->synchronize_with(acquire);
2094 /* Re-check 'propagate' for mo_graph edges */
2095 work_queue->push_back(MOEdgeWorkEntry(propagate));
2100 it = pending_rel_seqs->erase(it);
2101 snapshot_free(pending);
2107 // If we resolved promises or data races, see if we have realized a data race.
2114 * Performs various bookkeeping operations for the current ModelAction. For
2115 * instance, adds action to the per-object, per-thread action vector and to the
2116 * action trace list of all thread actions.
2118 * @param act is the ModelAction to add.
2120 void ModelChecker::add_action_to_lists(ModelAction *act)
2122 int tid = id_to_int(act->get_tid());
2123 ModelAction *uninit = NULL;
2125 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
2126 if (list->empty() && act->is_atomic_var()) {
2127 uninit = new_uninitialized_action(act->get_location());
2128 uninit_id = id_to_int(uninit->get_tid());
2129 list->push_back(uninit);
2131 list->push_back(act);
2133 action_trace->push_back(act);
2135 action_trace->push_front(uninit);
2137 std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, act->get_location());
2138 if (tid >= (int)vec->size())
2139 vec->resize(priv->next_thread_id);
2140 (*vec)[tid].push_back(act);
2142 (*vec)[uninit_id].push_front(uninit);
2144 if ((int)thrd_last_action->size() <= tid)
2145 thrd_last_action->resize(get_num_threads());
2146 (*thrd_last_action)[tid] = act;
2148 (*thrd_last_action)[uninit_id] = uninit;
2150 if (act->is_fence() && act->is_release()) {
2151 if ((int)thrd_last_fence_release->size() <= tid)
2152 thrd_last_fence_release->resize(get_num_threads());
2153 (*thrd_last_fence_release)[tid] = act;
2156 if (act->is_wait()) {
2157 void *mutex_loc = (void *) act->get_value();
2158 get_safe_ptr_action(obj_map, mutex_loc)->push_back(act);
2160 std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, mutex_loc);
2161 if (tid >= (int)vec->size())
2162 vec->resize(priv->next_thread_id);
2163 (*vec)[tid].push_back(act);
2168 * @brief Get the last action performed by a particular Thread
2169 * @param tid The thread ID of the Thread in question
2170 * @return The last action in the thread
2172 ModelAction * ModelChecker::get_last_action(thread_id_t tid) const
2174 int threadid = id_to_int(tid);
2175 if (threadid < (int)thrd_last_action->size())
2176 return (*thrd_last_action)[id_to_int(tid)];
2182 * @brief Get the last fence release performed by a particular Thread
2183 * @param tid The thread ID of the Thread in question
2184 * @return The last fence release in the thread, if one exists; NULL otherwise
2186 ModelAction * ModelChecker::get_last_fence_release(thread_id_t tid) const
2188 int threadid = id_to_int(tid);
2189 if (threadid < (int)thrd_last_fence_release->size())
2190 return (*thrd_last_fence_release)[id_to_int(tid)];
2196 * Gets the last memory_order_seq_cst write (in the total global sequence)
2197 * performed on a particular object (i.e., memory location), not including the
2199 * @param curr The current ModelAction; also denotes the object location to
2201 * @return The last seq_cst write
2203 ModelAction * ModelChecker::get_last_seq_cst_write(ModelAction *curr) const
2205 void *location = curr->get_location();
2206 action_list_t *list = get_safe_ptr_action(obj_map, location);
2207 /* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
2208 action_list_t::reverse_iterator rit;
2209 for (rit = list->rbegin(); rit != list->rend(); rit++)
2210 if ((*rit)->is_write() && (*rit)->is_seqcst() && (*rit) != curr)
2216 * Gets the last memory_order_seq_cst fence (in the total global sequence)
2217 * performed in a particular thread, prior to a particular fence.
2218 * @param tid The ID of the thread to check
2219 * @param before_fence The fence from which to begin the search; if NULL, then
2220 * search for the most recent fence in the thread.
2221 * @return The last prior seq_cst fence in the thread, if exists; otherwise, NULL
2223 ModelAction * ModelChecker::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
2225 /* All fences should have NULL location */
2226 action_list_t *list = get_safe_ptr_action(obj_map, NULL);
2227 action_list_t::reverse_iterator rit = list->rbegin();
	/* Phase 1: when a starting fence is given, rewind the reverse iterator
	 * until it points at before_fence, so the scan below only visits actions
	 * earlier in the global sequence.
	 * NOTE(review): the loop-exit statement (break) is not visible in this
	 * listing -- confirm against the full source. */
2230 for (; rit != list->rend(); rit++)
2231 if (*rit == before_fence)
	/* Sanity check: the rewind must actually have found before_fence */
2234 ASSERT(*rit == before_fence);
	/* Phase 2: scan backwards for the most recent seq_cst fence performed
	 * by thread 'tid'; the first match is the result */
2238 for (; rit != list->rend(); rit++)
2239 if ((*rit)->is_fence() && (tid == (*rit)->get_tid()) && (*rit)->is_seqcst())
2245 * Gets the last unlock operation performed on a particular mutex (i.e., memory
2246 * location). This function identifies the mutex according to the current
2247 * action, which is presumed to perform on the same mutex.
2248 * @param curr The current ModelAction; also denotes the object location to
2250 * @return The last unlock operation
2252 ModelAction * ModelChecker::get_last_unlock(ModelAction *curr) const
2254 void *location = curr->get_location();
2255 action_list_t *list = get_safe_ptr_action(obj_map, location);
2256 /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
2257 action_list_t::reverse_iterator rit;
2258 for (rit = list->rbegin(); rit != list->rend(); rit++)
2259 if ((*rit)->is_unlock() || (*rit)->is_wait())
2264 ModelAction * ModelChecker::get_parent_action(thread_id_t tid) const
2266 ModelAction *parent = get_last_action(tid);
2268 parent = get_thread(tid)->get_creation();
2273 * Returns the clock vector for a given thread.
2274 * @param tid The thread whose clock vector we want
2275 * @return Desired clock vector
2277 ClockVector * ModelChecker::get_cv(thread_id_t tid) const
2279 return get_parent_action(tid)->get_cv();
2283 * Resolve a set of Promises with a current write. The set is provided in the
2284 * Node corresponding to @a write.
2285 * @param write The ModelAction that is fulfilling Promises
2286 * @return True if promises were resolved; false otherwise
2288 bool ModelChecker::resolve_promises(ModelAction *write)
2290 bool resolved = false;
	/* Threads whose promised reads got resolved; re-checked below */
2291 std::vector< thread_id_t, ModelAlloc<thread_id_t> > threads_to_check;
	/* 'i' indexes the Node's promise set; 'promise_index' tracks the
	 * (shrinking) promises vector, since resolved promises are erased */
2293 for (unsigned int i = 0, promise_index = 0; promise_index < promises->size(); i++) {
2294 Promise *promise = (*promises)[promise_index];
2295 if (write->get_node()->get_promise(i)) {
2296 ModelAction *read = promise->get_action();
	/* An RMW read additionally needs an RMW edge in the mo-graph */
2297 if (read->is_rmw()) {
2298 mo_graph->addRMWEdge(write, read);
	/* Establish the reads-from relation for the promised read */
2300 read_from(read, write);
2301 //First fix up the modification order for actions that happened
	//before the read
2303 r_modification_order(read, write);
2304 //Next fix up the modification order for actions that happened
	//after the read
2306 post_r_modification_order(read, write);
2307 //Make sure the promise's value matches the write's value
2308 ASSERT(promise->get_value() == write->get_value());
	/* Promise fulfilled: drop it and remember the reader's thread */
2311 promises->erase(promises->begin() + promise_index);
2312 threads_to_check.push_back(read->get_tid());
2319 //Check whether reading these writes has made threads unable to
	//resolve remaining promises (modification-order constraints)
2322 for (unsigned int i = 0; i < threads_to_check.size(); i++)
2323 mo_check_promises(threads_to_check[i], write);
2329 * Compute the set of promises that could potentially be satisfied by this
2330 * action. Note that the set computation actually appears in the Node, not in
	* the ModelChecker itself.
2332 * @param curr The ModelAction that may satisfy promises
2334 void ModelChecker::compute_promises(ModelAction *curr)
2336 for (unsigned int i = 0; i < promises->size(); i++) {
2337 Promise *promise = (*promises)[i];
2338 const ModelAction *act = promise->get_action();
	/* A promise may be satisfied by curr only if the promised read:
	 *  - does not already happen-before curr,
	 *  - could not synchronize with curr,
	 *  - is in a different thread,
	 *  - targets the same location, and
	 *  - expects exactly the value curr writes.
	 * NOTE(review): at least one conjunct appears elided from this
	 * listing -- confirm against the full source. */
2339 if (!act->happens_before(curr) &&
2341 !act->could_synchronize_with(curr) &&
2342 !act->same_thread(curr) &&
2343 act->get_location() == curr->get_location() &&
2344 promise->get_value() == curr->get_value()) {
	/* Record satisfiability in the Node; remember whether the read is RMW */
2345 curr->get_node()->set_promise(i, act->is_rmw());
2350 /** Checks promises in response to change in ClockVector Threads.
 * @param tid The thread whose synchronization status changed
 * @param old_cv The thread's clock vector before the merge (may be NULL)
 * @param merge_cv The thread's clock vector after the merge
 */
2351 void ModelChecker::check_promises(thread_id_t tid, ClockVector *old_cv, ClockVector *merge_cv)
2353 for (unsigned int i = 0; i < promises->size(); i++) {
2354 Promise *promise = (*promises)[i];
2355 const ModelAction *act = promise->get_action();
	/* Thread tid newly synchronized with the promised read: synchronized
	 * under merge_cv but not under old_cv (NULL old_cv means there was no
	 * prior clock vector at all) */
2356 if ((old_cv == NULL || !old_cv->synchronized_since(act)) &&
2357 merge_cv->synchronized_since(act)) {
	/* increment_threads() reports that no eligible thread remains */
2358 if (promise->increment_threads(tid)) {
2359 //Promise has failed
2360 priv->failed_promise = true;
2367 void ModelChecker::check_promises_thread_disabled() {
2368 for (unsigned int i = 0; i < promises->size(); i++) {
2369 Promise *promise = (*promises)[i];
2370 if (promise->check_promise()) {
2371 priv->failed_promise = true;
2377 /** Checks promises in response to addition to modification order for threads.
2379 * pthread is the thread that performed the read that created the promise
2381 * pread is the read that created the promise
2383 * pwrite is either the first write to same location as pread by
2384 * pthread that is sequenced after pread or the value read by the
2385 * first read to the same location as pread by pthread that is
2386 * sequenced after pread.
2388 * 1. If tid=pthread, then we check what other threads are reachable
2389 * through the modification order starting with pwrite. Those threads cannot
2390 * perform a write that will resolve the promise due to modification
2391 * order constraints.
2393 * 2. If the tid is not pthread, we check whether pwrite can reach the
2394 * action write through the modification order. If so, that thread
2395 * cannot perform a future write that will resolve the promise due to
2396 * modification order constraints.
2398 * @param tid The thread that either read from the model action
2399 * write, or actually did the model action write.
2401 * @param write The ModelAction representing the relevant write.
 *
 * NOTE(review): several control-flow statements (continue/early exits) are
 * not visible in this listing; the comments below describe intent only.
 */
2403 void ModelChecker::mo_check_promises(thread_id_t tid, const ModelAction *write)
2405 void *location = write->get_location();
2406 for (unsigned int i = 0; i < promises->size(); i++) {
2407 Promise *promise = (*promises)[i];
2408 const ModelAction *act = promise->get_action();
2410 //Is this promise on the same location?
2411 if (act->get_location() != location)
	/* Case 1: the write comes from the same thread as the promised read */
2414 //same thread as the promise
2415 if (act->get_tid() == tid) {
2417 //do we have a pwrite for the promise, if not, set it
2418 if (promise->get_write() == NULL) {
2419 promise->set_write(write);
2420 //The pwrite cannot happen before the promise
2421 if (write->happens_before(act) && (write != act)) {
2422 priv->failed_promise = true;
	/* mo-graph reachability from pwrite rules out future resolution */
2426 if (mo_graph->checkPromise(write, promise)) {
2427 priv->failed_promise = true;
	/* Case 2: a different thread read from (or performed) the write */
2432 //Don't do any lookups twice for the same thread
2433 if (promise->has_sync_thread(tid))
	/* If pwrite reaches 'write' in the modification order, thread tid can
	 * no longer supply the promised value */
2436 if (promise->get_write() && mo_graph->checkReachable(promise->get_write(), write)) {
2437 if (promise->increment_threads(tid)) {
2438 priv->failed_promise = true;
2446 * Compute the set of writes that may break the current pending release
2447 * sequence. This information is extracted from previou release sequence
2450 * @param curr The current ModelAction. Must be a release sequence fixup
2453 void ModelChecker::compute_relseq_breakwrites(ModelAction *curr)
2455 if (pending_rel_seqs->empty())
2458 struct release_seq *pending = pending_rel_seqs->back();
2459 for (unsigned int i = 0; i < pending->writes.size(); i++) {
2460 const ModelAction *write = pending->writes[i];
2461 curr->get_node()->add_relseq_break(write);
2464 /* NULL means don't break the sequence; just synchronize */
2465 curr->get_node()->add_relseq_break(NULL);
2469 * Build up an initial set of all past writes that this 'read' action may read
2470 * from. This set is determined by the clock vector's "happens before"
	* relation.
2472 * @param curr is the current ModelAction that we are exploring; it must be a
	* read operation (asserted below).
 *
 * NOTE(review): several control-flow statements (continue/break) and the
 * declaration of loop index 'i' are not visible in this listing.
2475 void ModelChecker::build_reads_from_past(ModelAction *curr)
2477 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
2479 ASSERT(curr->is_read());
2481 ModelAction *last_sc_write = NULL;
	/* A seq_cst read is constrained by the most recent seq_cst write */
2483 if (curr->is_seqcst())
2484 last_sc_write = get_last_seq_cst_write(curr);
2486 /* Iterate over all threads */
2487 for (i = 0; i < thrd_lists->size(); i++) {
2488 /* Iterate over actions in thread, starting from most recent */
2489 action_list_t *list = &(*thrd_lists)[i];
2490 action_list_t::reverse_iterator rit;
2491 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2492 ModelAction *act = *rit;
2494 /* Only consider 'write' actions */
2495 if (!act->is_write() || act == curr)
2498 /* Don't consider more than one seq_cst write if we are a seq_cst read. */
2499 bool allow_read = true;
	/* Disallow: (a) seq_cst writes (or writes hb-before the last sc write)
	 * other than the last sc write itself, for a sc read; (b) writes a
	 * sleep-set thread is not permitted to read from */
2501 if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
2503 else if (curr->get_sleep_flag() && !curr->is_seqcst() && !sleep_can_read_from(curr, act))
2507 DEBUG("Adding action to may_read_from:\n");
2508 if (DBG_ENABLED()) {
	/* Record act as a candidate in this Node's may-read-from set */
2512 curr->get_node()->add_read_from(act);
2515 /* Include at most one act per-thread that "happens before" curr */
2516 if (act->happens_before(curr))
	/* Debug dump of the computed may-read-from set */
2521 if (DBG_ENABLED()) {
2522 model_print("Reached read action:\n");
2524 model_print("Printing may_read_from\n");
2525 curr->get_node()->print_may_read_from();
2526 model_print("End printing may_read_from\n");
/**
 * @brief Decide whether a read by a sleep-set thread may observe a write
 *
 * Inspects the enabled status recorded in the write's parent Node and, for
 * RMW writes, appears to follow the reads-from chain backwards (the
 * reassignment of 'write' at the end suggests an enclosing loop that is not
 * visible in this listing).
 * NOTE(review): the loop header and the return statements are elided from
 * this listing -- verify against the full source before modifying.
 * @param curr The read being attempted by the (sleeping) thread
 * @param write The candidate write
 * @return Whether the read may see the write
 */
2530 bool ModelChecker::sleep_can_read_from(ModelAction *curr, const ModelAction *write)
2533 /* UNINIT actions don't have a Node, and they never sleep */
2534 if (write->is_uninitialized())
	/* The Node's parent records per-thread enabled status at that point */
2536 Node *prevnode = write->get_node()->get_parent();
2538 bool thread_sleep = prevnode->enabled_status(curr->get_tid()) == THREAD_SLEEP_SET;
2539 if (write->is_release() && thread_sleep)
2541 if (!write->is_rmw()) {
2544 if (write->get_reads_from() == NULL)
	/* Step backwards through the RMW chain */
2546 write = write->get_reads_from();
2551 * @brief Create a new action representing an uninitialized atomic
2552 * @param location The memory location of the atomic object
2553 * @return A pointer to a new ModelAction
2555 ModelAction * ModelChecker::new_uninitialized_action(void *location) const
2557 ModelAction *act = (ModelAction *)snapshot_malloc(sizeof(class ModelAction));
2558 act = new (act) ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, location, 0, model_thread);
2559 act->create_cv(NULL);
/**
 * @brief Print a formatted summary of an action list
 *
 * Also computes and prints an order-sensitive hash folded from each
 * action's hash, so identical traces print identical HASH lines.
 * @param list The list of actions to print
 * @param exec_num Execution number to label the output with
 */
2563 static void print_list(action_list_t *list, int exec_num = -1)
2565 action_list_t::iterator it;
2567 model_print("---------------------------------------------------------------------\n");
2569 model_print("Execution %d:\n", exec_num);
2571 unsigned int hash = 0;
	/* Fold each action's hash into the running checksum; the shift/xor mix
	 * makes the result depend on ordering, not just membership */
2573 for (it = list->begin(); it != list->end(); it++) {
2575 hash = hash^(hash<<3)^((*it)->hash());
2577 model_print("HASH %u\n", hash);
2578 model_print("---------------------------------------------------------------------\n");
2581 #if SUPPORT_MOD_ORDER_DUMP
2582 void ModelChecker::dumpGraph(char *filename) const
2585 sprintf(buffer, "%s.dot", filename);
2586 FILE *file = fopen(buffer, "w");
2587 fprintf(file, "digraph %s {\n", filename);
2588 mo_graph->dumpNodes(file);
2589 ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());
2591 for (action_list_t::iterator it = action_trace->begin(); it != action_trace->end(); it++) {
2592 ModelAction *action = *it;
2593 if (action->is_read()) {
2594 fprintf(file, "N%u [label=\"%u, T%u\"];\n", action->get_seq_number(), action->get_seq_number(), action->get_tid());
2595 if (action->get_reads_from() != NULL)
2596 fprintf(file, "N%u -> N%u[label=\"rf\", color=red];\n", action->get_seq_number(), action->get_reads_from()->get_seq_number());
2598 if (thread_array[action->get_tid()] != NULL) {
2599 fprintf(file, "N%u -> N%u[label=\"sb\", color=blue];\n", thread_array[action->get_tid()]->get_seq_number(), action->get_seq_number());
2602 thread_array[action->get_tid()] = action;
2604 fprintf(file, "}\n");
2605 model_free(thread_array);
2610 /** @brief Prints an execution trace summary. */
2611 void ModelChecker::print_summary() const
2613 #if SUPPORT_MOD_ORDER_DUMP
	/* Dump the modification-order graph and the trace graph to files,
	 * numbered by the running execution count */
2615 char buffername[100];
2616 sprintf(buffername, "exec%04u", stats.num_total);
2617 mo_graph->dumpGraphToFile(buffername);
2618 sprintf(buffername, "graph%04u", stats.num_total);
2619 dumpGraph(buffername);
	/* Flag executions whose prefix is not feasible */
2622 if (!isfeasibleprefix())
2623 model_print("INFEASIBLE EXECUTION!\n");
	/* Print the full action trace, labeled with the execution number */
2624 print_list(action_trace, stats.num_total);
2629 * Add a Thread to the system for the first time. Should only be called once
2631 * @param t The Thread to add
2633 void ModelChecker::add_thread(Thread *t)
2635 thread_map->put(id_to_int(t->get_id()), t);
2636 scheduler->add_thread(t);
2640 * Removes a thread from the scheduler.
2641 * @param the thread to remove.
2643 void ModelChecker::remove_thread(Thread *t)
2645 scheduler->remove_thread(t);
2649 * @brief Get a Thread reference by its ID
2650 * @param tid The Thread's ID
2651 * @return A Thread reference
2653 Thread * ModelChecker::get_thread(thread_id_t tid) const
2655 return thread_map->get(id_to_int(tid));
2659 * @brief Get a reference to the Thread in which a ModelAction was executed
2660 * @param act The ModelAction
2661 * @return A Thread reference
2663 Thread * ModelChecker::get_thread(ModelAction *act) const
2665 return get_thread(act->get_tid());
2669 * @brief Check if a Thread is currently enabled
2670 * @param t The Thread to check
2671 * @return True if the Thread is currently enabled
2673 bool ModelChecker::is_enabled(Thread *t) const
2675 return scheduler->is_enabled(t);
2679 * @brief Check if a Thread is currently enabled
2680 * @param tid The ID of the Thread to check
2681 * @return True if the Thread is currently enabled
2683 bool ModelChecker::is_enabled(thread_id_t tid) const
2685 return scheduler->is_enabled(tid);
2689 * Switch from a user-context to the "master thread" context (a.k.a. system
2690 * context). This switch is made with the intention of exploring a particular
2691 * model-checking action (described by a ModelAction object). Must be called
2692 * from a user-thread context.
2694 * @param act The current action that will be explored. May be NULL only if
2695 * trace is exiting via an assertion (see ModelChecker::set_assert and
2696 * ModelChecker::has_asserted).
2697 * @return Return the value returned by the current action
2699 uint64_t ModelChecker::switch_to_master(ModelAction *act)
2702 Thread *old = thread_current();
	/* Publish the action for the master context to process */
2703 set_current_action(act);
2704 old->set_state(THREAD_READY);
	/* Context-switch into the master (system) context; a negative return
	 * means the underlying swap primitive itself failed.
	 * NOTE(review): the failure path's exit statement is not visible in
	 * this listing. */
2705 if (Thread::swap(old, &system_context) < 0) {
2706 perror("swap threads");
	/* Execution resumes here once the master schedules this thread again;
	 * the action's result was stored on the Thread in the meantime */
2709 return old->get_return_value();
2713 * Takes the next step in the execution, if possible.
2714 * @param curr The current step to take
2715 * @return Returns true (success) if a step was taken and false otherwise.
 *
 * NOTE(review): several early-return statements and closing braces are not
 * visible in this listing; comments below describe intent only.
2717 bool ModelChecker::take_step(ModelAction *curr)
2722 Thread *curr_thrd = get_thread(curr);
2723 ASSERT(curr_thrd->get_state() == THREAD_READY);
	/* Process the current action; may suggest a specific thread to run next */
2725 Thread *next_thrd = check_current_action(curr);
	/* A blocked or completed thread must leave the scheduler */
2727 if (curr_thrd->is_blocked() || curr_thrd->is_complete())
2728 scheduler->remove_thread(curr_thrd);
	/* Let the scheduler pick (or confirm) the next thread */
2730 next_thrd = scheduler->next_thread(next_thrd);
2732 /* Infeasible -> don't take any more steps */
2733 if (is_infeasible())
	/* Feasible prefix with recorded bugs: stop exploring this execution */
2735 else if (isfeasibleprefix() && have_bug_reports()) {
	/* Optional bound on execution length, in used sequence numbers */
2740 if (params.bound != 0) {
2741 if (priv->used_sequence_numbers > params.bound) {
2746 DEBUG("(%d, %d)\n", curr_thrd ? id_to_int(curr_thrd->get_id()) : -1,
2747 next_thrd ? id_to_int(next_thrd->get_id()) : -1);
2750 * Launch end-of-execution release sequence fixups only when there are:
2752 * (1) no more user threads to run (or when execution replay chooses
2753 * the 'model_thread')
2754 * (2) pending release sequences
2755 * (3) pending assertions (i.e., data races)
2756 * (4) no pending promises
2758 if (!pending_rel_seqs->empty() && (!next_thrd || next_thrd->is_model_thread()) &&
2759 is_feasible_prefix_ignore_relseq() && !unrealizedraces.empty()) {
	/* NOTE(review): typo in the user-facing message below -- "seuqences"
	 * should read "sequences" (string left unchanged in this doc pass) */
2760 model_print("*** WARNING: release sequence fixup action (%zu pending release seuqences) ***\n",
2761 pending_rel_seqs->size());
	/* Queue a synthetic fixup action to be run in the model thread */
2762 ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
2763 std::memory_order_seq_cst, NULL, VALUE_NONE,
2765 set_current_action(fixup);
2769 /* next_thrd == NULL -> don't take any more steps */
2773 next_thrd->set_state(THREAD_RUNNING);
	/* A previously-deferred action is restarted without a context switch */
2775 if (next_thrd->get_pending() != NULL) {
2776 /* restart a pending action */
2777 set_current_action(next_thrd->get_pending());
2778 next_thrd->set_pending(NULL);
2779 next_thrd->set_state(THREAD_READY);
2783 /* Return false only if swap fails with an error */
2784 return (Thread::swap(&system_context, next_thrd) == 0);
2787 /** Wrapper to run the user's main function, with appropriate arguments */
2788 void user_main_wrapper(void *)
	/* Forward the argc/argv recorded in the model parameters to the user
	 * program's entry point */
2790 user_main(model->params.argc, model->params.argv);
2793 /** @brief Run ModelChecker for the user program */
2794 void ModelChecker::run()
	/* NOTE(review): the do-loop opening and the declaration of the thrd_t
	 * passed to Thread below are not visible in this listing. Each
	 * iteration of the enclosing do-while explores one execution. */
2798 Thread *t = new Thread(&user_thread, &user_main_wrapper, NULL);
2802 /* Run user thread up to its first action */
2803 scheduler->next_thread(t);
2804 Thread::swap(&system_context, t);
2806 /* Wait for all threads to complete */
2807 while (take_step(priv->current_action));
	/* next_execution() resets state and reports whether another interleaving
	 * remains to explore */
2808 } while (next_execution());