10 #include "snapshot-interface.h"
12 #include "clockvector.h"
13 #include "cyclegraph.h"
16 #include "threads-model.h"
19 #define INITIAL_THREAD_ID 0
/* Members of struct bug_message (the struct declaration line is elided from
 * this listing). Holds one formatted bug report in snapshot-managed memory. */
/* Constructor: format 'str' into a snapshot-allocated message buffer. */
24 bug_message(const char *str) {
25 const char *fmt = " [BUG] %s\n";
/* strlen(fmt) + strlen(str) is sufficient: the "%s" (2 chars) in fmt is
 * replaced by str, leaving room for the NUL terminator with one byte spare. */
26 msg = (char *)snapshot_malloc(strlen(fmt) + strlen(str));
27 sprintf(msg, fmt, str);
/* Destructor: release the snapshot-allocated buffer, if any. */
29 ~bug_message() { if (msg) snapshot_free(msg); }
/* Print the formatted bug message via the model-checker's printer. */
32 void print() { model_print("%s", msg); }
38 * Structure for holding small ModelChecker members that should be snapshotted
40 struct model_snapshot_members {
/* Default constructor: initialize the per-execution state that is rolled
 * back on every snapshot restore. (Some initializers are elided here.) */
41 model_snapshot_members() :
43 /* First thread created will have id INITIAL_THREAD_ID */
44 next_thread_id(INITIAL_THREAD_ID),
45 used_sequence_numbers(0),
50 failed_promise(false),
51 too_many_reads(false),
52 bad_synchronization(false),
/* Destructor: free each heap-allocated bug_message in 'bugs'
 * (loop body elided from this listing). */
56 ~model_snapshot_members() {
57 for (unsigned int i = 0; i < bugs.size(); i++)
/* The ModelAction passed from user context to the scheduler step. */
62 ModelAction *current_action;
/* Next thread id to hand out; doubles as the thread count. */
63 unsigned int next_thread_id;
64 modelclock_t used_sequence_numbers;
/* Latest backtracking point cached by set_backtracking(). */
66 ModelAction *next_backtrack;
/* Bug reports for this execution, allocated with the snapshot allocator. */
67 std::vector< bug_message *, SnapshotAlloc<bug_message *> > bugs;
68 struct execution_stats stats;
71 /** @brief Incorrectly-ordered synchronization was made */
72 bool bad_synchronization;
78 /** @brief Constructor */
79 ModelChecker::ModelChecker(struct model_params params) :
80 /* Initialize default scheduler */
82 scheduler(new Scheduler()),
84 earliest_diverge(NULL),
85 action_trace(new action_list_t()),
86 thread_map(new HashTable<int, Thread *, int>()),
/* Per-location action lists and waiter queues, keyed by memory address. */
87 obj_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
88 lock_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
89 condvar_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
90 obj_thrd_map(new HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4 >()),
/* Snapshot-allocated containers: their contents roll back with the snapshot. */
91 promises(new std::vector< Promise *, SnapshotAlloc<Promise *> >()),
92 futurevalues(new std::vector< struct PendingFutureValue, SnapshotAlloc<struct PendingFutureValue> >()),
93 pending_rel_seqs(new std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >()),
/* Sized 1 up front so the model thread (id 0) has a slot. */
94 thrd_last_action(new std::vector< ModelAction *, SnapshotAlloc<ModelAction *> >(1)),
95 thrd_last_fence_release(new std::vector< ModelAction *, SnapshotAlloc<ModelAction *> >()),
96 node_stack(new NodeStack()),
97 priv(new struct model_snapshot_members()),
98 mo_graph(new CycleGraph())
100 /* Initialize a model-checker thread, for special ModelActions */
101 model_thread = new Thread(get_next_id());
102 thread_map->put(id_to_int(model_thread->get_id()), model_thread);
105 /** @brief Destructor */
/* Tears down all heap-allocated maps/vectors owned by the checker.
 * Several delete statements are elided from this listing. */
106 ModelChecker::~ModelChecker()
108 for (unsigned int i = 0; i < get_num_threads(); i++)
109 delete thread_map->get(i);
114 delete lock_waiters_map;
115 delete condvar_waiters_map;
/* Promises are owned by the checker, so delete the elements before the vector. */
118 for (unsigned int i = 0; i < promises->size(); i++)
119 delete (*promises)[i];
122 delete pending_rel_seqs;
124 delete thrd_last_action;
125 delete thrd_last_fence_release;
/* Look up the action list for 'ptr', creating (and presumably inserting —
 * the insertion line is elided) an empty list on first access. */
132 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr)
134 action_list_t *tmp = hash->get(ptr);
136 tmp = new action_list_t();
/* Same get-or-create pattern as get_safe_ptr_action(), but for the
 * per-thread action-list vectors keyed by object address. */
142 static std::vector<action_list_t> * get_safe_ptr_vect_action(HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4> * hash, void * ptr)
144 std::vector<action_list_t> *tmp = hash->get(ptr);
146 tmp = new std::vector<action_list_t>();
153 * Restores user program to initial state and resets all model-checker data
156 void ModelChecker::reset_to_initial_state()
158 DEBUG("+++ Resetting to initial state +++\n");
159 node_stack->reset_execution();
161 /* Print all model-checker output before rollback */
/* Snapshot rollback: rewinds user-program memory to the step-0 snapshot. */
164 snapshotObject->backTrackBeforeStep(0);
167 /** @return a thread ID for a new Thread */
/* Post-increment: hands out ids 0, 1, 2, ... so next_thread_id always
 * equals the number of ids issued so far. */
168 thread_id_t ModelChecker::get_next_id()
170 return priv->next_thread_id++;
173 /** @return the number of user threads created during this execution */
174 unsigned int ModelChecker::get_num_threads() const
176 return priv->next_thread_id;
179 /** @return The currently executing Thread. */
180 Thread * ModelChecker::get_current_thread() const
182 return scheduler->get_current_thread();
185 /** @return a sequence number for a new ModelAction */
/* Pre-increment: sequence numbers start at 1; 0 is never a valid seq num. */
186 modelclock_t ModelChecker::get_next_seq_num()
188 return ++priv->used_sequence_numbers;
/* @return the Node at the head of the NodeStack (the current exploration point) */
191 Node * ModelChecker::get_curr_node() const
193 return node_stack->get_head();
197 * @brief Choose the next thread to execute.
199 * This function chooses the next thread that should execute. It can force the
200 * adjacency of read/write portions of a RMW action, force THREAD_CREATE to be
201 * followed by a THREAD_START, or it can enforce execution replay/backtracking.
202 * The model-checker may have no preference regarding the next thread (i.e.,
203 * when exploring a new execution ordering), in which case this will return
205 * @param curr The current ModelAction. This action might guide the choice of
207 * @return The next thread to run. If the model-checker has no preference, NULL.
209 Thread * ModelChecker::get_next_thread(ModelAction *curr)
214 /* Do not split atomic actions. */
/* NOTE(review): the guarding condition for this return is elided from this
 * listing — presumably it checks for the read half of an RMW; confirm. */
216 return thread_current();
217 /* The THREAD_CREATE action points to the created Thread */
218 else if (curr->get_type() == THREAD_CREATE)
219 return (Thread *)curr->get_location();
222 /* Have we completed exploring the preselected path? */
226 /* Else, we are trying to replay an execution */
227 ModelAction *next = node_stack->get_next()->get_action();
/* Track the earliest divergence point seen since the last feasible execution. */
229 if (next == diverge) {
230 if (earliest_diverge == NULL || *diverge < *earliest_diverge)
231 earliest_diverge=diverge;
233 Node *nextnode = next->get_node();
234 Node *prevnode = nextnode->get_parent();
235 scheduler->update_sleep_set(prevnode);
237 /* Reached divergence point */
/* Try each kind of alternative at this node in priority order: misc index,
 * promise set, read-from choice, future value, release-seq break. Whichever
 * increments successfully determines the replayed thread; the stack is
 * popped so exploration resumes from this node. */
238 if (nextnode->increment_misc()) {
239 /* The next node will try to satisfy a different misc_index values. */
240 tid = next->get_tid();
241 node_stack->pop_restofstack(2);
242 } else if (nextnode->increment_promise()) {
243 /* The next node will try to satisfy a different set of promises. */
244 tid = next->get_tid();
245 node_stack->pop_restofstack(2);
246 } else if (nextnode->increment_read_from()) {
247 /* The next node will read from a different value. */
248 tid = next->get_tid();
249 node_stack->pop_restofstack(2);
250 } else if (nextnode->increment_future_value()) {
251 /* The next node will try to read from a different future value. */
252 tid = next->get_tid();
253 node_stack->pop_restofstack(2);
254 } else if (nextnode->increment_relseq_break()) {
255 /* The next node will try to resolve a release sequence differently */
256 tid = next->get_tid();
257 node_stack->pop_restofstack(2);
259 /* Make a different thread execute for next step */
/* All alternatives at this node exhausted: put the replayed thread to sleep
 * and switch to the parent node's backtrack choice instead. */
260 scheduler->add_sleep(thread_map->get(id_to_int(next->get_tid())));
261 tid = prevnode->get_next_backtrack();
262 /* Make sure the backtracked thread isn't sleeping. */
263 node_stack->pop_restofstack(1);
264 if (diverge==earliest_diverge) {
265 earliest_diverge=prevnode->get_action();
268 /* The correct sleep set is in the parent node. */
271 DEBUG("*** Divergence point ***\n");
/* Not at the divergence point: simply replay the recorded thread. */
275 tid = next->get_tid();
277 DEBUG("*** ModelChecker chose next thread = %d ***\n", id_to_int(tid));
278 ASSERT(tid != THREAD_ID_T_NONE);
279 return thread_map->get(id_to_int(tid));
283 * We need to know what the next actions of all threads in the sleep
284 * set will be. This method computes them and stores the actions at
285 * the corresponding thread object's pending action.
288 void ModelChecker::execute_sleep_set()
290 for (unsigned int i = 0; i < get_num_threads(); i++) {
291 thread_id_t tid = int_to_id(i);
292 Thread *thr = get_thread(tid);
/* Only compute a pending action once per sleeping thread. */
293 if (scheduler->is_sleep_set(thr) && thr->get_pending() == NULL) {
294 thr->set_state(THREAD_RUNNING);
295 scheduler->next_thread(thr);
/* Run the sleeping thread one step in user context; it deposits its next
 * ModelAction into priv->current_action before switching back. */
296 Thread::swap(&system_context, thr);
297 priv->current_action->set_sleep_flag();
298 thr->set_pending(priv->current_action);
/* Clear the scratch slot so a stale action is never re-processed. */
301 priv->current_action = NULL;
/* Remove from the sleep set any thread whose pending action could now
 * synchronize with 'curr' (RMW-read halves are excluded). */
304 void ModelChecker::wake_up_sleeping_actions(ModelAction *curr)
306 for (unsigned int i = 0; i < get_num_threads(); i++) {
307 Thread *thr = get_thread(int_to_id(i));
308 if (scheduler->is_sleep_set(thr)) {
309 ModelAction *pending_act = thr->get_pending();
310 if ((!curr->is_rmwr()) && pending_act->could_synchronize_with(curr))
311 //Remove this thread from sleep set
312 scheduler->remove_sleep(thr);
317 /** @brief Alert the model-checker that an incorrectly-ordered
318 * synchronization was made */
/* Sets the snapshotted flag; the flag resets automatically on rollback. */
319 void ModelChecker::set_bad_synchronization()
321 priv->bad_synchronization = true;
/* @return true if the exploration has been aborted via set_assert() */
324 bool ModelChecker::has_asserted() const
326 return priv->asserted;
/* Request that the current exploration step terminate. */
329 void ModelChecker::set_assert()
331 priv->asserted = true;
335 * Check if we are in a deadlock. Should only be called at the end of an
336 * execution, although it should not give false positives in the middle of an
337 * execution (there should be some ENABLED thread).
339 * @return True if program is in a deadlock; false otherwise
341 bool ModelChecker::is_deadlocked() const
343 bool blocking_threads = false;
344 for (unsigned int i = 0; i < get_num_threads(); i++) {
345 thread_id_t tid = int_to_id(i);
/* NOTE(review): lines 346-347 are elided here — presumably they return
 * false early if any thread is still enabled; confirm against full source. */
348 Thread *t = get_thread(tid);
/* A user thread stuck with a pending action is a blocked thread. */
349 if (!t->is_model_thread() && t->get_pending())
350 blocking_threads = true;
352 return blocking_threads;
356 * Check if this is a complete execution. That is, have all thread completed
357 * execution (rather than exiting because sleep sets have forced a redundant
360 * @return True if the execution is complete.
362 bool ModelChecker::is_complete_execution() const
/* Any still-enabled thread means the execution is not complete (the early
 * return and the final 'return true' are elided from this listing). */
364 for (unsigned int i = 0; i < get_num_threads(); i++)
365 if (is_enabled(int_to_id(i)))
371 * @brief Assert a bug in the executing program.
373 * Use this function to assert any sort of bug in the user program. If the
374 * current trace is feasible (actually, a prefix of some feasible execution),
375 * then this execution will be aborted, printing the appropriate message. If
376 * the current trace is not yet feasible, the error message will be stashed and
377 * printed if the execution ever becomes feasible.
379 * @param msg Descriptive message for the bug (do not include newline char)
380 * @return True if bug is immediately-feasible
382 bool ModelChecker::assert_bug(const char *msg)
/* Stash the message regardless of feasibility; printed later if relevant. */
384 priv->bugs.push_back(new bug_message(msg));
/* Feasible-prefix handling (abort of this execution) is elided here. */
386 if (isfeasibleprefix()) {
394 * @brief Assert a bug in the executing program, asserted by a user thread
395 * @see ModelChecker::assert_bug
396 * @param msg Descriptive message for the bug (do not include newline char)
398 void ModelChecker::assert_user_bug(const char *msg)
400 /* If feasible bug, bail out now */
/* Yield back to the model-checker context so it can abort this execution
 * (the guarding call to assert_bug() is elided from this listing). */
402 switch_to_master(NULL);
405 /** @return True, if any bugs have been reported for this execution */
406 bool ModelChecker::have_bug_reports() const
408 return priv->bugs.size() != 0;
411 /** @brief Print bug report listing for this execution (if any bugs exist) */
412 void ModelChecker::print_bugs() const
414 if (have_bug_reports()) {
/* Header with correct pluralization, then one line per stashed bug_message. */
415 model_print("Bug report: %zu bug%s detected\n",
417 priv->bugs.size() > 1 ? "s" : "");
418 for (unsigned int i = 0; i < priv->bugs.size(); i++)
419 priv->bugs[i]->print();
424 * @brief Record end-of-execution stats
426 * Must be run when exiting an execution. Records various stats.
427 * @see struct execution_stats
429 void ModelChecker::record_stats()
/* Classify this execution into exactly one mutually-exclusive bucket:
 * infeasible > buggy > complete > redundant (sleep-set-forced). */
432 if (!isfeasibleprefix())
433 stats.num_infeasible++;
434 else if (have_bug_reports())
435 stats.num_buggy_executions++;
436 else if (is_complete_execution())
437 stats.num_complete++;
439 stats.num_redundant++;
442 /** @brief Print execution stats */
443 void ModelChecker::print_stats() const
445 model_print("Number of complete, bug-free executions: %d\n", stats.num_complete);
446 model_print("Number of redundant executions: %d\n", stats.num_redundant);
447 model_print("Number of buggy executions: %d\n", stats.num_buggy_executions);
448 model_print("Number of infeasible executions: %d\n", stats.num_infeasible);
449 model_print("Total executions: %d\n", stats.num_total);
450 model_print("Total nodes created: %d\n", node_stack->get_total_nodes());
454 * @brief End-of-exeuction print
455 * @param printbugs Should any existing bugs be printed?
457 void ModelChecker::print_execution(bool printbugs) const
459 print_program_output();
/* Divergence info is only shown in verbose/debug mode. */
461 if (DBG_ENABLED() || params.verbose) {
462 model_print("Earliest divergence point since last feasible execution:\n");
463 if (earliest_diverge)
464 earliest_diverge->print();
466 model_print("(Not set)\n");
472 /* Don't print invalid bugs */
481 * Queries the model-checker for more executions to explore and, if one
482 * exists, resets the model-checker state to execute a new execution.
484 * @return If there are more executions to explore, return true. Otherwise,
487 bool ModelChecker::next_execution()
490 /* Is this execution a feasible execution that's worth bug-checking? */
491 bool complete = isfeasibleprefix() && (is_complete_execution() ||
494 /* End-of-execution bug checks */
/* NOTE(review): the deadlock check guarding this call is elided here. */
497 assert_bug("Deadlock detected");
505 if (DBG_ENABLED() || params.verbose || have_bug_reports())
506 print_execution(complete);
/* Reset captured program output for the next execution. */
508 clear_program_output();
511 earliest_diverge = NULL;
/* No backtracking point left => exploration is finished. */
513 if ((diverge = get_next_backtrack()) == NULL)
517 model_print("Next execution will diverge at:\n");
521 reset_to_initial_state();
/* Find the most recent prior action that conflicts with 'act' — i.e. the
 * latest candidate backtracking point. Searches the per-location action
 * list newest-to-oldest; the conflict test depends on act's type.
 * (case labels/returns between switch arms are partially elided here.) */
525 ModelAction * ModelChecker::get_last_conflict(ModelAction *act)
527 switch (act->get_type()) {
532 /* Optimization: relaxed operations don't need backtracking */
533 if (act->is_relaxed())
535 /* linear search: from most recent to oldest */
536 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
537 action_list_t::reverse_iterator rit;
538 for (rit = list->rbegin(); rit != list->rend(); rit++) {
539 ModelAction *prev = *rit;
/* Atomic read/write/RMW: conflict = any prior synchronizable action. */
540 if (prev->could_synchronize_with(act))
546 case ATOMIC_TRYLOCK: {
547 /* linear search: from most recent to oldest */
548 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
549 action_list_t::reverse_iterator rit;
550 for (rit = list->rbegin(); rit != list->rend(); rit++) {
551 ModelAction *prev = *rit;
/* Lock/trylock: conflict = a prior conflicting lock operation. */
552 if (act->is_conflicting_lock(prev))
557 case ATOMIC_UNLOCK: {
558 /* linear search: from most recent to oldest */
559 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
560 action_list_t::reverse_iterator rit;
561 for (rit = list->rbegin(); rit != list->rend(); rit++) {
562 ModelAction *prev = *rit;
/* Unlock: conflict = another thread's failed trylock. */
563 if (!act->same_thread(prev)&&prev->is_failed_trylock())
569 /* linear search: from most recent to oldest */
570 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
571 action_list_t::reverse_iterator rit;
572 for (rit = list->rbegin(); rit != list->rend(); rit++) {
573 ModelAction *prev = *rit;
/* Wait (presumably — case label elided): failed trylocks and notifies
 * from other threads both conflict. */
574 if (!act->same_thread(prev)&&prev->is_failed_trylock())
576 if (!act->same_thread(prev)&&prev->is_notify())
582 case ATOMIC_NOTIFY_ALL:
583 case ATOMIC_NOTIFY_ONE: {
584 /* linear search: from most recent to oldest */
585 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
586 action_list_t::reverse_iterator rit;
587 for (rit = list->rbegin(); rit != list->rend(); rit++) {
588 ModelAction *prev = *rit;
/* Notify: conflict = another thread's wait on the same condvar. */
589 if (!act->same_thread(prev)&&prev->is_wait())
600 /** This method finds backtracking points where we should try to
601 * reorder the parameter ModelAction against.
603 * @param the ModelAction to find backtracking points for.
605 void ModelChecker::set_backtracking(ModelAction *act)
607 Thread *t = get_thread(act);
608 ModelAction *prev = get_last_conflict(act);
/* (Early return when prev == NULL is elided from this listing.) */
612 Node *node = prev->get_node()->get_parent();
/* If act's own thread is enabled at 'node', only it needs a backtrack
 * point; otherwise consider every thread id. */
614 int low_tid, high_tid;
615 if (node->is_enabled(t)) {
616 low_tid = id_to_int(act->get_tid());
617 high_tid = low_tid+1;
620 high_tid = get_num_threads();
623 for(int i = low_tid; i < high_tid; i++) {
624 thread_id_t tid = int_to_id(i);
626 /* Make sure this thread can be enabled here. */
627 if (i >= node->get_num_threads())
630 /* Don't backtrack into a point where the thread is disabled or sleeping. */
631 if (node->enabled_status(tid)!=THREAD_ENABLED)
634 /* Check if this has been explored already */
635 if (node->has_been_explored(tid))
638 /* See if fairness allows */
/* With a fairness window, skip tid unless no enabled thread has priority. */
639 if (model->params.fairwindow != 0 && !node->has_priority(tid)) {
641 for(int t=0;t<node->get_num_threads();t++) {
642 thread_id_t tother=int_to_id(t);
643 if (node->is_enabled(tother) && node->has_priority(tother)) {
651 /* Cache the latest backtracking point */
652 if (!priv->next_backtrack || *prev > *priv->next_backtrack)
653 priv->next_backtrack = prev;
655 /* If this is a new backtracking point, mark the tree */
656 if (!node->set_backtrack(tid))
658 DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
659 id_to_int(prev->get_tid()),
660 id_to_int(t->get_id()));
669 * Returns last backtracking point. The model checker will explore a different
670 * path for this point in the next execution.
671 * @return The ModelAction at which the next execution should diverge.
673 ModelAction * ModelChecker::get_next_backtrack()
/* Consume-once semantics: clear the cached point after reading it. */
675 ModelAction *next = priv->next_backtrack;
676 priv->next_backtrack = NULL;
681 * Processes a read or rmw model action.
682 * @param curr is the read model action to process.
683 * @param second_part_of_rmw is boolean that is true is this is the second action of a rmw.
684 * @return True if processing this read updates the mo_graph.
686 bool ModelChecker::process_read(ModelAction *curr, bool second_part_of_rmw)
688 uint64_t value = VALUE_NONE;
689 bool updated = false;
/* Case 1: read from an existing write chosen at the node. */
691 const ModelAction *reads_from = curr->get_node()->get_read_from();
692 if (reads_from != NULL) {
/* Stage mo_graph edits so they can be rolled back if infeasible. */
693 mo_graph->startChanges();
695 value = reads_from->get_value();
696 bool r_status = false;
/* Recency/mod-order checks are skipped for the write half of an RMW
 * (they were already done for the read half). */
698 if (!second_part_of_rmw) {
699 check_recency(curr, reads_from);
700 r_status = r_modification_order(curr, reads_from);
/* If this choice made the execution infeasible and another read-from or
 * future value remains, roll back and retry with the next choice. */
704 if (!second_part_of_rmw&&is_infeasible()&&(curr->get_node()->increment_read_from()||curr->get_node()->increment_future_value())) {
705 mo_graph->rollbackChanges();
706 priv->too_many_reads = false;
/* Commit the rf relation and the staged mo_graph changes. */
710 read_from(curr, reads_from);
711 mo_graph->commitChanges();
712 mo_check_promises(curr->get_tid(), reads_from);
/* Case 2: no existing write — read a speculative future value and
 * register a Promise that some later write will supply it. */
715 } else if (!second_part_of_rmw) {
716 /* Read from future value */
717 value = curr->get_node()->get_future_value();
718 modelclock_t expiration = curr->get_node()->get_future_value_expiration();
719 curr->set_read_from(NULL);
720 Promise *valuepromise = new Promise(curr, value, expiration);
721 promises->push_back(valuepromise);
/* Deliver the read value back to the user thread. */
723 get_thread(curr)->set_return_value(value);
729 * Processes a lock, trylock, or unlock model action. @param curr is
730 * the read model action to process.
732 * The try lock operation checks whether the lock is taken. If not,
733 * it falls to the normal lock operation case. If so, it returns
736 * The lock operation has already been checked that it is enabled, so
737 * it just grabs the lock and synchronizes with the previous unlock.
739 * The unlock operation has to re-enable all of the threads that are
740 * waiting on the lock.
742 * @return True if synchronization was updated; false otherwise
744 bool ModelChecker::process_mutex(ModelAction *curr) {
745 std::mutex *mutex=NULL;
746 struct std::mutex_state *state=NULL;
/* Locate the mutex: lock ops carry it as the location; wait ops carry it
 * as the action's value. */
748 if (curr->is_trylock() || curr->is_lock() || curr->is_unlock()) {
749 mutex = (std::mutex *)curr->get_location();
750 state = mutex->get_state();
751 } else if(curr->is_wait()) {
752 mutex = (std::mutex *)curr->get_value();
753 state = mutex->get_state();
756 switch (curr->get_type()) {
757 case ATOMIC_TRYLOCK: {
758 bool success = !state->islocked;
759 curr->set_try_lock(success);
/* Failed trylock returns 0 to the user thread; success returns 1 and
 * falls through to the lock case below. */
761 get_thread(curr)->set_return_value(0);
764 get_thread(curr)->set_return_value(1);
766 //otherwise fall into the lock case
/* Detect use of the mutex before its initialization happened-before us. */
768 if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
769 assert_bug("Lock access before initialization");
770 state->islocked = true;
771 ModelAction *unlock = get_last_unlock(curr);
772 //synchronize with the previous unlock statement
773 if (unlock != NULL) {
774 curr->synchronize_with(unlock);
779 case ATOMIC_UNLOCK: {
781 state->islocked = false;
782 //wake up the other threads
783 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, curr->get_location());
784 //activate all the waiting threads
785 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
786 scheduler->wake(get_thread(*rit));
/* ATOMIC_WAIT (case label elided): release the mutex, wake lock-waiters,
 * then maybe block on the condvar. */
793 state->islocked = false;
794 //wake up the other threads
795 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, (void *) curr->get_value());
796 //activate all the waiting threads
797 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
798 scheduler->wake(get_thread(*rit));
801 //check whether we should go to sleep or not...simulate spurious failures
/* misc==0 => really sleep on the condvar; other misc values model a
 * spurious wakeup where the wait returns immediately. */
802 if (curr->get_node()->get_misc()==0) {
803 get_safe_ptr_action(condvar_waiters_map, curr->get_location())->push_back(curr);
805 scheduler->sleep(get_current_thread());
809 case ATOMIC_NOTIFY_ALL: {
810 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
811 //activate all the waiting threads
812 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
813 scheduler->wake(get_thread(*rit));
818 case ATOMIC_NOTIFY_ONE: {
819 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
/* The node's misc index selects WHICH waiter wakes, so all choices get
 * explored across executions. */
820 int wakeupthread=curr->get_node()->get_misc();
821 action_list_t::iterator it = waiters->begin();
822 advance(it, wakeupthread);
823 scheduler->wake(get_thread(*it));
835 * Process a write ModelAction
836 * @param curr The ModelAction to process
837 * @return True if the mo_graph was updated or promises were resolved
839 bool ModelChecker::process_write(ModelAction *curr)
841 bool updated_mod_order = w_modification_order(curr);
842 bool updated_promises = resolve_promises(curr);
/* Once all promises are resolved, re-examine the queued future values with
 * the now-more-complete modification order. */
844 if (promises->size() == 0) {
845 for (unsigned int i = 0; i < futurevalues->size(); i++) {
846 struct PendingFutureValue pfv = (*futurevalues)[i];
847 //Do more ambitious checks now that mo is more complete
/* If mo permits the future read and the value is newly added to the node,
 * record the reader as the latest backtracking point. */
848 if (mo_may_allow(pfv.writer, pfv.act)&&
849 pfv.act->get_node()->add_future_value(pfv.writer->get_value(), pfv.writer->get_seq_number()+params.maxfuturedelay) &&
850 (!priv->next_backtrack || *pfv.act > *priv->next_backtrack))
851 priv->next_backtrack = pfv.act;
853 futurevalues->resize(0);
856 mo_graph->commitChanges();
857 mo_check_promises(curr->get_tid(), curr);
/* Writes return VALUE_NONE to the user thread. */
859 get_thread(curr)->set_return_value(VALUE_NONE);
860 return updated_mod_order || updated_promises;
864 * Process a fence ModelAction
865 * @param curr The ModelAction to process
866 * @return True if synchronization was updated
868 bool ModelChecker::process_fence(ModelAction *curr)
871 * fence-relaxed: no-op
872 * fence-release: only log the occurence (not in this function), for
873 * use in later synchronization
874 * fence-acquire (this function): search for hypothetical release
877 bool updated = false;
878 if (curr->is_acquire()) {
879 action_list_t *list = action_trace;
880 action_list_t::reverse_iterator rit;
881 /* Find X : is_read(X) && X --sb-> curr */
/* Walk the whole trace newest-to-oldest, restricted to curr's own thread
 * (sequenced-before), stopping at thread start or a prior fence-acquire. */
882 for (rit = list->rbegin(); rit != list->rend(); rit++) {
883 ModelAction *act = *rit;
886 if (act->get_tid() != curr->get_tid())
888 /* Stop at the beginning of the thread */
889 if (act->is_thread_start())
891 /* Stop once we reach a prior fence-acquire */
892 if (act->is_fence() && act->is_acquire())
896 /* read-acquire will find its own release sequences */
897 if (act->is_acquire())
900 /* Establish hypothetical release sequences */
901 rel_heads_list_t release_heads;
902 get_release_seq_heads(curr, act, &release_heads);
/* Synchronize the fence with every release head found; flag any
 * incorrectly-ordered synchronization rather than failing. */
903 for (unsigned int i = 0; i < release_heads.size(); i++)
904 if (!curr->synchronize_with(release_heads[i]))
905 set_bad_synchronization();
906 if (release_heads.size() != 0)
914 * @brief Process the current action for thread-related activity
916 * Performs current-action processing for a THREAD_* ModelAction. Proccesses
917 * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
918 * synchronization, etc. This function is a no-op for non-THREAD actions
919 * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
921 * @param curr The current action
922 * @return True if synchronization was updated or a thread completed
924 bool ModelChecker::process_thread_action(ModelAction *curr)
926 bool updated = false;
928 switch (curr->get_type()) {
929 case THREAD_CREATE: {
/* Record the creating action on the new Thread (location = Thread*). */
930 Thread *th = (Thread *)curr->get_location();
931 th->set_creation(curr);
/* THREAD_JOIN (case label elided): synchronize with the joined thread's
 * last action. */
935 Thread *blocking = (Thread *)curr->get_location();
936 ModelAction *act = get_last_action(blocking->get_id());
937 curr->synchronize_with(act);
938 updated = true; /* trigger rel-seq checks */
941 case THREAD_FINISH: {
/* Wake every thread blocked joining on the finishing thread. */
942 Thread *th = get_thread(curr);
943 while (!th->wait_list_empty()) {
944 ModelAction *act = th->pop_wait_list();
945 scheduler->wake(get_thread(act));
948 updated = true; /* trigger rel-seq checks */
/* THREAD_START (case label elided): check promises against the starting
 * thread's clock vector. */
952 check_promises(curr->get_tid(), NULL, curr->get_cv());
963 * @brief Process the current action for release sequence fixup activity
965 * Performs model-checker release sequence fixups for the current action,
966 * forcing a single pending release sequence to break (with a given, potential
967 * "loose" write) or to complete (i.e., synchronize). If a pending release
968 * sequence forms a complete release sequence, then we must perform the fixup
969 * synchronization, mo_graph additions, etc.
971 * @param curr The current action; must be a release sequence fixup action
972 * @param work_queue The work queue to which to add work items as they are
975 void ModelChecker::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
/* Pop the pending release sequence this fixup action resolves; the node's
 * relseq_break write (if any) is the chosen "breaking" write. */
977 const ModelAction *write = curr->get_node()->get_relseq_break();
978 struct release_seq *sequence = pending_rel_seqs->back();
979 pending_rel_seqs->pop_back();
981 ModelAction *acquire = sequence->acquire;
982 const ModelAction *rf = sequence->rf;
983 const ModelAction *release = sequence->release;
987 ASSERT(release->same_thread(rf));
991 * @todo Forcing a synchronization requires that we set
992 * modification order constraints. For instance, we can't allow
993 * a fixup sequence in which two separate read-acquire
994 * operations read from the same sequence, where the first one
995 * synchronizes and the other doesn't. Essentially, we can't
996 * allow any writes to insert themselves between 'release' and
1000 /* Must synchronize */
1001 if (!acquire->synchronize_with(release)) {
1002 set_bad_synchronization();
1005 /* Re-check all pending release sequences */
1006 work_queue->push_back(CheckRelSeqWorkEntry(NULL));
1007 /* Re-check act for mo_graph edges */
1008 work_queue->push_back(MOEdgeWorkEntry(acquire));
1010 /* propagate synchronization to later actions */
/* Walk backwards from the end of the trace to 'acquire', re-synchronizing
 * every action that happens-after the acquire. */
1011 action_list_t::reverse_iterator rit = action_trace->rbegin();
1012 for (; (*rit) != acquire; rit++) {
1013 ModelAction *propagate = *rit;
1014 if (acquire->happens_before(propagate)) {
1015 propagate->synchronize_with(acquire);
1016 /* Re-check 'propagate' for mo_graph edges */
1017 work_queue->push_back(MOEdgeWorkEntry(propagate));
1021 /* Break release sequence with new edges:
1022 * release --mo--> write --mo--> rf */
/* The alternative branch (sequence broken rather than synchronized):
 * order the loose write between release and rf in the mo_graph. */
1023 mo_graph->addEdge(release, write);
1024 mo_graph->addEdge(write, rf);
1027 /* See if we have realized a data race */
1032 * Initialize the current action by performing one or more of the following
1033 * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
1034 * in the NodeStack, manipulating backtracking sets, allocating and
1035 * initializing clock vectors, and computing the promises to fulfill.
1037 * @param curr The current action, as passed from the user context; may be
1038 * freed/invalidated after the execution of this function, with a different
1039 * action "returned" its place (pass-by-reference)
1040 * @return True if curr is a newly-explored action; false otherwise
1042 bool ModelChecker::initialize_curr_action(ModelAction **curr)
1044 ModelAction *newcurr;
/* RMW write half: merge with the earlier read half rather than exploring
 * a new NodeStack entry. */
1046 if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
1047 newcurr = process_rmw(*curr);
1050 if (newcurr->is_rmw())
1051 compute_promises(newcurr);
/* Otherwise: stamp a sequence number and step the NodeStack, which either
 * returns a recycled action (replay) or records *curr as new. */
1057 (*curr)->set_seq_number(get_next_seq_num());
1059 newcurr = node_stack->explore_action(*curr, scheduler->get_enabled_array());
1061 /* First restore type and order in case of RMW operation */
1062 if ((*curr)->is_rmwr())
1063 newcurr->copy_typeandorder(*curr);
1065 ASSERT((*curr)->get_location() == newcurr->get_location());
1066 newcurr->copy_from_new(*curr);
1068 /* Discard duplicate ModelAction; use action from NodeStack */
1071 /* Always compute new clock vector */
1072 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1075 return false; /* Action was explored previously */
1079 /* Always compute new clock vector */
1080 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1082 /* Assign most recent release fence */
1083 newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
1086 * Perform one-time actions when pushing new ModelAction onto
1089 if (newcurr->is_write())
1090 compute_promises(newcurr);
1091 else if (newcurr->is_relseq_fixup())
1092 compute_relseq_breakwrites(newcurr);
1093 else if (newcurr->is_wait())
/* Two misc choices for a wait: real sleep vs. spurious wakeup. */
1094 newcurr->get_node()->set_misc_max(2);
1095 else if (newcurr->is_notify_one()) {
/* One misc choice per condvar waiter: which thread gets notified. */
1096 newcurr->get_node()->set_misc_max(get_safe_ptr_action(condvar_waiters_map, newcurr->get_location())->size());
1098 return true; /* This was a new ModelAction */
1103 * @brief Establish reads-from relation between two actions
1105 * Perform basic operations involved with establishing a concrete rf relation,
1106 * including setting the ModelAction data and checking for release sequences.
1108 * @param act The action that is reading (must be a read)
1109 * @param rf The action from which we are reading (must be a write)
1111 * @return True if this read established synchronization
1113 bool ModelChecker::read_from(ModelAction *act, const ModelAction *rf)
1115 act->set_read_from(rf);
/* Acquire reads additionally synchronize with the heads of any release
 * sequences that rf belongs to. */
1116 if (rf != NULL && act->is_acquire()) {
1117 rel_heads_list_t release_heads;
1118 get_release_seq_heads(act, act, &release_heads);
1119 int num_heads = release_heads.size();
1120 for (unsigned int i = 0; i < release_heads.size(); i++)
1121 if (!act->synchronize_with(release_heads[i])) {
1122 set_bad_synchronization();
/* NOTE(review): elided lines presumably decrement num_heads on failed
 * synchronization — confirm against full source. */
1125 return num_heads > 0;
1131 * @brief Check whether a model action is enabled.
1133 * Checks whether a lock or join operation would be successful (i.e., is the
1134 * lock already locked, or is the joined thread already complete). If not, put
1135 * the action in a waiter list.
1137 * @param curr is the ModelAction to check whether it is enabled.
1138 * @return a bool that indicates whether the action is enabled.
1140 bool ModelChecker::check_action_enabled(ModelAction *curr) {
1141 if (curr->is_lock()) {
1142 std::mutex *lock = (std::mutex *)curr->get_location();
1143 struct std::mutex_state *state = lock->get_state();
/* Lock already held: queue curr so the eventual unlock can wake us. */
1144 if (state->islocked) {
1145 //Stick the action in the appropriate waiting queue
1146 get_safe_ptr_action(lock_waiters_map, curr->get_location())->push_back(curr);
1149 } else if (curr->get_type() == THREAD_JOIN) {
/* Join target still running: block on its wait list until THREAD_FINISH. */
1150 Thread *blocking = (Thread *)curr->get_location();
1151 if (!blocking->is_complete()) {
1152 blocking->push_wait_list(curr);
1161 * Stores the ModelAction for the current thread action. Call this
1162 * immediately before switching from user- to system-context to pass
1163 * data between them.
1164 * @param act The ModelAction created by the user-thread action
1166 void ModelChecker::set_current_action(ModelAction *act) {
1167 priv->current_action = act;
1171 * This is the heart of the model checker routine. It performs model-checking
1172 * actions corresponding to a given "current action." Among other processes, it
1173 * calculates reads-from relationships, updates synchronization clock vectors,
1174 * forms a memory_order constraints graph, and handles replay/backtrack
1175 * execution when running permutations of previously-observed executions.
1177 * @param curr The current action to process
1178 * @return The next Thread that must be executed. May be NULL if ModelChecker
1179 * makes no choice (e.g., according to replay execution, combining RMW actions,
Thread * ModelChecker::check_current_action(ModelAction *curr)
	/* An RMW/RMWC action is the second half of an earlier RMWR */
	bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
	if (!check_action_enabled(curr)) {
		/* Make the execution look like we chose to run this action
		 * much later, when a lock/join can succeed */
		get_current_thread()->set_pending(curr);
		scheduler->sleep(get_current_thread());
		return get_next_thread(NULL);
	bool newly_explored = initialize_curr_action(&curr);
	wake_up_sleeping_actions(curr);
	/* Add the action to lists before any other model-checking tasks */
	if (!second_part_of_rmw)
		add_action_to_lists(curr);
	/* Build may_read_from set for newly-created actions */
	if (newly_explored && curr->is_read())
		build_reads_from_past(curr);
	/* Initialize work_queue with the "current action" work */
	work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
	/* Drain the queue; handlers below may enqueue further work items */
	while (!work_queue.empty() && !has_asserted()) {
		WorkQueueEntry work = work_queue.front();
		work_queue.pop_front();
		switch (work.type) {
		case WORK_CHECK_CURR_ACTION: {
			ModelAction *act = work.action;
			bool update = false; /* update this location's release seq's */
			bool update_all = false; /* update all release seq's */
			/* Dispatch to the per-kind processing routines */
			if (process_thread_action(curr))
			if (act->is_read() && process_read(act, second_part_of_rmw))
			if (act->is_write() && process_write(act))
			if (act->is_fence() && process_fence(act))
			if (act->is_mutex_op() && process_mutex(act))
			if (act->is_relseq_fixup())
				process_relseq_fixup(curr, &work_queue);
				/* NULL location: re-check release sequences at every location */
				work_queue.push_back(CheckRelSeqWorkEntry(NULL));
				work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
		case WORK_CHECK_RELEASE_SEQ:
			resolve_release_sequences(work.location, &work_queue);
		case WORK_CHECK_MO_EDGES: {
			/** @todo Complete verification of work_queue */
			ModelAction *act = work.action;
			bool updated = false;
			if (act->is_read()) {
				const ModelAction *rf = act->get_reads_from();
				if (rf != NULL && r_modification_order(act, rf))
			if (act->is_write()) {
				if (w_modification_order(act))
			/* Commit any mo_graph edges added by the handlers above */
			mo_graph->commitChanges();
				work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
	/* Record backtracking points before choosing the next thread */
	check_curr_backtracking(curr);
	set_backtracking(curr);
	return get_next_thread(curr);
void ModelChecker::check_curr_backtracking(ModelAction *curr)
	Node *currnode = curr->get_node();
	Node *parnode = currnode->get_parent();
	/* curr becomes the new earliest backtracking point when any
	 * unexplored alternative remains at this node (parent backtrack set,
	 * misc choices, read-from set, future values, promises, or a
	 * release-seq break) and curr precedes the previously recorded
	 * backtracking point (or none was recorded yet) */
	if ((!parnode->backtrack_empty() ||
			!currnode->misc_empty() ||
			!currnode->read_from_empty() ||
			!currnode->future_value_empty() ||
			!currnode->promise_empty() ||
			!currnode->relseq_break_empty())
			&& (!priv->next_backtrack ||
			*curr > *priv->next_backtrack)) {
		priv->next_backtrack = curr;
bool ModelChecker::promises_expired() const
	/* A promise expires once the execution's sequence numbers have
	 * advanced past its recorded expiration */
	for (unsigned int promise_index = 0; promise_index < promises->size(); promise_index++) {
		Promise *promise = (*promises)[promise_index];
		if (promise->get_expiration()<priv->used_sequence_numbers) {
1306 * This is the strongest feasibility check available.
1307 * @return whether the current trace (partial or complete) must be a prefix of
bool ModelChecker::isfeasibleprefix() const
	/* Feasible prefix = no lazily-pending release sequences AND no other
	 * infeasibility condition (promises, mo cycles, etc.) */
	return pending_rel_seqs->size() == 0 && is_feasible_prefix_ignore_relseq();
1316 * Returns whether the current completed trace is feasible, except for pending
1317 * release sequences.
1319 bool ModelChecker::is_feasible_prefix_ignore_relseq() const
1321 if (DBG_ENABLED() && promises->size() != 0)
1322 DEBUG("Infeasible: unrevolved promises\n");
1324 return !is_infeasible() && promises->size() == 0;
1328 * Check if the current partial trace is infeasible. Does not check any
1329 * end-of-execution flags, which might rule out the execution. Thus, this is
1330 * useful only for ruling an execution as infeasible.
1331 * @return whether the current partial trace is infeasible.
bool ModelChecker::is_infeasible() const
	if (DBG_ENABLED() && mo_graph->checkForRMWViolation())
		DEBUG("Infeasible: RMW violation\n");
	/* RMW violations plus every non-RMW infeasibility condition */
	return mo_graph->checkForRMWViolation() || is_infeasible_ignoreRMW();
1342 * Check If the current partial trace is infeasible, while ignoring
1343 * infeasibility related to 2 RMW's reading from the same store. It does not
1344 * check end-of-execution feasibility.
1345 * @see ModelChecker::is_infeasible
1346 * @return whether the current partial trace is infeasible, ignoring multiple
1347 * RMWs reading from the same store.
bool ModelChecker::is_infeasible_ignoreRMW() const
	/* In debug builds, report which condition made the trace infeasible */
	if (DBG_ENABLED()) {
		if (mo_graph->checkForCycles())
			DEBUG("Infeasible: modification order cycles\n");
		if (priv->failed_promise)
			DEBUG("Infeasible: failed promise\n");
		if (priv->too_many_reads)
			DEBUG("Infeasible: too many reads\n");
		if (priv->bad_synchronization)
			DEBUG("Infeasible: bad synchronization ordering\n");
		if (promises_expired())
			DEBUG("Infeasible: promises expired\n");
	/* Any one of the tracked conditions suffices */
	return mo_graph->checkForCycles() || priv->failed_promise ||
		priv->too_many_reads || priv->bad_synchronization ||
1368 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
ModelAction * ModelChecker::process_rmw(ModelAction *act) {
	/* The pending RMWR in this thread is the read that act completes */
	ModelAction *lastread = get_last_action(act->get_tid());
	lastread->process_rmw(act);
	if (act->is_rmw() && lastread->get_reads_from()!=NULL) {
		/* A completed RMW is mo-ordered immediately after the write it
		 * read from (atomicity of the RMW) */
		mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
		mo_graph->commitChanges();
1380 * Checks whether a thread has read from the same write for too many times
1381 * without seeing the effects of a later write.
1384 * 1) there must a different write that we could read from that would satisfy the modification order,
1385 * 2) we must have read from the same value in excess of maxreads times, and
1386 * 3) that other write must have been in the reads_from set for maxreads times.
1388 * If so, we decide that the execution is no longer feasible.
void ModelChecker::check_recency(ModelAction *curr, const ModelAction *rf)
	/* Only active when a read-bound is configured via params.maxreads */
	if (params.maxreads != 0) {
		/* Nothing to vary when there is at most one candidate write */
		if (curr->get_node()->get_read_from_size() <= 1)
		//Must make sure that execution is currently feasible... We could
		//accidentally clear by rolling back
		if (is_infeasible())
		std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
		int tid = id_to_int(curr->get_tid());
		if ((int)thrd_lists->size() <= tid)
		action_list_t *list = &(*thrd_lists)[tid];
		action_list_t::reverse_iterator rit = list->rbegin();
		/* Skip past curr */
		for (; (*rit) != curr; rit++)
		/* go past curr now */
		action_list_t::reverse_iterator ritcopy = rit;
		//See if we have enough reads from the same value
		for (; count < params.maxreads; rit++,count++) {
			if (rit == list->rend())
			ModelAction *act = *rit;
			if (!act->is_read())
			/* Bail out if some recent read saw a different write */
			if (act->get_reads_from() != rf)
			if (act->get_node()->get_read_from_size() <= 1)
		/* Try each alternative write curr could have read from */
		for (int i = 0; i < curr->get_node()->get_read_from_size(); i++) {
			const ModelAction *write = curr->get_node()->get_read_from_at(i);
			/* Need a different write */
			/* Test to see whether this is a feasible write to read from*/
			mo_graph->startChanges();
			r_modification_order(curr, write);
			bool feasiblereadfrom = !is_infeasible();
			/* Trial edges are rolled back whether feasible or not */
			mo_graph->rollbackChanges();
			if (!feasiblereadfrom)
			bool feasiblewrite = true;
			//now we need to see if this write works for everyone
			for (int loop = count; loop>0; loop--,rit++) {
				ModelAction *act = *rit;
				bool foundvalue = false;
				for (int j = 0; j<act->get_node()->get_read_from_size(); j++) {
					if (act->get_node()->get_read_from_at(j)==write) {
					feasiblewrite = false;
			if (feasiblewrite) {
				/* Every one of the last maxreads reads could have seen
				 * this other write: declare the execution infeasible */
				priv->too_many_reads = true;
1473 * Updates the mo_graph with the constraints imposed from the current
1476 * Basic idea is the following: Go through each other thread and find
 * the latest action that happened before our read. Two cases:
1479 * (1) The action is a write => that write must either occur before
1480 * the write we read from or be the write we read from.
1482 * (2) The action is a read => the write that that action read from
1483 * must occur before the write we read from or be the same write.
1485 * @param curr The current action. Must be a read.
1486 * @param rf The action that curr reads from. Must be a write.
1487 * @return True if modification order edges were added; false otherwise
bool ModelChecker::r_modification_order(ModelAction *curr, const ModelAction *rf)
	std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
	ASSERT(curr->is_read());
	/* Last SC fence in the current thread */
	ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
	/* Iterate over all threads */
	for (i = 0; i < thrd_lists->size(); i++) {
		/* Last SC fence in thread i */
		ModelAction *last_sc_fence_thread_local = NULL;
		if (int_to_id((int)i) != curr->get_tid())
			last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(i), NULL);
		/* Last SC fence in thread i, before last SC fence in current thread */
		ModelAction *last_sc_fence_thread_before = NULL;
		if (last_sc_fence_local)
			last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
		/* Iterate over actions in thread, starting from most recent */
		action_list_t *list = &(*thrd_lists)[i];
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *act = *rit;
			if (act->is_write() && act != rf && act != curr) {
				/* C++, Section 29.3 statement 5 */
				if (curr->is_seqcst() && last_sc_fence_thread_local &&
						*act < *last_sc_fence_thread_local) {
					mo_graph->addEdge(act, rf);
				/* C++, Section 29.3 statement 4 */
				else if (act->is_seqcst() && last_sc_fence_local &&
						*act < *last_sc_fence_local) {
					mo_graph->addEdge(act, rf);
				/* C++, Section 29.3 statement 6 */
				else if (last_sc_fence_thread_before &&
						*act < *last_sc_fence_thread_before) {
					mo_graph->addEdge(act, rf);
			/*
			 * Include at most one act per-thread that "happens
			 * before" curr. Don't consider reflexively.
			 */
			if (act->happens_before(curr) && act != curr) {
				if (act->is_write()) {
					/* Case (1): the earlier write must be mo-before rf */
					mo_graph->addEdge(act, rf);
				const ModelAction *prevreadfrom = act->get_reads_from();
				//if the previous read is unresolved, keep going...
				if (prevreadfrom == NULL)
				if (rf != prevreadfrom) {
					/* Case (2): the write the earlier read saw must be
					 * mo-before rf */
					mo_graph->addEdge(prevreadfrom, rf);
/** This method fixes up the modification order when we resolve a
 * promise. The basic problem is that actions that occur after the
 * read curr could not properly add items to the modification order
1575 * So for each thread, we find the earliest item that happens after
1576 * the read curr. This is the item we have to fix up with additional
1577 * constraints. If that action is write, we add a MO edge between
1578 * the Action rf and that action. If the action is a read, we add a
1579 * MO edge between the Action rf, and whatever the read accessed.
1581 * @param curr is the read ModelAction that we are fixing up MO edges for.
1582 * @param rf is the write ModelAction that curr reads from.
void ModelChecker::post_r_modification_order(ModelAction *curr, const ModelAction *rf)
	std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
	ASSERT(curr->is_read());
	/* Iterate over all threads */
	for (i = 0; i < thrd_lists->size(); i++) {
		/* Iterate over actions in thread, starting from most recent */
		action_list_t *list = &(*thrd_lists)[i];
		action_list_t::reverse_iterator rit;
		ModelAction *lastact = NULL;
		/* Find last action that happens after curr that is either not curr or a rmw */
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *act = *rit;
			if (curr->happens_before(act) && (curr != act || curr->is_rmw())) {
		/* Include at most one act per-thread that "happens before" curr */
		if (lastact != NULL) {
			if (lastact==curr) {
				//Case 1: The resolved read is a RMW, and we need to make sure
				//that the write portion of the RMW mod order after rf
				mo_graph->addEdge(rf, lastact);
			} else if (lastact->is_read()) {
				//Case 2: The resolved read is a normal read and the next
				//operation is a read, and we need to make sure the value read
				//is mod ordered after rf
				const ModelAction *postreadfrom = lastact->get_reads_from();
				if (postreadfrom != NULL&&rf != postreadfrom)
					mo_graph->addEdge(rf, postreadfrom);
				//Case 3: The resolved read is a normal read and the next
				//operation is a write, and we need to make sure that the
				//write is mod ordered after rf
				mo_graph->addEdge(rf, lastact);
1635 * Updates the mo_graph with the constraints imposed from the current write.
1637 * Basic idea is the following: Go through each other thread and find
 * the latest action that happened before our write. Two cases:
1640 * (1) The action is a write => that write must occur before
1643 * (2) The action is a read => the write that that action read from
1644 * must occur before the current write.
1646 * This method also handles two other issues:
1648 * (I) Sequential Consistency: Making sure that if the current write is
1649 * seq_cst, that it occurs after the previous seq_cst write.
1651 * (II) Sending the write back to non-synchronizing reads.
1653 * @param curr The current action. Must be a write.
1654 * @return True if modification order edges were added; false otherwise
bool ModelChecker::w_modification_order(ModelAction *curr)
	std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
	ASSERT(curr->is_write());
	if (curr->is_seqcst()) {
		/* We have to at least see the last sequentially consistent write,
			 so we are initialized. */
		ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
		if (last_seq_cst != NULL) {
			/* SC writes to the same location are totally ordered */
			mo_graph->addEdge(last_seq_cst, curr);
	/* Last SC fence in the current thread */
	ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
	/* Iterate over all threads */
	for (i = 0; i < thrd_lists->size(); i++) {
		/* Last SC fence in thread i, before last SC fence in current thread */
		ModelAction *last_sc_fence_thread_before = NULL;
		if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
			last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
		/* Iterate over actions in thread, starting from most recent */
		action_list_t *list = &(*thrd_lists)[i];
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *act = *rit;
			/*
			 * 1) If RMW and it actually read from something, then we
			 * already have all relevant edges, so just skip to next
			 *
			 * 2) If RMW and it didn't read from anything, we should add
			 * whatever edge we can get to speed up convergence.
			 *
			 * 3) If normal write, we need to look at earlier actions, so
			 * continue processing list.
			 */
			if (curr->is_rmw()) {
				if (curr->get_reads_from()!=NULL)
			/* C++, Section 29.3 statement 7 */
			if (last_sc_fence_thread_before && act->is_write() &&
					*act < *last_sc_fence_thread_before) {
				mo_graph->addEdge(act, curr);
			/*
			 * Include at most one act per-thread that "happens
			 */
			if (act->happens_before(curr)) {
				/*
				 * Note: if act is RMW, just add edge:
				 *
				 * The following edge should be handled elsewhere:
				 *   readfrom(act) --mo--> act
				 */
				if (act->is_write())
					mo_graph->addEdge(act, curr);
				else if (act->is_read()) {
					//if previous read accessed a null, just keep going
					if (act->get_reads_from() == NULL)
					mo_graph->addEdge(act->get_reads_from(), curr);
			} else if (act->is_read() && !act->could_synchronize_with(curr) &&
					!act->same_thread(curr)) {
				/* We have an action that:
					 (1) did not happen before us
					 (2) is a read and we are a write
					 (3) cannot synchronize with us
					 (4) is in a different thread
					 that read could potentially read from our write. Note that
					 these checks are overly conservative at this point, we'll
					 do more checks before actually removing the
				*/
				if (thin_air_constraint_may_allow(curr, act)) {
					if (!is_infeasible() ||
							(curr->is_rmw() && act->is_rmw() && curr->get_reads_from() == act->get_reads_from() && !is_infeasible_ignoreRMW())) {
						/* Record act as a candidate future reader of curr */
						struct PendingFutureValue pfv = {curr,act};
						futurevalues->push_back(pfv);
1766 /** Arbitrary reads from the future are not allowed. Section 29.3
 * part 9 places some constraints. This method checks one result of the
 * constraint. Others require compiler support. */
bool ModelChecker::thin_air_constraint_may_allow(const ModelAction *writer, const ModelAction *reader)
	/* The constraint only restricts RMW-to-RMW future reads */
	if (!writer->is_rmw())
	if (!reader->is_rmw())
	/* Walk writer's reads-from chain; the future read is disallowed if the
	 * chain reaches reader itself, or an action in reader's thread that
	 * happens-before reader */
	for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
		if (search == reader)
		if (search->get_tid() == reader->get_tid() &&
				search->happens_before(reader))
1789 * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
 * some constraints. This method checks one of the following constraints (others
1791 * require compiler support):
1793 * If X --hb-> Y --mo-> Z, then X should not read from Z.
bool ModelChecker::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
	std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, reader->get_location());
	/* Iterate over all threads */
	for (i = 0; i < thrd_lists->size(); i++) {
		/* Earliest write hb-after reader in this thread (either a direct
		 * write, or the write a later read saw) */
		const ModelAction *write_after_read = NULL;
		/* Iterate over actions in thread, starting from most recent */
		action_list_t *list = &(*thrd_lists)[i];
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *act = *rit;
			if (!reader->happens_before(act))
			else if (act->is_write())
				write_after_read = act;
			else if (act->is_read() && act->get_reads_from() != NULL && act != reader) {
				write_after_read = act->get_reads_from();
	/* reader --hb-> write_after_read --mo-> writer would violate the
	 * "X --hb-> Y --mo-> Z implies X must not read from Z" constraint */
	if (write_after_read && write_after_read!=writer && mo_graph->checkReachable(write_after_read, writer))
1825 * Finds the head(s) of the release sequence(s) containing a given ModelAction.
1826 * The ModelAction under consideration is expected to be taking part in
1827 * release/acquire synchronization as an object of the "reads from" relation.
1828 * Note that this can only provide release sequence support for RMW chains
1829 * which do not read from the future, as those actions cannot be traced until
1830 * their "promise" is fulfilled. Similarly, we may not even establish the
1831 * presence of a release sequence with certainty, as some modification order
1832 * constraints may be decided further in the future. Thus, this function
1833 * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
1834 * and a boolean representing certainty.
1836 * @param rf The action that might be part of a release sequence. Must be a
1838 * @param release_heads A pass-by-reference style return parameter. After
1839 * execution of this function, release_heads will contain the heads of all the
1840 * relevant release sequences, if any exists with certainty
1841 * @param pending A pass-by-reference style return parameter which is only used
1842 * when returning false (i.e., uncertain). Returns most information regarding
1843 * an uncertain release sequence, including any write operations that might
1844 * break the sequence.
1845 * @return true, if the ModelChecker is certain that release_heads is complete;
bool ModelChecker::release_seq_heads(const ModelAction *rf,
		rel_heads_list_t *release_heads,
		struct release_seq *pending) const
	/* Only check for release sequences if there are no cycles */
	if (mo_graph->checkForCycles())
	ASSERT(rf->is_write());
	/* Walk back through the RMW chain, collecting release heads */
	if (rf->is_release())
		release_heads->push_back(rf);
	else if (rf->get_last_fence_release())
		release_heads->push_back(rf->get_last_fence_release());
		break; /* End of RMW chain */
	/** @todo Need to be smarter here...  In the linux lock
	 * example, this will run to the beginning of the program for
	/** @todo The way to be smarter here is to keep going until 1
	 * thread has a release preceded by an acquire and you've seen
	/* acq_rel RMW is a sufficient stopping condition */
	if (rf->is_acquire() && rf->is_release())
		return true; /* complete */
	rf = rf->get_reads_from();
	/* read from future: need to settle this later */
	return false; /* incomplete */
	if (rf->is_release())
		return true; /* complete */
	/* else relaxed write
	 * - check for fence-release in the same thread (29.8, stmt. 3)
	 * - check modification order for contiguous subsequence
	 *   -> rf must be same thread as release */
	const ModelAction *fence_release = rf->get_last_fence_release();
	/* Synchronize with a fence-release unconditionally; we don't need to
	 * find any more "contiguous subsequence..." for it */
	release_heads->push_back(fence_release);
	int tid = id_to_int(rf->get_tid());
	std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, rf->get_location());
	action_list_t *list = &(*thrd_lists)[tid];
	action_list_t::const_reverse_iterator rit;
	/* Find rf in the thread list */
	rit = std::find(list->rbegin(), list->rend(), rf);
	ASSERT(rit != list->rend());
	/* Find the last {write,fence}-release */
	for (; rit != list->rend(); rit++) {
		if (fence_release && *(*rit) < *fence_release)
		if ((*rit)->is_release())
	if (rit == list->rend()) {
		/* No write-release in this thread */
		return true; /* complete */
	} else if (fence_release && *(*rit) < *fence_release) {
		/* The fence-release is more recent (and so, "stronger") than
		 * the most recent write-release */
		return true; /* complete */
	} /* else, need to establish contiguous release sequence */
	ModelAction *release = *rit;
	ASSERT(rf->same_thread(release));
	pending->writes.clear();
	/* Check every other thread for writes that could slot between
	 * 'release' and 'rf' in modification order */
	bool certain = true;
	for (unsigned int i = 0; i < thrd_lists->size(); i++) {
		if (id_to_int(rf->get_tid()) == (int)i)
		list = &(*thrd_lists)[i];
		/* Can we ensure no future writes from this thread may break
		 * the release seq? */
		bool future_ordered = false;
		ModelAction *last = get_last_action(int_to_id(i));
		Thread *th = get_thread(int_to_id(i));
		if ((last && rf->happens_before(last)) ||
			future_ordered = true;
		ASSERT(!th->is_model_thread() || future_ordered);
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			const ModelAction *act = *rit;
			/* Reach synchronization -> this thread is complete */
			if (act->happens_before(release))
			if (rf->happens_before(act)) {
				future_ordered = true;
			/* Only non-RMW writes can break release sequences */
			if (!act->is_write() || act->is_rmw())
			/* Check modification order */
			if (mo_graph->checkReachable(rf, act)) {
				/* rf --mo--> act */
				future_ordered = true;
			if (mo_graph->checkReachable(act, release))
				/* act --mo--> release */
			if (mo_graph->checkReachable(release, act) &&
					mo_graph->checkReachable(act, rf)) {
				/* release --mo-> act --mo--> rf */
				return true; /* complete */
			/* act may break release sequence */
			pending->writes.push_back(act);
		if (!future_ordered)
			certain = false; /* This thread is uncertain */
	release_heads->push_back(release);
	pending->writes.clear();
	pending->release = release;
1995 * An interface for getting the release sequence head(s) with which a
1996 * given ModelAction must synchronize. This function only returns a non-empty
1997 * result when it can locate a release sequence head with certainty. Otherwise,
1998 * it may mark the internal state of the ModelChecker so that it will handle
1999 * the release sequence at a later time, causing @a acquire to update its
2000 * synchronization at some later point in execution.
2002 * @param acquire The 'acquire' action that may synchronize with a release
2004 * @param read The read action that may read from a release sequence; this may
2005 * be the same as acquire, or else an earlier action in the same thread (i.e.,
2006 * when 'acquire' is a fence-acquire)
2007 * @param release_heads A pass-by-reference return parameter. Will be filled
2008 * with the head(s) of the release sequence(s), if they exists with certainty.
2009 * @see ModelChecker::release_seq_heads
void ModelChecker::get_release_seq_heads(ModelAction *acquire,
		ModelAction *read, rel_heads_list_t *release_heads)
	const ModelAction *rf = read->get_reads_from();
	/* Allocated on the snapshotting heap; either stored for lazy checking
	 * below or freed once resolved */
	struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
	sequence->acquire = acquire;
	sequence->read = read;
	if (!release_seq_heads(rf, release_heads, sequence)) {
		/* add act to 'lazy checking' list */
		pending_rel_seqs->push_back(sequence);
		snapshot_free(sequence);
2028 * Attempt to resolve all stashed operations that might synchronize with a
2029 * release sequence for a given location. This implements the "lazy" portion of
2030 * determining whether or not a release sequence was contiguous, since not all
2031 * modification order information is present at the time an action occurs.
2033 * @param location The location/object that should be checked for release
2034 * sequence resolutions. A NULL value means to check all locations.
2035 * @param work_queue The work queue to which to add work items as they are
2037 * @return True if any updates occurred (new synchronization, new mo_graph
bool ModelChecker::resolve_release_sequences(void *location, work_queue_t *work_queue)
	bool updated = false;
	std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >::iterator it = pending_rel_seqs->begin();
	while (it != pending_rel_seqs->end()) {
		struct release_seq *pending = *it;
		ModelAction *acquire = pending->acquire;
		const ModelAction *read = pending->read;
		/* Only resolve sequences on the given location, if provided */
		if (location && read->get_location() != location) {
		const ModelAction *rf = read->get_reads_from();
		rel_heads_list_t release_heads;
		/* Re-run the release-sequence check now that more mo info exists */
		complete = release_seq_heads(rf, &release_heads, pending);
		for (unsigned int i = 0; i < release_heads.size(); i++) {
			if (!acquire->has_synchronized_with(release_heads[i])) {
				if (acquire->synchronize_with(release_heads[i]))
				set_bad_synchronization();
		/* Re-check all pending release sequences */
		work_queue->push_back(CheckRelSeqWorkEntry(NULL));
		/* Re-check read-acquire for mo_graph edges */
		if (acquire->is_read())
			work_queue->push_back(MOEdgeWorkEntry(acquire));
		/* propagate synchronization to later actions */
		action_list_t::reverse_iterator rit = action_trace->rbegin();
		for (; (*rit) != acquire; rit++) {
			ModelAction *propagate = *rit;
			if (acquire->happens_before(propagate)) {
				propagate->synchronize_with(acquire);
				/* Re-check 'propagate' for mo_graph edges */
				work_queue->push_back(MOEdgeWorkEntry(propagate));
		/* Completed sequences leave the pending list and are freed */
		it = pending_rel_seqs->erase(it);
		snapshot_free(pending);
2094 // If we resolved promises or data races, see if we have realized a data race.
2101 * Performs various bookkeeping operations for the current ModelAction. For
2102 * instance, adds action to the per-object, per-thread action vector and to the
2103 * action trace list of all thread actions.
2105 * @param act is the ModelAction to add.
void ModelChecker::add_action_to_lists(ModelAction *act)
	int tid = id_to_int(act->get_tid());
	ModelAction *uninit = NULL;
	action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
	if (list->empty() && act->is_atomic_var()) {
		/* First access to an atomic variable: seed its history with a
		 * synthetic "uninitialized" write */
		uninit = new_uninitialized_action(act->get_location());
		uninit_id = id_to_int(uninit->get_tid());
		list->push_back(uninit);
	list->push_back(act);
	action_trace->push_back(act);
		/* The uninit write precedes everything in the global trace */
		action_trace->push_front(uninit);
	std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, act->get_location());
	if (tid >= (int)vec->size())
		vec->resize(priv->next_thread_id);
	(*vec)[tid].push_back(act);
		(*vec)[uninit_id].push_front(uninit);
	if ((int)thrd_last_action->size() <= tid)
		thrd_last_action->resize(get_num_threads());
	(*thrd_last_action)[tid] = act;
		(*thrd_last_action)[uninit_id] = uninit;
	if (act->is_fence() && act->is_release()) {
		if ((int)thrd_last_fence_release->size() <= tid)
			thrd_last_fence_release->resize(get_num_threads());
		(*thrd_last_fence_release)[tid] = act;
	if (act->is_wait()) {
		/* A wait operates on a mutex as well; index act under the mutex
		 * location too (act's value holds the mutex address) */
		void *mutex_loc=(void *) act->get_value();
		get_safe_ptr_action(obj_map, mutex_loc)->push_back(act);
		std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, mutex_loc);
		if (tid >= (int)vec->size())
			vec->resize(priv->next_thread_id);
		(*vec)[tid].push_back(act);
2155 * @brief Get the last action performed by a particular Thread
2156 * @param tid The thread ID of the Thread in question
2157 * @return The last action in the thread
ModelAction * ModelChecker::get_last_action(thread_id_t tid) const
	int threadid = id_to_int(tid);
	/* Bounds check guards threads that have not recorded an action yet */
	if (threadid < (int)thrd_last_action->size())
		return (*thrd_last_action)[id_to_int(tid)];
2169 * @brief Get the last fence release performed by a particular Thread
2170 * @param tid The thread ID of the Thread in question
2171 * @return The last fence release in the thread, if one exists; NULL otherwise
ModelAction * ModelChecker::get_last_fence_release(thread_id_t tid) const
	int threadid = id_to_int(tid);
	/* Bounds check guards threads with no fence-release recorded yet */
	if (threadid < (int)thrd_last_fence_release->size())
		return (*thrd_last_fence_release)[id_to_int(tid)];
2183 * Gets the last memory_order_seq_cst write (in the total global sequence)
2184 * performed on a particular object (i.e., memory location), not including the
2186 * @param curr The current ModelAction; also denotes the object location to
2188 * @return The last seq_cst write
ModelAction * ModelChecker::get_last_seq_cst_write(ModelAction *curr) const
	void *location = curr->get_location();
	action_list_t *list = get_safe_ptr_action(obj_map, location);
	/* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
	action_list_t::reverse_iterator rit;
	/* Reverse scan: first match is the most recent SC write (excluding curr) */
	for (rit = list->rbegin(); rit != list->rend(); rit++)
		if ((*rit)->is_write() && (*rit)->is_seqcst() && (*rit) != curr)
2203 * Gets the last memory_order_seq_cst fence (in the total global sequence)
2204 * performed in a particular thread, prior to a particular fence.
2205 * @param tid The ID of the thread to check
2206 * @param before_fence The fence from which to begin the search; if NULL, then
2207 * search for the most recent fence in the thread.
2208 * @return The last prior seq_cst fence in the thread, if exists; otherwise, NULL
ModelAction * ModelChecker::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
	/* All fences should have NULL location */
	action_list_t *list = get_safe_ptr_action(obj_map, NULL);
	action_list_t::reverse_iterator rit = list->rbegin();
	/* NOTE(review): this rewind presumably runs only when before_fence is
	 * non-NULL — confirm against the full source */
	for (; rit != list->rend(); rit++)
		if (*rit == before_fence)
	ASSERT(*rit == before_fence);
	/* Continue backwards for the most recent SC fence in thread tid */
	for (; rit != list->rend(); rit++)
		if ((*rit)->is_fence() && (tid == (*rit)->get_tid()) && (*rit)->is_seqcst())
2232 * Gets the last unlock operation performed on a particular mutex (i.e., memory
2233 * location). This function identifies the mutex according to the current
2234 * action, which is presumed to perform on the same mutex.
2235 * @param curr The current ModelAction; also denotes the object location to
2237 * @return The last unlock operation
2239 ModelAction * ModelChecker::get_last_unlock(ModelAction *curr) const
2241 void *location = curr->get_location();
2242 action_list_t *list = get_safe_ptr_action(obj_map, location);
2243 /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
// Newest-to-oldest scan. A wait() releases the mutex as part of
// waiting, so it also qualifies as an "unlock" here.
2244 action_list_t::reverse_iterator rit;
2245 for (rit = list->rbegin(); rit != list->rend(); rit++)
2246 if ((*rit)->is_unlock() || (*rit)->is_wait())
// Gets the action that directly precedes a new action by thread 'tid'
// in program order (the new action's "parent").
2251 ModelAction * ModelChecker::get_parent_action(thread_id_t tid) const
2253 ModelAction *parent = get_last_action(tid);
// Fall back to the thread's creation event -- presumably only taken
// when the thread has performed no actions yet (guard elided in this
// listing; TODO confirm against the full source).
2255 parent = get_thread(tid)->get_creation();
2260 * Returns the clock vector for a given thread.
2261 * @param tid The thread whose clock vector we want
2262 * @return Desired clock vector
2264 ClockVector * ModelChecker::get_cv(thread_id_t tid) const
2266 return get_parent_action(tid)->get_cv();
2270 * Resolve a set of Promises with a current write. The set is provided in the
2271 * Node corresponding to @a write.
2272 * @param write The ModelAction that is fulfilling Promises
2273 * @return True if promises were resolved; false otherwise
2275 bool ModelChecker::resolve_promises(ModelAction *write)
2277 bool resolved = false;
2278 std::vector< thread_id_t, ModelAlloc<thread_id_t> > threads_to_check;
// 'i' indexes the Node's promise set while 'promise_index' tracks our
// position in the promises vector, which shifts as entries are erased.
2280 for (unsigned int i = 0, promise_index = 0; promise_index < promises->size(); i++) {
2281 Promise *promise = (*promises)[promise_index];
2282 if (write->get_node()->get_promise(i)) {
2283 ModelAction *read = promise->get_action();
// An RMW that fulfills a promise must be ordered directly after
// 'write' in the modification order.
2284 if (read->is_rmw()) {
2285 mo_graph->addRMWEdge(write, read);
2287 read_from(read, write);
2288 //First fix up the modification order for actions that happened
//before the read (r_modification_order)
2290 r_modification_order(read, write);
2291 //Next fix up the modification order for actions that happened
//after the read (post_r_modification_order)
2293 post_r_modification_order(read, write);
2294 //Make sure the promise's value matches the write's value
2295 ASSERT(promise->get_value() == write->get_value());
2298 promises->erase(promises->begin() + promise_index);
2299 threads_to_check.push_back(read->get_tid());
2306 //Check whether reading these writes has made threads unable to
//resolve their remaining promises
2309 for(unsigned int i=0;i<threads_to_check.size();i++)
2310 mo_check_promises(threads_to_check[i], write);
2316 * Compute the set of promises that could potentially be satisfied by this
2317 * action. Note that the set computation actually appears in the Node, not in
2319 * @param curr The ModelAction that may satisfy promises
2321 void ModelChecker::compute_promises(ModelAction *curr)
2323 for (unsigned int i = 0; i < promises->size(); i++) {
2324 Promise *promise = (*promises)[i];
2325 const ModelAction *act = promise->get_action();
// A promise is satisfiable by 'curr' only if the promised read is not
// already ordered before 'curr' (no happens-before edge and no possible
// synchronization), comes from a different thread, and matches curr's
// location and value.
2326 if (!act->happens_before(curr) &&
2328 !act->could_synchronize_with(curr) &&
2329 !act->same_thread(curr) &&
2330 act->get_location() == curr->get_location() &&
2331 promise->get_value() == curr->get_value()) {
// Record in the Node; also remember whether the promised read is an RMW.
2332 curr->get_node()->set_promise(i, act->is_rmw());
2337 /** Checks promises in response to change in ClockVector Threads. */
2338 void ModelChecker::check_promises(thread_id_t tid, ClockVector *old_cv, ClockVector *merge_cv)
2340 for (unsigned int i = 0; i < promises->size(); i++) {
2341 Promise *promise = (*promises)[i];
2342 const ModelAction *act = promise->get_action();
// Only react to promises that 'tid' has *newly* synchronized with: not
// covered by the old clock vector but covered by the merged one.
2343 if ((old_cv == NULL || !old_cv->synchronized_since(act)) &&
2344 merge_cv->synchronized_since(act)) {
2345 if (promise->increment_threads(tid)) {
2346 //Promise has failed
2347 priv->failed_promise = true;
// Re-check all pending promises when a thread becomes disabled; a
// promise that can no longer be resolved marks the execution as failed.
2354 void ModelChecker::check_promises_thread_disabled() {
2355 for (unsigned int i = 0; i < promises->size(); i++) {
2356 Promise *promise = (*promises)[i];
2357 if (promise->check_promise()) {
2358 priv->failed_promise = true;
2364 /** Checks promises in response to addition to modification order for threads.
2366 * pthread is the thread that performed the read that created the promise
2368 * pread is the read that created the promise
2370 * pwrite is either the first write to same location as pread by
2371 * pthread that is sequenced after pread or the value read by the
2372 * first read to the same location as pread by pthread that is
2373 * sequenced after pread.
2375 * 1. If tid=pthread, then we check what other threads are reachable
2376 * through the modification order starting with pwrite. Those threads cannot
2377 * perform a write that will resolve the promise due to modification
2378 * order constraints.
2380 * 2. If the tid is not pthread, we check whether pwrite can reach the
2381 * action write through the modification order. If so, that thread
2382 * cannot perform a future write that will resolve the promise due to
2383 * modification order constraints.
2385 * @param tid The thread that either read from the model action
2386 * write, or actually did the model action write.
2388 * @param write The ModelAction representing the relevant write.
2390 void ModelChecker::mo_check_promises(thread_id_t tid, const ModelAction *write)
2392 void *location = write->get_location();
2393 for (unsigned int i = 0; i < promises->size(); i++) {
2394 Promise *promise = (*promises)[i];
2395 const ModelAction *act = promise->get_action();
2397 //Is this promise on the same location?
2398 if ( act->get_location() != location )
2401 //same thread as the promise
2402 if ( act->get_tid()==tid ) {
2404 //do we have a pwrite for the promise, if not, set it
2405 if (promise->get_write() == NULL ) {
2406 promise->set_write(write);
2407 //The pwrite cannot happen before the promise
2408 if (write->happens_before(act) && (write != act)) {
2409 priv->failed_promise = true;
// Case 1 above: writes mo-reachable from pwrite can no longer resolve
// this promise.
2413 if (mo_graph->checkPromise(write, promise)) {
2414 priv->failed_promise = true;
2419 //Don't do any lookups twice for the same thread
2420 if (promise->has_sync_thread(tid))
// Case 2 above: pwrite reaches 'write' in the modification order, so
// thread 'tid' cannot produce a future write that resolves the promise.
2423 if (promise->get_write()&&mo_graph->checkReachable(promise->get_write(), write)) {
2424 if (promise->increment_threads(tid)) {
2425 priv->failed_promise = true;
2433 * Compute the set of writes that may break the current pending release
2434 * sequence. This information is extracted from previous release sequence
2437 * @param curr The current ModelAction. Must be a release sequence fixup
2440 void ModelChecker::compute_relseq_breakwrites(ModelAction *curr)
2442 if (pending_rel_seqs->empty())
// Only the most recently-added pending release sequence is considered.
2445 struct release_seq *pending = pending_rel_seqs->back();
// Each write in the pending sequence is a candidate "break" choice for
// the fixup action's Node.
2446 for (unsigned int i = 0; i < pending->writes.size(); i++) {
2447 const ModelAction *write = pending->writes[i];
2448 curr->get_node()->add_relseq_break(write);
2451 /* NULL means don't break the sequence; just synchronize */
2452 curr->get_node()->add_relseq_break(NULL);
2456 * Build up an initial set of all past writes that this 'read' action may read
2457 * from. This set is determined by the clock vector's "happens before"
2459 * @param curr is the current ModelAction that we are exploring; it must be a
2462 void ModelChecker::build_reads_from_past(ModelAction *curr)
2464 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
2466 ASSERT(curr->is_read());
// A seq_cst read must not read from anything older than the most recent
// seq_cst write to this location.
2468 ModelAction *last_sc_write = NULL;
2470 if (curr->is_seqcst())
2471 last_sc_write = get_last_seq_cst_write(curr);
2473 /* Iterate over all threads */
2474 for (i = 0; i < thrd_lists->size(); i++) {
2475 /* Iterate over actions in thread, starting from most recent */
2476 action_list_t *list = &(*thrd_lists)[i];
2477 action_list_t::reverse_iterator rit;
2478 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2479 ModelAction *act = *rit;
2481 /* Only consider 'write' actions */
2482 if (!act->is_write() || act == curr)
2485 /* Don't consider more than one seq_cst write if we are a seq_cst read. */
2486 bool allow_read = true;
2488 if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
// A read by a thread on the sleep set is further restricted in what it
// may read from (see sleep_can_read_from).
2490 else if (curr->get_sleep_flag() && !curr->is_seqcst() && !sleep_can_read_from(curr, act))
2494 DEBUG("Adding action to may_read_from:\n");
2495 if (DBG_ENABLED()) {
2499 curr->get_node()->add_read_from(act);
2502 /* Include at most one act per-thread that "happens before" curr */
2503 if (act->happens_before(curr))
2508 if (DBG_ENABLED()) {
2509 model_print("Reached read action:\n");
2511 model_print("Printing may_read_from\n");
2512 curr->get_node()->print_may_read_from();
2513 model_print("End printing may_read_from\n");
// Determines whether a sleeping thread's read 'curr' may read from
// 'write': the write (or, for an RMW, some write in its reads-from
// chain) must be a release while the thread was on the sleep set.
2517 bool ModelChecker::sleep_can_read_from(ModelAction *curr, const ModelAction *write)
2520 /* UNINIT actions don't have a Node, and they never sleep */
2521 if (write->is_uninitialized())
2523 Node *prevnode = write->get_node()->get_parent();
2525 bool thread_sleep = prevnode->enabled_status(curr->get_tid()) == THREAD_SLEEP_SET;
2526 if (write->is_release() && thread_sleep)
// Non-RMW writes terminate the search here; for RMWs we walk back
// through the reads-from chain (loop structure elided in this listing
// -- TODO confirm against the full source).
2528 if (!write->is_rmw()) {
2531 if (write->get_reads_from() == NULL)
2533 write=write->get_reads_from();
2538 * @brief Create a new action representing an uninitialized atomic
2539 * @param location The memory location of the atomic object
2540 * @return A pointer to a new ModelAction
2542 ModelAction * ModelChecker::new_uninitialized_action(void *location) const
// Allocate raw storage from the snapshotting heap, then construct the
// ModelAction in place via placement new.
2544 ModelAction *act = (ModelAction *)snapshot_malloc(sizeof(class ModelAction));
2545 act = new (act) ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, location, 0, model_thread);
// The uninitialized action gets its own (empty-parent) clock vector.
2546 act->create_cv(NULL);
// Print an action trace, optionally labeled with its execution number,
// followed by a rolling checksum of the action sequence.
2550 static void print_list(action_list_t *list, int exec_num = -1)
2552 action_list_t::iterator it;
2554 model_print("---------------------------------------------------------------------\n");
2556 model_print("Execution %d:\n", exec_num);
2558 unsigned int hash=0;
2560 for (it = list->begin(); it != list->end(); it++) {
// Fold each action's hash into a simple XOR/shift accumulator.
2562 hash=hash^(hash<<3)^((*it)->hash());
2564 model_print("HASH %u\n", hash);
2565 model_print("---------------------------------------------------------------------\n");
2568 #if SUPPORT_MOD_ORDER_DUMP
// Dump the current execution as a Graphviz .dot file: modification-order
// nodes plus reads-from ("rf", red) and sequenced-before ("sb", blue)
// edges.
2569 void ModelChecker::dumpGraph(char *filename) {
2571 sprintf(buffer, "%s.dot",filename);
// NOTE(review): fopen() result is not checked for NULL before use --
// worth hardening if this debug dump is enabled routinely.
2572 FILE *file=fopen(buffer, "w");
2573 fprintf(file, "digraph %s {\n",filename);
2574 mo_graph->dumpNodes(file);
// thread_array[tid] remembers each thread's previous action so an "sb"
// edge can be drawn from it to the thread's next action.
2575 ModelAction ** thread_array=(ModelAction **)model_calloc(1, sizeof(ModelAction *)*get_num_threads());
2577 for (action_list_t::iterator it = action_trace->begin(); it != action_trace->end(); it++) {
2578 ModelAction *action=*it;
2579 if (action->is_read()) {
2580 fprintf(file, "N%u [label=\"%u, T%u\"];\n", action->get_seq_number(),action->get_seq_number(), action->get_tid());
2581 if (action->get_reads_from()!=NULL)
2582 fprintf(file, "N%u -> N%u[label=\"rf\", color=red];\n", action->get_seq_number(), action->get_reads_from()->get_seq_number());
2584 if (thread_array[action->get_tid()] != NULL) {
2585 fprintf(file, "N%u -> N%u[label=\"sb\", color=blue];\n", thread_array[action->get_tid()]->get_seq_number(), action->get_seq_number());
2588 thread_array[action->get_tid()]=action;
2590 fprintf(file,"}\n");
2591 model_free(thread_array);
2596 /** @brief Prints an execution trace summary. */
2597 void ModelChecker::print_summary() const
2599 #if SUPPORT_MOD_ORDER_DUMP
// When mod-order dumping is compiled in, also write per-execution graph
// files named exec%04u and graph%04u.
2601 char buffername[100];
2602 sprintf(buffername, "exec%04u", stats.num_total);
2603 mo_graph->dumpGraphToFile(buffername);
2604 sprintf(buffername, "graph%04u", stats.num_total);
2605 dumpGraph(buffername);
// Flag executions whose current prefix is already infeasible.
2608 if (!isfeasibleprefix())
2609 model_print("INFEASIBLE EXECUTION!\n");
2610 print_list(action_trace, stats.num_total);
2615 * Add a Thread to the system for the first time. Should only be called once
2617 * @param t The Thread to add
2619 void ModelChecker::add_thread(Thread *t)
2621 thread_map->put(id_to_int(t->get_id()), t);
2622 scheduler->add_thread(t);
2626 * Removes a thread from the scheduler.
2627 * @param the thread to remove.
2629 void ModelChecker::remove_thread(Thread *t)
2631 scheduler->remove_thread(t);
2635 * @brief Get a Thread reference by its ID
2636 * @param tid The Thread's ID
2637 * @return A Thread reference
2639 Thread * ModelChecker::get_thread(thread_id_t tid) const
2641 return thread_map->get(id_to_int(tid));
2645 * @brief Get a reference to the Thread in which a ModelAction was executed
2646 * @param act The ModelAction
2647 * @return A Thread reference
2649 Thread * ModelChecker::get_thread(ModelAction *act) const
2651 return get_thread(act->get_tid());
2655 * @brief Check if a Thread is currently enabled
2656 * @param t The Thread to check
2657 * @return True if the Thread is currently enabled
2659 bool ModelChecker::is_enabled(Thread *t) const
2661 return scheduler->is_enabled(t);
2665 * @brief Check if a Thread is currently enabled
2666 * @param tid The ID of the Thread to check
2667 * @return True if the Thread is currently enabled
2669 bool ModelChecker::is_enabled(thread_id_t tid) const
2671 return scheduler->is_enabled(tid);
2675 * Switch from a user-context to the "master thread" context (a.k.a. system
2676 * context). This switch is made with the intention of exploring a particular
2677 * model-checking action (described by a ModelAction object). Must be called
2678 * from a user-thread context.
2680 * @param act The current action that will be explored. May be NULL only if
2681 * trace is exiting via an assertion (see ModelChecker::set_assert and
2682 * ModelChecker::has_asserted).
2683 * @return Return status from the 'swap' call (i.e., success/fail, 0/-1)
2685 int ModelChecker::switch_to_master(ModelAction *act)
2688 Thread *old = thread_current();
// Publish the action for the master loop to process, mark this thread
// as ready to be rescheduled, then context-switch to the system context.
2689 set_current_action(act);
2690 old->set_state(THREAD_READY);
2691 return Thread::swap(old, &system_context);
2695 * Takes the next step in the execution, if possible.
2696 * @return Returns true (success) if a step was taken and false otherwise.
2698 bool ModelChecker::take_step() {
2702 Thread *curr = priv->current_action ? get_thread(priv->current_action) : NULL;
2704 if (curr->get_state() == THREAD_READY) {
2705 ASSERT(priv->current_action);
2707 priv->nextThread = check_current_action(priv->current_action);
2708 priv->current_action = NULL;
2710 if (curr->is_blocked() || curr->is_complete())
2711 scheduler->remove_thread(curr);
2716 Thread *next = scheduler->next_thread(priv->nextThread);
2718 /* Infeasible -> don't take any more steps */
2719 if (is_infeasible())
2721 else if (isfeasibleprefix() && have_bug_reports()) {
2726 if (params.bound != 0) {
2727 if (priv->used_sequence_numbers > params.bound) {
2732 DEBUG("(%d, %d)\n", curr ? id_to_int(curr->get_id()) : -1,
2733 next ? id_to_int(next->get_id()) : -1);
2736 * Launch end-of-execution release sequence fixups only when there are:
2738 * (1) no more user threads to run (or when execution replay chooses
2739 * the 'model_thread')
2740 * (2) pending release sequences
2741 * (3) pending assertions (i.e., data races)
2742 * (4) no pending promises
2744 if (!pending_rel_seqs->empty() && (!next || next->is_model_thread()) &&
2745 is_feasible_prefix_ignore_relseq() && !unrealizedraces.empty()) {
2746 model_print("*** WARNING: release sequence fixup action (%zu pending release seuqences) ***\n",
2747 pending_rel_seqs->size());
2748 ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
2749 std::memory_order_seq_cst, NULL, VALUE_NONE,
2751 set_current_action(fixup);
2755 /* next == NULL -> don't take any more steps */
2759 next->set_state(THREAD_RUNNING);
2761 if (next->get_pending() != NULL) {
2762 /* restart a pending action */
2763 set_current_action(next->get_pending());
2764 next->set_pending(NULL);
2765 next->set_state(THREAD_READY);
2769 /* Return false only if swap fails with an error */
2770 return (Thread::swap(&system_context, next) == 0);
2773 /** Wrapper to run the user's main function, with appropriate arguments */
2774 void user_main_wrapper(void *)
2776 user_main(model->params.argc, model->params.argv);
2779 /** @brief Run ModelChecker for the user program */
2780 void ModelChecker::run()
2785 /* Start user program */
2786 add_thread(new Thread(&user_thread, &user_main_wrapper, NULL));
2788 /* Wait for all threads to complete */
// Step the scheduler until the current execution finishes, then keep
// exploring new executions until next_execution() reports the search
// space is exhausted (loop header elided in this listing).
2789 while (take_step());
2790 } while (next_execution());