10 #include "snapshot-interface.h"
12 #include "clockvector.h"
13 #include "cyclegraph.h"
16 #include "threads-model.h"
19 #define INITIAL_THREAD_ID 0
/* NOTE(review): the `struct bug_message {` declaration line is not visible in
 * this chunk; the constructor/destructor/print below appear to be its members. */
/* Build the formatted " [BUG] <str>" message in a snapshot-allocated buffer.
 * Size strlen(fmt) + strlen(str) is sufficient: the 2-byte "%s" in fmt is
 * replaced by str, leaving one spare byte for the NUL terminator. */
24 bug_message(const char *str) {
25 const char *fmt = " [BUG] %s\n";
26 msg = (char *)snapshot_malloc(strlen(fmt) + strlen(str));
27 sprintf(msg, fmt, str);
/* Free the snapshot-allocated message buffer, if any */
29 ~bug_message() { if (msg) snapshot_free(msg); }
/* Print the stored message through the model-checker's print routine */
32 void print() { model_print("%s", msg); }
38 * Structure for holding small ModelChecker members that should be snapshotted
40 struct model_snapshot_members {
/* Constructor: zero/false-initialize all snapshotted state.
 * NOTE(review): several initializer lines (orig. 46-49, 53-55) are not
 * visible in this chunk. */
41 model_snapshot_members() :
43 /* First thread created will have id INITIAL_THREAD_ID */
44 next_thread_id(INITIAL_THREAD_ID),
45 used_sequence_numbers(0),
50 failed_promise(false),
51 too_many_reads(false),
52 bad_synchronization(false),
/* Destructor: release each heap-allocated bug_message held in 'bugs'
 * (loop body not visible in this chunk; presumably `delete bugs[i];`). */
56 ~model_snapshot_members() {
57 for (unsigned int i = 0; i < bugs.size(); i++)
/* The action currently being processed (set before user->system swap) */
62 ModelAction *current_action;
/* Next thread id to hand out; doubles as the thread count */
63 unsigned int next_thread_id;
/* Monotonic sequence-number counter for ModelActions */
64 modelclock_t used_sequence_numbers;
/* Latest cached backtracking point (see set_backtracking) */
66 ModelAction *next_backtrack;
/* Bug reports collected this execution; uses the snapshotting allocator */
67 std::vector< bug_message *, SnapshotAlloc<bug_message *> > bugs;
/* Cross-execution statistics (see record_stats) */
68 struct execution_stats stats;
71 /** @brief Incorrectly-ordered synchronization was made */
72 bool bad_synchronization;
78 /** @brief Constructor */
/* Allocates all per-execution containers with the snapshotting allocator so
 * they roll back with the snapshot on each new execution. */
79 ModelChecker::ModelChecker(struct model_params params) :
80 /* Initialize default scheduler */
82 scheduler(new Scheduler()),
84 earliest_diverge(NULL),
85 action_trace(new action_list_t()),
86 thread_map(new HashTable<int, Thread *, int>()),
/* Per-location action lists, keyed by object address */
87 obj_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
/* Threads blocked on a lock / condvar, keyed by the lock's address */
88 lock_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
89 condvar_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
/* Per-location, per-thread action lists */
90 obj_thrd_map(new HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4 >()),
91 promises(new std::vector< Promise *, SnapshotAlloc<Promise *> >()),
92 futurevalues(new std::vector< struct PendingFutureValue, SnapshotAlloc<struct PendingFutureValue> >()),
93 pending_rel_seqs(new std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >()),
/* Sized 1 so the model thread's slot exists immediately */
94 thrd_last_action(new std::vector< ModelAction *, SnapshotAlloc<ModelAction *> >(1)),
95 thrd_last_fence_release(new std::vector< ModelAction *, SnapshotAlloc<ModelAction *> >()),
96 node_stack(new NodeStack()),
97 priv(new struct model_snapshot_members()),
98 mo_graph(new CycleGraph())
100 /* Initialize a model-checker thread, for special ModelActions */
101 model_thread = new Thread(get_next_id());
102 thread_map->put(id_to_int(model_thread->get_id()), model_thread);
105 /** @brief Destructor */
/* Tears down all owned containers and the Threads in thread_map.
 * NOTE(review): deletions of several members (orig. 110-113, 116-117,
 * 120-121, 123, 126+) are not visible in this chunk. */
106 ModelChecker::~ModelChecker()
108 for (unsigned int i = 0; i < get_num_threads(); i++)
109 delete thread_map->get(i);
114 delete lock_waiters_map;
115 delete condvar_waiters_map;
/* Promises are heap-allocated; free each before the vector itself */
118 for (unsigned int i = 0; i < promises->size(); i++)
119 delete (*promises)[i];
122 delete pending_rel_seqs;
124 delete thrd_last_action;
125 delete thrd_last_fence_release;
/* Look up the action list for 'ptr' in 'hash', lazily creating (and,
 * presumably, inserting — the hash->put line is not visible here) an empty
 * list on first access so callers never see NULL. */
132 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr)
134 action_list_t *tmp = hash->get(ptr);
136 tmp = new action_list_t();
/* Same lazy-create pattern for the per-thread action-list vectors */
142 static std::vector<action_list_t> * get_safe_ptr_vect_action(HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4> * hash, void * ptr)
144 std::vector<action_list_t> *tmp = hash->get(ptr);
146 tmp = new std::vector<action_list_t>();
153 * Restores user program to initial state and resets all model-checker data
156 void ModelChecker::reset_to_initial_state()
158 DEBUG("+++ Resetting to initial state +++\n");
/* Rewind the NodeStack so replay starts from the root node */
159 node_stack->reset_execution();
161 /* Print all model-checker output before rollback */
/* Roll the whole user program (snapshotted memory) back to step 0 */
164 snapshotObject->backTrackBeforeStep(0);
167 /** @return a thread ID for a new Thread */
168 thread_id_t ModelChecker::get_next_id()
170 return priv->next_thread_id++;
173 /** @return the number of user threads created during this execution */
/* next_thread_id doubles as a count because ids are handed out sequentially
 * from INITIAL_THREAD_ID */
174 unsigned int ModelChecker::get_num_threads() const
176 return priv->next_thread_id;
179 /** @return The currently executing Thread. */
180 Thread * ModelChecker::get_current_thread() const
182 return scheduler->get_current_thread();
185 /** @return a sequence number for a new ModelAction */
186 modelclock_t ModelChecker::get_next_seq_num()
188 return ++priv->used_sequence_numbers;
/** @return the current (head) Node of the exploration NodeStack */
191 Node * ModelChecker::get_curr_node() const
193 return node_stack->get_head();
197 * @brief Choose the next thread to execute.
199 * This function chooses the next thread that should execute. It can force the
200 * adjacency of read/write portions of a RMW action, force THREAD_CREATE to be
201 * followed by a THREAD_START, or it can enforce execution replay/backtracking.
202 * The model-checker may have no preference regarding the next thread (i.e.,
203 * when exploring a new execution ordering), in which case this will return
205 * @param curr The current ModelAction. This action might guide the choice of
207 * @return The next thread to run. If the model-checker has no preference, NULL.
/* NOTE(review): several lines of this function (orig. 210-213, 215, 220-225,
 * 228, 232, 236, 258, 266-276) are not visible in this chunk; the replay /
 * divergence logic below is therefore only partially shown. */
209 Thread * ModelChecker::get_next_thread(ModelAction *curr)
214 /* Do not split atomic actions. */
216 return thread_current();
217 /* The THREAD_CREATE action points to the created Thread */
218 else if (curr->get_type() == THREAD_CREATE)
219 return (Thread *)curr->get_location();
222 /* Have we completed exploring the preselected path? */
226 /* Else, we are trying to replay an execution */
227 ModelAction *next = node_stack->get_next()->get_action();
/* Track the earliest divergence point seen since the last feasible execution */
229 if (next == diverge) {
230 if (earliest_diverge == NULL || *diverge < *earliest_diverge)
231 earliest_diverge=diverge;
233 Node *nextnode = next->get_node();
234 Node *prevnode = nextnode->get_parent();
235 scheduler->update_sleep_set(prevnode);
237 /* Reached divergence point */
/* At the divergence point, try each alternative in priority order:
 * misc index, promise set, read-from value, future value, release-seq
 * break — keeping the same thread and popping the stack below the node.
 * Only if all are exhausted do we backtrack to a different thread. */
238 if (nextnode->increment_misc()) {
239 /* The next node will try to satisfy a different misc_index values. */
240 tid = next->get_tid();
241 node_stack->pop_restofstack(2);
242 } else if (nextnode->increment_promise()) {
243 /* The next node will try to satisfy a different set of promises. */
244 tid = next->get_tid();
245 node_stack->pop_restofstack(2);
246 } else if (nextnode->increment_read_from()) {
247 /* The next node will read from a different value. */
248 tid = next->get_tid();
249 node_stack->pop_restofstack(2);
250 } else if (nextnode->increment_future_value()) {
251 /* The next node will try to read from a different future value. */
252 tid = next->get_tid();
253 node_stack->pop_restofstack(2);
254 } else if (nextnode->increment_relseq_break()) {
255 /* The next node will try to resolve a release sequence differently */
256 tid = next->get_tid();
257 node_stack->pop_restofstack(2);
259 /* Make a different thread execute for next step */
260 scheduler->add_sleep(thread_map->get(id_to_int(next->get_tid())));
261 tid = prevnode->get_next_backtrack();
262 /* Make sure the backtracked thread isn't sleeping. */
263 node_stack->pop_restofstack(1);
264 if (diverge==earliest_diverge) {
265 earliest_diverge=prevnode->get_action();
268 /* The correct sleep set is in the parent node. */
271 DEBUG("*** Divergence point ***\n");
/* Not at the divergence point: simply replay the recorded thread choice */
275 tid = next->get_tid();
277 DEBUG("*** ModelChecker chose next thread = %d ***\n", id_to_int(tid));
278 ASSERT(tid != THREAD_ID_T_NONE);
279 return thread_map->get(id_to_int(tid));
283 * We need to know what the next actions of all threads in the sleep
284 * set will be. This method computes them and stores the actions at
285 * the corresponding thread object's pending action.
288 void ModelChecker::execute_sleep_set()
290 for (unsigned int i = 0; i < get_num_threads(); i++) {
291 thread_id_t tid = int_to_id(i);
292 Thread *thr = get_thread(tid);
293 if (scheduler->is_sleep_set(thr) && thr->get_pending() == NULL) {
/* Briefly run the sleeping thread just far enough to produce its next
 * ModelAction (delivered via priv->current_action on the swap back) */
294 thr->set_state(THREAD_RUNNING);
295 scheduler->next_thread(thr);
296 Thread::swap(&system_context, thr);
/* Mark it so the action is recognized as a sleep-set probe, then stash
 * it as the thread's pending action instead of processing it now */
297 priv->current_action->set_sleep_flag();
298 thr->set_pending(priv->current_action);
/* Clear the handoff slot so stale actions are never re-processed */
301 priv->current_action = NULL;
/* Wake any sleep-set thread whose stashed pending action could synchronize
 * with 'curr' — its next step may no longer be redundant.
 * NOTE(review): RMW-read actions (is_rmwr) are skipped, presumably because
 * the full RMW is handled when the write half executes — confirm. */
304 void ModelChecker::wake_up_sleeping_actions(ModelAction *curr)
306 for (unsigned int i = 0; i < get_num_threads(); i++) {
307 Thread *thr = get_thread(int_to_id(i));
308 if (scheduler->is_sleep_set(thr)) {
309 ModelAction *pending_act = thr->get_pending();
310 if ((!curr->is_rmwr()) && pending_act->could_synchronize_with(curr))
311 //Remove this thread from sleep set
312 scheduler->remove_sleep(thr);
317 /** @brief Alert the model-checker that an incorrectly-ordered
318 * synchronization was made */
319 void ModelChecker::set_bad_synchronization()
321 priv->bad_synchronization = true;
/** @return True if the execution has been halted via set_assert() */
324 bool ModelChecker::has_asserted() const
326 return priv->asserted;
/** @brief Halt the current execution (checked by the work-queue loop) */
329 void ModelChecker::set_assert()
331 priv->asserted = true;
335 * Check if we are in a deadlock. Should only be called at the end of an
336 * execution, although it should not give false positives in the middle of an
337 * execution (there should be some ENABLED thread).
339 * @return True if program is in a deadlock; false otherwise
/* NOTE(review): lines orig. 346-347 are not visible here — the original
 * presumably also requires that no thread be enabled before reporting
 * deadlock; only the "some thread is blocked with a pending action" half
 * is visible in this chunk. */
341 bool ModelChecker::is_deadlocked() const
343 bool blocking_threads = false;
344 for (unsigned int i = 0; i < get_num_threads(); i++) {
345 thread_id_t tid = int_to_id(i);
348 Thread *t = get_thread(tid);
/* A user thread with a stashed pending action is blocked (lock/join wait) */
349 if (!t->is_model_thread() && t->get_pending())
350 blocking_threads = true;
352 return blocking_threads;
356 * Check if this is a complete execution. That is, have all thread completed
357 * execution (rather than exiting because sleep sets have forced a redundant
360 * @return True if the execution is complete.
/* Complete iff no thread remains enabled (the 'return false'/'return true'
 * lines are not visible in this chunk). */
362 bool ModelChecker::is_complete_execution() const
364 for (unsigned int i = 0; i < get_num_threads(); i++)
365 if (is_enabled(int_to_id(i)))
371 * @brief Assert a bug in the executing program.
373 * Use this function to assert any sort of bug in the user program. If the
374 * current trace is feasible (actually, a prefix of some feasible execution),
375 * then this execution will be aborted, printing the appropriate message. If
376 * the current trace is not yet feasible, the error message will be stashed and
377 * printed if the execution ever becomes feasible.
379 * @param msg Descriptive message for the bug (do not include newline char)
380 * @return True if bug is immediately-feasible
382 bool ModelChecker::assert_bug(const char *msg)
/* Always record the bug; it is only *reported* if the trace is feasible */
384 priv->bugs.push_back(new bug_message(msg));
/* Feasible-prefix handling (set_assert / return) is not visible in this chunk */
386 if (isfeasibleprefix()) {
394 * @brief Assert a bug in the executing program, asserted by a user thread
395 * @see ModelChecker::assert_bug
396 * @param msg Descriptive message for the bug (do not include newline char)
398 void ModelChecker::assert_user_bug(const char *msg)
400 /* If feasible bug, bail out now */
/* Yield back to the model-checker context so it can end this execution */
402 switch_to_master(NULL);
405 /** @return True, if any bugs have been reported for this execution */
406 bool ModelChecker::have_bug_reports() const
408 return priv->bugs.size() != 0;
411 /** @brief Print bug report listing for this execution (if any bugs exist) */
412 void ModelChecker::print_bugs() const
414 if (have_bug_reports()) {
415 model_print("Bug report: %zu bug%s detected\n",
417 priv->bugs.size() > 1 ? "s" : "");
418 for (unsigned int i = 0; i < priv->bugs.size(); i++)
419 priv->bugs[i]->print();
424 * @brief Record end-of-execution stats
426 * Must be run when exiting an execution. Records various stats.
427 * @see struct execution_stats
/* Buckets are mutually exclusive, checked in priority order:
 * infeasible > buggy > complete > redundant (sleep-set terminated). */
429 void ModelChecker::record_stats()
432 if (!isfeasibleprefix())
433 stats.num_infeasible++;
434 else if (have_bug_reports())
435 stats.num_buggy_executions++;
436 else if (is_complete_execution())
437 stats.num_complete++;
439 stats.num_redundant++;
442 /** @brief Print execution stats */
443 void ModelChecker::print_stats() const
445 model_print("Number of complete, bug-free executions: %d\n", stats.num_complete);
446 model_print("Number of redundant executions: %d\n", stats.num_redundant);
447 model_print("Number of buggy executions: %d\n", stats.num_buggy_executions);
448 model_print("Number of infeasible executions: %d\n", stats.num_infeasible);
449 model_print("Total executions: %d\n", stats.num_total);
450 model_print("Total nodes created: %d\n", node_stack->get_total_nodes());
454 * @brief End-of-exeuction print
455 * @param printbugs Should any existing bugs be printed?
457 void ModelChecker::print_execution(bool printbugs) const
459 print_program_output();
/* Verbose/debug mode: also report where this execution diverged from the
 * last feasible one (helps understand the exploration order) */
461 if (DBG_ENABLED() || params.verbose) {
462 model_print("Earliest divergence point since last feasible execution:\n");
463 if (earliest_diverge)
464 earliest_diverge->print();
466 model_print("(Not set)\n");
472 /* Don't print invalid bugs */
481 * Queries the model-checker for more executions to explore and, if one
482 * exists, resets the model-checker state to execute a new execution.
484 * @return If there are more executions to explore, return true. Otherwise,
/* NOTE(review): many lines of this function (orig. 488-489, 492-493, 495-496,
 * 498-504, 507, 509-510, 512, 514-516, 518-520, 522-523) are not visible
 * in this chunk; the flow shown is: bug-check, print, record stats, pick
 * the next backtracking point, then reset for the next execution. */
487 bool ModelChecker::next_execution()
490 /* Is this execution a feasible execution that's worth bug-checking? */
491 bool complete = isfeasibleprefix() && (is_complete_execution() ||
494 /* End-of-execution bug checks */
497 assert_bug("Deadlock detected");
505 if (DBG_ENABLED() || params.verbose || have_bug_reports())
506 print_execution(complete);
/* Drop buffered user-program output between executions */
508 clear_program_output();
511 earliest_diverge = NULL;
/* No remaining backtracking point => exploration is finished */
513 if ((diverge = get_next_backtrack()) == NULL)
517 model_print("Next execution will diverge at:\n");
521 reset_to_initial_state();
/* Find the most recent prior action (same location) that conflicts with
 * 'act' for backtracking purposes; the conflict notion depends on the
 * action type (synchronization, lock contention, trylock failure, wait /
 * notify pairing). NOTE(review): the 'return prev;'/'return NULL;' and
 * several case labels (orig. 528-531, 541-545, 553-556, 564-568, 575-581,
 * 590-598) are not visible in this chunk. */
525 ModelAction * ModelChecker::get_last_conflict(ModelAction *act)
527 switch (act->get_type()) {
532 /* Optimization: relaxed operations don't need backtracking */
533 if (act->is_relaxed())
535 /* linear search: from most recent to oldest */
536 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
537 action_list_t::reverse_iterator rit;
538 for (rit = list->rbegin(); rit != list->rend(); rit++) {
539 ModelAction *prev = *rit;
540 if (prev->could_synchronize_with(act))
546 case ATOMIC_TRYLOCK: {
547 /* linear search: from most recent to oldest */
548 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
549 action_list_t::reverse_iterator rit;
550 for (rit = list->rbegin(); rit != list->rend(); rit++) {
551 ModelAction *prev = *rit;
/* Lock/trylock conflicts with an earlier lock-like op on the same mutex */
552 if (act->is_conflicting_lock(prev))
557 case ATOMIC_UNLOCK: {
558 /* linear search: from most recent to oldest */
559 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
560 action_list_t::reverse_iterator rit;
561 for (rit = list->rbegin(); rit != list->rend(); rit++) {
562 ModelAction *prev = *rit;
/* An unlock conflicts with another thread's failed trylock */
563 if (!act->same_thread(prev)&&prev->is_failed_trylock())
569 /* linear search: from most recent to oldest */
570 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
571 action_list_t::reverse_iterator rit;
572 for (rit = list->rbegin(); rit != list->rend(); rit++) {
573 ModelAction *prev = *rit;
574 if (!act->same_thread(prev)&&prev->is_failed_trylock())
576 if (!act->same_thread(prev)&&prev->is_notify())
582 case ATOMIC_NOTIFY_ALL:
583 case ATOMIC_NOTIFY_ONE: {
584 /* linear search: from most recent to oldest */
585 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
586 action_list_t::reverse_iterator rit;
587 for (rit = list->rbegin(); rit != list->rend(); rit++) {
588 ModelAction *prev = *rit;
/* A notify conflicts with another thread's wait on the same condvar */
589 if (!act->same_thread(prev)&&prev->is_wait())
600 /** This method finds backtracking points where we should try to
601 * reorder the parameter ModelAction against.
603 * @param the ModelAction to find backtracking points for.
/* For each thread that could have run instead at the conflicting point,
 * mark a backtracking choice in that node and cache the latest one in
 * priv->next_backtrack. NOTE(review): guard lines (orig. 609-611, 618-619,
 * 628-629, 632-633, 636-637, 640, 644-650, 654, 657, 661+) are not visible
 * in this chunk. */
605 void ModelChecker::set_backtracking(ModelAction *act)
607 Thread *t = get_thread(act);
608 ModelAction *prev = get_last_conflict(act);
/* Backtracking choices hang off the node *before* the conflicting action */
612 Node *node = prev->get_node()->get_parent();
/* If act's own thread was enabled at 'node', only it needs reordering;
 * otherwise consider every thread */
614 int low_tid, high_tid;
615 if (node->is_enabled(t)) {
616 low_tid = id_to_int(act->get_tid());
617 high_tid = low_tid+1;
620 high_tid = get_num_threads();
623 for(int i = low_tid; i < high_tid; i++) {
624 thread_id_t tid = int_to_id(i);
626 /* Make sure this thread can be enabled here. */
627 if (i >= node->get_num_threads())
630 /* Don't backtrack into a point where the thread is disabled or sleeping. */
631 if (node->enabled_status(tid)!=THREAD_ENABLED)
634 /* Check if this has been explored already */
635 if (node->has_been_explored(tid))
638 /* See if fairness allows */
639 if (model->params.fairwindow != 0 && !node->has_priority(tid)) {
641 for(int t=0;t<node->get_num_threads();t++) {
642 thread_id_t tother=int_to_id(t);
643 if (node->is_enabled(tother) && node->has_priority(tother)) {
651 /* Cache the latest backtracking point */
652 if (!priv->next_backtrack || *prev > *priv->next_backtrack)
653 priv->next_backtrack = prev;
655 /* If this is a new backtracking point, mark the tree */
656 if (!node->set_backtrack(tid))
658 DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
659 id_to_int(prev->get_tid()),
660 id_to_int(t->get_id()));
669 * Returns last backtracking point. The model checker will explore a different
670 * path for this point in the next execution.
671 * @return The ModelAction at which the next execution should diverge.
/* Consumes the cached point: subsequent calls return NULL until a new
 * backtracking point is set (the final 'return next;' is not visible here). */
673 ModelAction * ModelChecker::get_next_backtrack()
675 ModelAction *next = priv->next_backtrack;
676 priv->next_backtrack = NULL;
681 * Processes a read or rmw model action.
682 * @param curr is the read model action to process.
683 * @param second_part_of_rmw is boolean that is true is this is the second action of a rmw.
684 * @return True if processing this read updates the mo_graph.
/* NOTE(review): looping/return structure (orig. 687, 690, 694, 697, 701-703,
 * 707-709, 713-714, 722, 724-726) is not visible in this chunk. */
686 bool ModelChecker::process_read(ModelAction *curr, bool second_part_of_rmw)
688 uint64_t value = VALUE_NONE;
689 bool updated = false;
/* The read-from candidate was chosen earlier and recorded in the node */
691 const ModelAction *reads_from = curr->get_node()->get_read_from();
692 if (reads_from != NULL) {
/* Stage mo_graph edges so they can be rolled back if infeasible */
693 mo_graph->startChanges();
695 value = reads_from->get_value();
696 bool r_status = false;
698 if (!second_part_of_rmw) {
699 check_recency(curr, reads_from);
700 r_status = r_modification_order(curr, reads_from);
/* If this choice makes the execution infeasible and an alternative
 * read-from / future value remains, undo and retry with the next one */
704 if (!second_part_of_rmw&&is_infeasible()&&(curr->get_node()->increment_read_from()||curr->get_node()->increment_future_value())) {
705 mo_graph->rollbackChanges();
706 priv->too_many_reads = false;
710 read_from(curr, reads_from);
711 mo_graph->commitChanges();
712 mo_check_promises(curr->get_tid(), reads_from);
715 } else if (!second_part_of_rmw) {
716 /* Read from future value */
717 value = curr->get_node()->get_future_value();
718 modelclock_t expiration = curr->get_node()->get_future_value_expiration();
719 curr->set_read_from(NULL);
/* Promise obligates some future write to produce 'value' by 'expiration' */
720 Promise *valuepromise = new Promise(curr, value, expiration);
721 promises->push_back(valuepromise);
/* Deliver the read value back to the user thread */
723 get_thread(curr)->set_return_value(value);
729 * Processes a lock, trylock, or unlock model action. @param curr is
730 * the read model action to process.
732 * The try lock operation checks whether the lock is taken. If not,
733 * it falls to the normal lock operation case. If so, it returns
736 * The lock operation has already been checked that it is enabled, so
737 * it just grabs the lock and synchronizes with the previous unlock.
739 * The unlock operation has to re-enable all of the threads that are
740 * waiting on the lock.
742 * @return True if synchronization was updated; false otherwise
/* NOTE(review): several case labels, breaks and returns (orig. 747, 754-755,
 * 760, 762-763, 765, 767, 775-778, 780, 787-792, 799-800, 804, 806-808,
 * 814-817, 824+) are not visible in this chunk. */
744 bool ModelChecker::process_mutex(ModelAction *curr) {
745 std::mutex *mutex=NULL;
746 struct std::mutex_state *state=NULL;
/* Locate the mutex: lock ops carry it as the location; a wait carries the
 * condvar as location and the mutex as the action's value */
748 if (curr->is_trylock() || curr->is_lock() || curr->is_unlock()) {
749 mutex = (std::mutex *)curr->get_location();
750 state = mutex->get_state();
751 } else if(curr->is_wait()) {
752 mutex = (std::mutex *)curr->get_value();
753 state = mutex->get_state();
756 switch (curr->get_type()) {
757 case ATOMIC_TRYLOCK: {
758 bool success = !state->islocked;
759 curr->set_try_lock(success);
/* Failed trylock returns 0 to the user; success returns 1 */
761 get_thread(curr)->set_return_value(0);
764 get_thread(curr)->set_return_value(1);
766 //otherwise fall into the lock case
/* Detect use of the lock before its initialization, via clock vectors */
768 if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
769 assert_bug("Lock access before initialization");
770 state->islocked = true;
771 ModelAction *unlock = get_last_unlock(curr);
772 //synchronize with the previous unlock statement
773 if (unlock != NULL) {
774 curr->synchronize_with(unlock);
779 case ATOMIC_UNLOCK: {
781 state->islocked = false;
782 //wake up the other threads
783 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, curr->get_location());
784 //activate all the waiting threads
785 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
786 scheduler->wake(get_thread(*rit));
/* ATOMIC_WAIT (case label not visible): release the mutex, wake lock
 * waiters, then maybe sleep on the condvar */
793 state->islocked = false;
794 //wake up the other threads
795 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, (void *) curr->get_value());
796 //activate all the waiting threads
797 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
798 scheduler->wake(get_thread(*rit));
801 //check whether we should go to sleep or not...simulate spurious failures
802 if (curr->get_node()->get_misc()==0) {
803 get_safe_ptr_action(condvar_waiters_map, curr->get_location())->push_back(curr);
805 scheduler->sleep(get_current_thread());
809 case ATOMIC_NOTIFY_ALL: {
810 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
811 //activate all the waiting threads
812 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
813 scheduler->wake(get_thread(*rit));
818 case ATOMIC_NOTIFY_ONE: {
819 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
/* The node's misc index selects which waiter to wake, so all choices
 * are explored across executions */
820 int wakeupthread=curr->get_node()->get_misc();
821 action_list_t::iterator it = waiters->begin();
822 advance(it, wakeupthread);
823 scheduler->wake(get_thread(*it));
835 * Process a write ModelAction
836 * @param curr The ModelAction to process
837 * @return True if the mo_graph was updated or promises were resolved
839 bool ModelChecker::process_write(ModelAction *curr)
841 bool updated_mod_order = w_modification_order(curr);
842 bool updated_promises = resolve_promises(curr);
/* Once no promises remain outstanding, revisit deferred future values —
 * the modification order is now complete enough for stronger checks */
844 if (promises->size() == 0) {
845 for (unsigned int i = 0; i < futurevalues->size(); i++) {
846 struct PendingFutureValue pfv = (*futurevalues)[i];
847 //Do more ambitious checks now that mo is more complete
/* If mo allows the read and adding the future value is new, record the
 * read as a (possibly later) backtracking point */
848 if (mo_may_allow(pfv.writer, pfv.act)&&
849 pfv.act->get_node()->add_future_value(pfv.writer->get_value(), pfv.writer->get_seq_number()+params.maxfuturedelay) &&
850 (!priv->next_backtrack || *pfv.act > *priv->next_backtrack))
851 priv->next_backtrack = pfv.act;
853 futurevalues->resize(0);
856 mo_graph->commitChanges();
857 mo_check_promises(curr->get_tid(), curr);
859 get_thread(curr)->set_return_value(VALUE_NONE);
860 return updated_mod_order || updated_promises;
864 * Process a fence ModelAction
865 * @param curr The ModelAction to process
866 * @return True if synchronization was updated
868 bool ModelChecker::process_fence(ModelAction *curr)
871 * fence-relaxed: no-op
872 * fence-release: only log the occurence (not in this function), for
873 * use in later synchronization
874 * fence-acquire (this function): search for hypothetical release
877 bool updated = false;
878 if (curr->is_acquire()) {
879 action_list_t *list = action_trace;
880 action_list_t::reverse_iterator rit;
881 /* Find X : is_read(X) && X --sb-> curr */
882 for (rit = list->rbegin(); rit != list->rend(); rit++) {
883 ModelAction *act = *rit;
/* Only actions sequenced-before curr in the same thread matter */
886 if (act->get_tid() != curr->get_tid())
888 /* Stop at the beginning of the thread */
889 if (act->is_thread_start())
891 /* Stop once we reach a prior fence-acquire */
892 if (act->is_fence() && act->is_acquire())
896 /* read-acquire will find its own release sequences */
897 if (act->is_acquire())
900 /* Establish hypothetical release sequences */
901 rel_heads_list_t release_heads;
902 get_release_seq_heads(curr, act, &release_heads);
903 for (unsigned int i = 0; i < release_heads.size(); i++)
904 if (!curr->synchronize_with(release_heads[i]))
905 set_bad_synchronization();
906 if (release_heads.size() != 0)
914 * @brief Process the current action for thread-related activity
916 * Performs current-action processing for a THREAD_* ModelAction. Proccesses
917 * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
918 * synchronization, etc. This function is a no-op for non-THREAD actions
919 * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
921 * @param curr The current action
922 * @return True if synchronization was updated or a thread completed
/* NOTE(review): case labels/breaks (orig. 932-934, 939-940, 946-947, 949-951,
 * 953+) are not visible in this chunk. */
924 bool ModelChecker::process_thread_action(ModelAction *curr)
926 bool updated = false;
928 switch (curr->get_type()) {
929 case THREAD_CREATE: {
930 Thread *th = (Thread *)curr->get_location();
931 th->set_creation(curr);
/* THREAD_JOIN (label not visible): synchronize with the joined thread's
 * last action */
935 Thread *blocking = (Thread *)curr->get_location();
936 ModelAction *act = get_last_action(blocking->get_id());
937 curr->synchronize_with(act);
938 updated = true; /* trigger rel-seq checks */
941 case THREAD_FINISH: {
942 Thread *th = get_thread(curr);
/* Wake every thread that was blocked joining on this one */
943 while (!th->wait_list_empty()) {
944 ModelAction *act = th->pop_wait_list();
945 scheduler->wake(get_thread(act));
948 updated = true; /* trigger rel-seq checks */
/* THREAD_START (label not visible): check promises against the new
 * thread's clock vector */
952 check_promises(curr->get_tid(), NULL, curr->get_cv());
963 * @brief Process the current action for release sequence fixup activity
965 * Performs model-checker release sequence fixups for the current action,
966 * forcing a single pending release sequence to break (with a given, potential
967 * "loose" write) or to complete (i.e., synchronize). If a pending release
968 * sequence forms a complete release sequence, then we must perform the fixup
969 * synchronization, mo_graph additions, etc.
971 * @param curr The current action; must be a release sequence fixup action
972 * @param work_queue The work queue to which to add work items as they are
/* NOTE(review): the branch structure (orig. 984-986, 988-990, 997-999,
 * 1003-1004, 1009, 1018-1020, 1025-1026, 1028+) is not visible; write==NULL
 * presumably selects the "synchronize" path and non-NULL the "break" path. */
975 void ModelChecker::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
977 const ModelAction *write = curr->get_node()->get_relseq_break();
/* Take ownership of the single pending release sequence being fixed up */
978 struct release_seq *sequence = pending_rel_seqs->back();
979 pending_rel_seqs->pop_back();
981 ModelAction *acquire = sequence->acquire;
982 const ModelAction *rf = sequence->rf;
983 const ModelAction *release = sequence->release;
987 ASSERT(release->same_thread(rf));
991 * @todo Forcing a synchronization requires that we set
992 * modification order constraints. For instance, we can't allow
993 * a fixup sequence in which two separate read-acquire
994 * operations read from the same sequence, where the first one
995 * synchronizes and the other doesn't. Essentially, we can't
996 * allow any writes to insert themselves between 'release' and
1000 /* Must synchronize */
1001 if (!acquire->synchronize_with(release)) {
1002 set_bad_synchronization();
1005 /* Re-check all pending release sequences */
1006 work_queue->push_back(CheckRelSeqWorkEntry(NULL));
1007 /* Re-check act for mo_graph edges */
1008 work_queue->push_back(MOEdgeWorkEntry(acquire));
1010 /* propagate synchronization to later actions */
1011 action_list_t::reverse_iterator rit = action_trace->rbegin();
1012 for (; (*rit) != acquire; rit++) {
1013 ModelAction *propagate = *rit;
1014 if (acquire->happens_before(propagate)) {
1015 propagate->synchronize_with(acquire);
1016 /* Re-check 'propagate' for mo_graph edges */
1017 work_queue->push_back(MOEdgeWorkEntry(propagate));
1021 /* Break release sequence with new edges:
1022 * release --mo--> write --mo--> rf */
1023 mo_graph->addEdge(release, write);
1024 mo_graph->addEdge(write, rf);
1027 /* See if we have realized a data race */
1032 * Initialize the current action by performing one or more of the following
1033 * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
1034 * in the NodeStack, manipulating backtracking sets, allocating and
1035 * initializing clock vectors, and computing the promises to fulfill.
1037 * @param curr The current action, as passed from the user context; may be
1038 * freed/invalidated after the execution of this function, with a different
1039 * action "returned" its place (pass-by-reference)
1040 * @return True if curr is a newly-explored action; false otherwise
/* NOTE(review): some control-flow lines (orig. 1045, 1048-1049, 1052-1056,
 * 1058, 1060, 1064, 1067, 1069-1070, 1073-1074, 1076-1078, 1081, 1084-1088,
 * 1097, 1099+) are not visible in this chunk. */
1042 bool ModelChecker::initialize_curr_action(ModelAction **curr)
1044 ModelAction *newcurr;
/* Second half of an RMW: merge it back into the earlier RMWR action */
1046 if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
1047 newcurr = process_rmw(*curr);
1050 if (newcurr->is_rmw())
1051 compute_promises(newcurr);
1057 (*curr)->set_seq_number(get_next_seq_num());
/* Step the NodeStack: either revisits an existing node (replay) or
 * creates a new one (newly-explored action) */
1059 newcurr = node_stack->explore_action(*curr, scheduler->get_enabled_array());
1061 /* First restore type and order in case of RMW operation */
1062 if ((*curr)->is_rmwr())
1063 newcurr->copy_typeandorder(*curr);
1065 ASSERT((*curr)->get_location() == newcurr->get_location());
1066 newcurr->copy_from_new(*curr);
1068 /* Discard duplicate ModelAction; use action from NodeStack */
1071 /* Always compute new clock vector */
1072 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1075 return false; /* Action was explored previously */
1079 /* Always compute new clock vector */
1080 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1082 /* Assign most recent release fence */
1083 newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
1086 * Perform one-time actions when pushing new ModelAction onto
1089 if (newcurr->is_write())
1090 compute_promises(newcurr);
1091 else if (newcurr->is_relseq_fixup())
1092 compute_relseq_breakwrites(newcurr);
1093 else if (newcurr->is_wait())
/* wait explores two misc choices: sleep vs. spurious wakeup */
1094 newcurr->get_node()->set_misc_max(2);
1095 else if (newcurr->is_notify_one()) {
/* notify_one explores waking each current waiter */
1096 newcurr->get_node()->set_misc_max(get_safe_ptr_action(condvar_waiters_map, newcurr->get_location())->size());
1098 return true; /* This was a new ModelAction */
1103 * @brief Establish reads-from relation between two actions
1105 * Perform basic operations involved with establishing a concrete rf relation,
1106 * including setting the ModelAction data and checking for release sequences.
1108 * @param act The action that is reading (must be a read)
1109 * @param rf The action from which we are reading (must be a write)
1111 * @return True if this read established synchronization
1113 bool ModelChecker::read_from(ModelAction *act, const ModelAction *rf)
1115 act->set_read_from(rf);
/* An acquire read synchronizes with every release-sequence head of rf */
1116 if (rf != NULL && act->is_acquire()) {
1117 rel_heads_list_t release_heads;
1118 get_release_seq_heads(act, act, &release_heads);
1119 int num_heads = release_heads.size();
1120 for (unsigned int i = 0; i < release_heads.size(); i++)
1121 if (!act->synchronize_with(release_heads[i])) {
1122 set_bad_synchronization();
1125 return num_heads > 0;
1131 * @brief Check whether a model action is enabled.
1133 * Checks whether a lock or join operation would be successful (i.e., is the
1134 * lock already locked, or is the joined thread already complete). If not, put
1135 * the action in a waiter list.
1137 * @param curr is the ModelAction to check whether it is enabled.
1138 * @return a bool that indicates whether the action is enabled.
1140 bool ModelChecker::check_action_enabled(ModelAction *curr) {
1141 if (curr->is_lock()) {
1142 std::mutex *lock = (std::mutex *)curr->get_location();
1143 struct std::mutex_state *state = lock->get_state();
1144 if (state->islocked) {
1145 //Stick the action in the appropriate waiting queue
1146 get_safe_ptr_action(lock_waiters_map, curr->get_location())->push_back(curr);
1149 } else if (curr->get_type() == THREAD_JOIN) {
1150 Thread *blocking = (Thread *)curr->get_location();
/* Join on a still-running thread: register on its wait list; the joiner
 * is woken from THREAD_FINISH processing */
1151 if (!blocking->is_complete()) {
1152 blocking->push_wait_list(curr);
1161 * Stores the ModelAction for the current thread action. Call this
1162 * immediately before switching from user- to system-context to pass
1163 * data between them.
1164 * @param act The ModelAction created by the user-thread action
1166 void ModelChecker::set_current_action(ModelAction *act) {
1167 priv->current_action = act;
1171 * This is the heart of the model checker routine. It performs model-checking
1172 * actions corresponding to a given "current action." Among other processes, it
1173 * calculates reads-from relationships, updates synchronization clock vectors,
1174 * forms a memory_order constraints graph, and handles replay/backtrack
1175 * execution when running permutations of previously-observed executions.
1177 * @param curr The current action to process
1178 * @return The next Thread that must be executed. May be NULL if ModelChecker
1179 * makes no choice (e.g., according to replay execution, combining RMW actions,
1182 Thread * ModelChecker::check_current_action(ModelAction *curr)
1185 bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
1187 if (!check_action_enabled(curr)) {
1188 /* Make the execution look like we chose to run this action
1189 * much later, when a lock/join can succeed */
1190 get_current_thread()->set_pending(curr);
1191 scheduler->sleep(get_current_thread());
1192 return get_next_thread(NULL);
1195 bool newly_explored = initialize_curr_action(&curr);
1197 wake_up_sleeping_actions(curr);
1199 /* Add the action to lists before any other model-checking tasks */
1200 if (!second_part_of_rmw)
1201 add_action_to_lists(curr);
1203 /* Build may_read_from set for newly-created actions */
1204 if (newly_explored && curr->is_read())
1205 build_reads_from_past(curr);
1207 /* Initialize work_queue with the "current action" work */
1208 work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
1209 while (!work_queue.empty() && !has_asserted()) {
1210 WorkQueueEntry work = work_queue.front();
1211 work_queue.pop_front();
1213 switch (work.type) {
1214 case WORK_CHECK_CURR_ACTION: {
1215 ModelAction *act = work.action;
1216 bool update = false; /* update this location's release seq's */
1217 bool update_all = false; /* update all release seq's */
1219 if (process_thread_action(curr))
1222 if (act->is_read() && process_read(act, second_part_of_rmw))
1225 if (act->is_write() && process_write(act))
1228 if (act->is_fence() && process_fence(act))
1231 if (act->is_mutex_op() && process_mutex(act))
1234 if (act->is_relseq_fixup())
1235 process_relseq_fixup(curr, &work_queue);
1238 work_queue.push_back(CheckRelSeqWorkEntry(NULL));
1240 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1243 case WORK_CHECK_RELEASE_SEQ:
1244 resolve_release_sequences(work.location, &work_queue);
1246 case WORK_CHECK_MO_EDGES: {
1247 /** @todo Complete verification of work_queue */
1248 ModelAction *act = work.action;
1249 bool updated = false;
1251 if (act->is_read()) {
1252 const ModelAction *rf = act->get_reads_from();
1253 if (rf != NULL && r_modification_order(act, rf))
1256 if (act->is_write()) {
1257 if (w_modification_order(act))
1260 mo_graph->commitChanges();
1263 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1272 check_curr_backtracking(curr);
1273 set_backtracking(curr);
1274 return get_next_thread(curr);
1277 void ModelChecker::check_curr_backtracking(ModelAction *curr)
1279 Node *currnode = curr->get_node();
1280 Node *parnode = currnode->get_parent();
1282 if ((!parnode->backtrack_empty() ||
1283 !currnode->misc_empty() ||
1284 !currnode->read_from_empty() ||
1285 !currnode->future_value_empty() ||
1286 !currnode->promise_empty() ||
1287 !currnode->relseq_break_empty())
1288 && (!priv->next_backtrack ||
1289 *curr > *priv->next_backtrack)) {
1290 priv->next_backtrack = curr;
1294 bool ModelChecker::promises_expired() const
1296 for (unsigned int i = 0; i < promises->size(); i++) {
1297 Promise *promise = (*promises)[i];
1298 if (promise->get_expiration() < priv->used_sequence_numbers)
1305 * This is the strongest feasibility check available.
1306 * @return whether the current trace (partial or complete) must be a prefix of
1309 bool ModelChecker::isfeasibleprefix() const
1311 return pending_rel_seqs->size() == 0 && is_feasible_prefix_ignore_relseq();
1315 * Returns whether the current completed trace is feasible, except for pending
1316 * release sequences.
1318 bool ModelChecker::is_feasible_prefix_ignore_relseq() const
1320 if (DBG_ENABLED() && promises->size() != 0)
1321 DEBUG("Infeasible: unrevolved promises\n");
1323 return !is_infeasible() && promises->size() == 0;
1327 * Check if the current partial trace is infeasible. Does not check any
1328 * end-of-execution flags, which might rule out the execution. Thus, this is
1329 * useful only for ruling an execution as infeasible.
1330 * @return whether the current partial trace is infeasible.
1332 bool ModelChecker::is_infeasible() const
1334 if (DBG_ENABLED() && mo_graph->checkForRMWViolation())
1335 DEBUG("Infeasible: RMW violation\n");
1337 return mo_graph->checkForRMWViolation() || is_infeasible_ignoreRMW();
1341 * Check If the current partial trace is infeasible, while ignoring
1342 * infeasibility related to 2 RMW's reading from the same store. It does not
1343 * check end-of-execution feasibility.
1344 * @see ModelChecker::is_infeasible
1345 * @return whether the current partial trace is infeasible, ignoring multiple
1346 * RMWs reading from the same store.
1348 bool ModelChecker::is_infeasible_ignoreRMW() const
1350 if (DBG_ENABLED()) {
1351 if (mo_graph->checkForCycles())
1352 DEBUG("Infeasible: modification order cycles\n");
1353 if (priv->failed_promise)
1354 DEBUG("Infeasible: failed promise\n");
1355 if (priv->too_many_reads)
1356 DEBUG("Infeasible: too many reads\n");
1357 if (priv->bad_synchronization)
1358 DEBUG("Infeasible: bad synchronization ordering\n");
1359 if (promises_expired())
1360 DEBUG("Infeasible: promises expired\n");
1362 return mo_graph->checkForCycles() || priv->failed_promise ||
1363 priv->too_many_reads || priv->bad_synchronization ||
1367 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
1368 ModelAction * ModelChecker::process_rmw(ModelAction *act) {
1369 ModelAction *lastread = get_last_action(act->get_tid());
1370 lastread->process_rmw(act);
1371 if (act->is_rmw() && lastread->get_reads_from()!=NULL) {
1372 mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
1373 mo_graph->commitChanges();
1379 * Checks whether a thread has read from the same write for too many times
1380 * without seeing the effects of a later write.
1383 * 1) there must a different write that we could read from that would satisfy the modification order,
1384 * 2) we must have read from the same value in excess of maxreads times, and
1385 * 3) that other write must have been in the reads_from set for maxreads times.
1387 * If so, we decide that the execution is no longer feasible.
1389 void ModelChecker::check_recency(ModelAction *curr, const ModelAction *rf)
1391 if (params.maxreads != 0) {
1392 if (curr->get_node()->get_read_from_size() <= 1)
1394 //Must make sure that execution is currently feasible... We could
1395 //accidentally clear by rolling back
1396 if (is_infeasible())
1398 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1399 int tid = id_to_int(curr->get_tid());
1402 if ((int)thrd_lists->size() <= tid)
1404 action_list_t *list = &(*thrd_lists)[tid];
1406 action_list_t::reverse_iterator rit = list->rbegin();
1407 /* Skip past curr */
1408 for (; (*rit) != curr; rit++)
1410 /* go past curr now */
1413 action_list_t::reverse_iterator ritcopy = rit;
1414 //See if we have enough reads from the same value
1416 for (; count < params.maxreads; rit++,count++) {
1417 if (rit == list->rend())
1419 ModelAction *act = *rit;
1420 if (!act->is_read())
1423 if (act->get_reads_from() != rf)
1425 if (act->get_node()->get_read_from_size() <= 1)
1428 for (int i = 0; i < curr->get_node()->get_read_from_size(); i++) {
1430 const ModelAction *write = curr->get_node()->get_read_from_at(i);
1432 /* Need a different write */
1436 /* Test to see whether this is a feasible write to read from*/
1437 mo_graph->startChanges();
1438 r_modification_order(curr, write);
1439 bool feasiblereadfrom = !is_infeasible();
1440 mo_graph->rollbackChanges();
1442 if (!feasiblereadfrom)
1446 bool feasiblewrite = true;
1447 //new we need to see if this write works for everyone
1449 for (int loop = count; loop>0; loop--,rit++) {
1450 ModelAction *act = *rit;
1451 bool foundvalue = false;
1452 for (int j = 0; j<act->get_node()->get_read_from_size(); j++) {
1453 if (act->get_node()->get_read_from_at(j)==write) {
1459 feasiblewrite = false;
1463 if (feasiblewrite) {
1464 priv->too_many_reads = true;
1472 * Updates the mo_graph with the constraints imposed from the current
1475 * Basic idea is the following: Go through each other thread and find
1476 * the lastest action that happened before our read. Two cases:
1478 * (1) The action is a write => that write must either occur before
1479 * the write we read from or be the write we read from.
1481 * (2) The action is a read => the write that that action read from
1482 * must occur before the write we read from or be the same write.
1484 * @param curr The current action. Must be a read.
1485 * @param rf The action that curr reads from. Must be a write.
1486 * @return True if modification order edges were added; false otherwise
1488 bool ModelChecker::r_modification_order(ModelAction *curr, const ModelAction *rf)
1490 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1493 ASSERT(curr->is_read());
1495 /* Last SC fence in the current thread */
1496 ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1498 /* Iterate over all threads */
1499 for (i = 0; i < thrd_lists->size(); i++) {
1500 /* Last SC fence in thread i */
1501 ModelAction *last_sc_fence_thread_local = NULL;
1502 if (int_to_id((int)i) != curr->get_tid())
1503 last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(i), NULL);
1505 /* Last SC fence in thread i, before last SC fence in current thread */
1506 ModelAction *last_sc_fence_thread_before = NULL;
1507 if (last_sc_fence_local)
1508 last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1510 /* Iterate over actions in thread, starting from most recent */
1511 action_list_t *list = &(*thrd_lists)[i];
1512 action_list_t::reverse_iterator rit;
1513 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1514 ModelAction *act = *rit;
1516 if (act->is_write() && act != rf && act != curr) {
1517 /* C++, Section 29.3 statement 5 */
1518 if (curr->is_seqcst() && last_sc_fence_thread_local &&
1519 *act < *last_sc_fence_thread_local) {
1520 mo_graph->addEdge(act, rf);
1524 /* C++, Section 29.3 statement 4 */
1525 else if (act->is_seqcst() && last_sc_fence_local &&
1526 *act < *last_sc_fence_local) {
1527 mo_graph->addEdge(act, rf);
1531 /* C++, Section 29.3 statement 6 */
1532 else if (last_sc_fence_thread_before &&
1533 *act < *last_sc_fence_thread_before) {
1534 mo_graph->addEdge(act, rf);
1541 * Include at most one act per-thread that "happens
1542 * before" curr. Don't consider reflexively.
1544 if (act->happens_before(curr) && act != curr) {
1545 if (act->is_write()) {
1547 mo_graph->addEdge(act, rf);
1551 const ModelAction *prevreadfrom = act->get_reads_from();
1552 //if the previous read is unresolved, keep going...
1553 if (prevreadfrom == NULL)
1556 if (rf != prevreadfrom) {
1557 mo_graph->addEdge(prevreadfrom, rf);
1569 /** This method fixes up the modification order when we resolve a
1570 * promises. The basic problem is that actions that occur after the
1571 * read curr could not property add items to the modification order
1574 * So for each thread, we find the earliest item that happens after
1575 * the read curr. This is the item we have to fix up with additional
1576 * constraints. If that action is write, we add a MO edge between
1577 * the Action rf and that action. If the action is a read, we add a
1578 * MO edge between the Action rf, and whatever the read accessed.
1580 * @param curr is the read ModelAction that we are fixing up MO edges for.
1581 * @param rf is the write ModelAction that curr reads from.
1584 void ModelChecker::post_r_modification_order(ModelAction *curr, const ModelAction *rf)
1586 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1588 ASSERT(curr->is_read());
1590 /* Iterate over all threads */
1591 for (i = 0; i < thrd_lists->size(); i++) {
1592 /* Iterate over actions in thread, starting from most recent */
1593 action_list_t *list = &(*thrd_lists)[i];
1594 action_list_t::reverse_iterator rit;
1595 ModelAction *lastact = NULL;
1597 /* Find last action that happens after curr that is either not curr or a rmw */
1598 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1599 ModelAction *act = *rit;
1600 if (curr->happens_before(act) && (curr != act || curr->is_rmw())) {
1606 /* Include at most one act per-thread that "happens before" curr */
1607 if (lastact != NULL) {
1608 if (lastact==curr) {
1609 //Case 1: The resolved read is a RMW, and we need to make sure
1610 //that the write portion of the RMW mod order after rf
1612 mo_graph->addEdge(rf, lastact);
1613 } else if (lastact->is_read()) {
1614 //Case 2: The resolved read is a normal read and the next
1615 //operation is a read, and we need to make sure the value read
1616 //is mod ordered after rf
1618 const ModelAction *postreadfrom = lastact->get_reads_from();
1619 if (postreadfrom != NULL&&rf != postreadfrom)
1620 mo_graph->addEdge(rf, postreadfrom);
1622 //Case 3: The resolved read is a normal read and the next
1623 //operation is a write, and we need to make sure that the
1624 //write is mod ordered after rf
1626 mo_graph->addEdge(rf, lastact);
1634 * Updates the mo_graph with the constraints imposed from the current write.
1636 * Basic idea is the following: Go through each other thread and find
1637 * the lastest action that happened before our write. Two cases:
1639 * (1) The action is a write => that write must occur before
1642 * (2) The action is a read => the write that that action read from
1643 * must occur before the current write.
1645 * This method also handles two other issues:
1647 * (I) Sequential Consistency: Making sure that if the current write is
1648 * seq_cst, that it occurs after the previous seq_cst write.
1650 * (II) Sending the write back to non-synchronizing reads.
1652 * @param curr The current action. Must be a write.
1653 * @return True if modification order edges were added; false otherwise
1655 bool ModelChecker::w_modification_order(ModelAction *curr)
1657 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1660 ASSERT(curr->is_write());
1662 if (curr->is_seqcst()) {
1663 /* We have to at least see the last sequentially consistent write,
1664 so we are initialized. */
1665 ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
1666 if (last_seq_cst != NULL) {
1667 mo_graph->addEdge(last_seq_cst, curr);
1672 /* Last SC fence in the current thread */
1673 ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1675 /* Iterate over all threads */
1676 for (i = 0; i < thrd_lists->size(); i++) {
1677 /* Last SC fence in thread i, before last SC fence in current thread */
1678 ModelAction *last_sc_fence_thread_before = NULL;
1679 if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
1680 last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1682 /* Iterate over actions in thread, starting from most recent */
1683 action_list_t *list = &(*thrd_lists)[i];
1684 action_list_t::reverse_iterator rit;
1685 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1686 ModelAction *act = *rit;
1689 * 1) If RMW and it actually read from something, then we
1690 * already have all relevant edges, so just skip to next
1693 * 2) If RMW and it didn't read from anything, we should
1694 * whatever edge we can get to speed up convergence.
1696 * 3) If normal write, we need to look at earlier actions, so
1697 * continue processing list.
1699 if (curr->is_rmw()) {
1700 if (curr->get_reads_from()!=NULL)
1708 /* C++, Section 29.3 statement 7 */
1709 if (last_sc_fence_thread_before && act->is_write() &&
1710 *act < *last_sc_fence_thread_before) {
1711 mo_graph->addEdge(act, curr);
1717 * Include at most one act per-thread that "happens
1720 if (act->happens_before(curr)) {
1722 * Note: if act is RMW, just add edge:
1724 * The following edge should be handled elsewhere:
1725 * readfrom(act) --mo--> act
1727 if (act->is_write())
1728 mo_graph->addEdge(act, curr);
1729 else if (act->is_read()) {
1730 //if previous read accessed a null, just keep going
1731 if (act->get_reads_from() == NULL)
1733 mo_graph->addEdge(act->get_reads_from(), curr);
1737 } else if (act->is_read() && !act->could_synchronize_with(curr) &&
1738 !act->same_thread(curr)) {
1739 /* We have an action that:
1740 (1) did not happen before us
1741 (2) is a read and we are a write
1742 (3) cannot synchronize with us
1743 (4) is in a different thread
1745 that read could potentially read from our write. Note that
1746 these checks are overly conservative at this point, we'll
1747 do more checks before actually removing the
1751 if (thin_air_constraint_may_allow(curr, act)) {
1752 if (!is_infeasible() ||
1753 (curr->is_rmw() && act->is_rmw() && curr->get_reads_from() == act->get_reads_from() && !is_infeasible_ignoreRMW())) {
1754 struct PendingFutureValue pfv = {curr,act};
1755 futurevalues->push_back(pfv);
1765 /** Arbitrary reads from the future are not allowed. Section 29.3
1766 * part 9 places some constraints. This method checks one result of constraint
1767 * constraint. Others require compiler support. */
1768 bool ModelChecker::thin_air_constraint_may_allow(const ModelAction *writer, const ModelAction *reader)
1770 if (!writer->is_rmw())
1773 if (!reader->is_rmw())
1776 for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
1777 if (search == reader)
1779 if (search->get_tid() == reader->get_tid() &&
1780 search->happens_before(reader))
1788 * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
1789 * some constraints. This method checks one the following constraint (others
1790 * require compiler support):
1792 * If X --hb-> Y --mo-> Z, then X should not read from Z.
1794 bool ModelChecker::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
1796 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, reader->get_location());
1798 /* Iterate over all threads */
1799 for (i = 0; i < thrd_lists->size(); i++) {
1800 const ModelAction *write_after_read = NULL;
1802 /* Iterate over actions in thread, starting from most recent */
1803 action_list_t *list = &(*thrd_lists)[i];
1804 action_list_t::reverse_iterator rit;
1805 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1806 ModelAction *act = *rit;
1808 if (!reader->happens_before(act))
1810 else if (act->is_write())
1811 write_after_read = act;
1812 else if (act->is_read() && act->get_reads_from() != NULL && act != reader) {
1813 write_after_read = act->get_reads_from();
1817 if (write_after_read && write_after_read!=writer && mo_graph->checkReachable(write_after_read, writer))
1824 * Finds the head(s) of the release sequence(s) containing a given ModelAction.
1825 * The ModelAction under consideration is expected to be taking part in
1826 * release/acquire synchronization as an object of the "reads from" relation.
1827 * Note that this can only provide release sequence support for RMW chains
1828 * which do not read from the future, as those actions cannot be traced until
1829 * their "promise" is fulfilled. Similarly, we may not even establish the
1830 * presence of a release sequence with certainty, as some modification order
1831 * constraints may be decided further in the future. Thus, this function
1832 * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
1833 * and a boolean representing certainty.
1835 * @param rf The action that might be part of a release sequence. Must be a
1837 * @param release_heads A pass-by-reference style return parameter. After
1838 * execution of this function, release_heads will contain the heads of all the
1839 * relevant release sequences, if any exists with certainty
1840 * @param pending A pass-by-reference style return parameter which is only used
1841 * when returning false (i.e., uncertain). Returns most information regarding
1842 * an uncertain release sequence, including any write operations that might
1843 * break the sequence.
1844 * @return true, if the ModelChecker is certain that release_heads is complete;
1847 bool ModelChecker::release_seq_heads(const ModelAction *rf,
1848 rel_heads_list_t *release_heads,
1849 struct release_seq *pending) const
1851 /* Only check for release sequences if there are no cycles */
1852 if (mo_graph->checkForCycles())
1856 ASSERT(rf->is_write());
1858 if (rf->is_release())
1859 release_heads->push_back(rf);
1860 else if (rf->get_last_fence_release())
1861 release_heads->push_back(rf->get_last_fence_release());
1863 break; /* End of RMW chain */
1865 /** @todo Need to be smarter here... In the linux lock
1866 * example, this will run to the beginning of the program for
1868 /** @todo The way to be smarter here is to keep going until 1
1869 * thread has a release preceded by an acquire and you've seen
1872 /* acq_rel RMW is a sufficient stopping condition */
1873 if (rf->is_acquire() && rf->is_release())
1874 return true; /* complete */
1876 rf = rf->get_reads_from();
1879 /* read from future: need to settle this later */
1881 return false; /* incomplete */
1884 if (rf->is_release())
1885 return true; /* complete */
1887 /* else relaxed write
1888 * - check for fence-release in the same thread (29.8, stmt. 3)
1889 * - check modification order for contiguous subsequence
1890 * -> rf must be same thread as release */
1892 const ModelAction *fence_release = rf->get_last_fence_release();
1893 /* Synchronize with a fence-release unconditionally; we don't need to
1894 * find any more "contiguous subsequence..." for it */
1896 release_heads->push_back(fence_release);
1898 int tid = id_to_int(rf->get_tid());
1899 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, rf->get_location());
1900 action_list_t *list = &(*thrd_lists)[tid];
1901 action_list_t::const_reverse_iterator rit;
1903 /* Find rf in the thread list */
1904 rit = std::find(list->rbegin(), list->rend(), rf);
1905 ASSERT(rit != list->rend());
1907 /* Find the last {write,fence}-release */
1908 for (; rit != list->rend(); rit++) {
1909 if (fence_release && *(*rit) < *fence_release)
1911 if ((*rit)->is_release())
1914 if (rit == list->rend()) {
1915 /* No write-release in this thread */
1916 return true; /* complete */
1917 } else if (fence_release && *(*rit) < *fence_release) {
1918 /* The fence-release is more recent (and so, "stronger") than
1919 * the most recent write-release */
1920 return true; /* complete */
1921 } /* else, need to establish contiguous release sequence */
1922 ModelAction *release = *rit;
1924 ASSERT(rf->same_thread(release));
1926 pending->writes.clear();
1928 bool certain = true;
1929 for (unsigned int i = 0; i < thrd_lists->size(); i++) {
1930 if (id_to_int(rf->get_tid()) == (int)i)
1932 list = &(*thrd_lists)[i];
1934 /* Can we ensure no future writes from this thread may break
1935 * the release seq? */
1936 bool future_ordered = false;
1938 ModelAction *last = get_last_action(int_to_id(i));
1939 Thread *th = get_thread(int_to_id(i));
1940 if ((last && rf->happens_before(last)) ||
1943 future_ordered = true;
1945 ASSERT(!th->is_model_thread() || future_ordered);
1947 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1948 const ModelAction *act = *rit;
1949 /* Reach synchronization -> this thread is complete */
1950 if (act->happens_before(release))
1952 if (rf->happens_before(act)) {
1953 future_ordered = true;
1957 /* Only non-RMW writes can break release sequences */
1958 if (!act->is_write() || act->is_rmw())
1961 /* Check modification order */
1962 if (mo_graph->checkReachable(rf, act)) {
1963 /* rf --mo--> act */
1964 future_ordered = true;
1967 if (mo_graph->checkReachable(act, release))
1968 /* act --mo--> release */
1970 if (mo_graph->checkReachable(release, act) &&
1971 mo_graph->checkReachable(act, rf)) {
1972 /* release --mo-> act --mo--> rf */
1973 return true; /* complete */
1975 /* act may break release sequence */
1976 pending->writes.push_back(act);
1979 if (!future_ordered)
1980 certain = false; /* This thread is uncertain */
1984 release_heads->push_back(release);
1985 pending->writes.clear();
1987 pending->release = release;
1994 * An interface for getting the release sequence head(s) with which a
1995 * given ModelAction must synchronize. This function only returns a non-empty
1996 * result when it can locate a release sequence head with certainty. Otherwise,
1997 * it may mark the internal state of the ModelChecker so that it will handle
1998 * the release sequence at a later time, causing @a acquire to update its
1999 * synchronization at some later point in execution.
2001 * @param acquire The 'acquire' action that may synchronize with a release
2003 * @param read The read action that may read from a release sequence; this may
2004 * be the same as acquire, or else an earlier action in the same thread (i.e.,
2005 * when 'acquire' is a fence-acquire)
2006 * @param release_heads A pass-by-reference return parameter. Will be filled
2007 * with the head(s) of the release sequence(s), if they exists with certainty.
2008 * @see ModelChecker::release_seq_heads
2010 void ModelChecker::get_release_seq_heads(ModelAction *acquire,
2011 ModelAction *read, rel_heads_list_t *release_heads)
2013 const ModelAction *rf = read->get_reads_from();
2014 struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
2015 sequence->acquire = acquire;
2016 sequence->read = read;
2018 if (!release_seq_heads(rf, release_heads, sequence)) {
2019 /* add act to 'lazy checking' list */
2020 pending_rel_seqs->push_back(sequence);
2022 snapshot_free(sequence);
2027 * Attempt to resolve all stashed operations that might synchronize with a
2028 * release sequence for a given location. This implements the "lazy" portion of
2029 * determining whether or not a release sequence was contiguous, since not all
2030 * modification order information is present at the time an action occurs.
2032 * @param location The location/object that should be checked for release
2033 * sequence resolutions. A NULL value means to check all locations.
2034 * @param work_queue The work queue to which to add work items as they are
2036 * @return True if any updates occurred (new synchronization, new mo_graph
2039 bool ModelChecker::resolve_release_sequences(void *location, work_queue_t *work_queue)
2041 bool updated = false;
2042 std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >::iterator it = pending_rel_seqs->begin();
2043 while (it != pending_rel_seqs->end()) {
2044 struct release_seq *pending = *it;
2045 ModelAction *acquire = pending->acquire;
2046 const ModelAction *read = pending->read;
2048 /* Only resolve sequences on the given location, if provided */
2049 if (location && read->get_location() != location) {
2054 const ModelAction *rf = read->get_reads_from();
2055 rel_heads_list_t release_heads;
2057 complete = release_seq_heads(rf, &release_heads, pending);
2058 for (unsigned int i = 0; i < release_heads.size(); i++) {
2059 if (!acquire->has_synchronized_with(release_heads[i])) {
2060 if (acquire->synchronize_with(release_heads[i]))
2063 set_bad_synchronization();
2068 /* Re-check all pending release sequences */
2069 work_queue->push_back(CheckRelSeqWorkEntry(NULL));
2070 /* Re-check read-acquire for mo_graph edges */
2071 if (acquire->is_read())
2072 work_queue->push_back(MOEdgeWorkEntry(acquire));
2074 /* propagate synchronization to later actions */
2075 action_list_t::reverse_iterator rit = action_trace->rbegin();
2076 for (; (*rit) != acquire; rit++) {
2077 ModelAction *propagate = *rit;
2078 if (acquire->happens_before(propagate)) {
2079 propagate->synchronize_with(acquire);
2080 /* Re-check 'propagate' for mo_graph edges */
2081 work_queue->push_back(MOEdgeWorkEntry(propagate));
2086 it = pending_rel_seqs->erase(it);
2087 snapshot_free(pending);
2093 // If we resolved promises or data races, see if we have realized a data race.
2100 * Performs various bookkeeping operations for the current ModelAction. For
2101 * instance, adds action to the per-object, per-thread action vector and to the
2102 * action trace list of all thread actions.
2104 * @param act is the ModelAction to add.
2106 void ModelChecker::add_action_to_lists(ModelAction *act)
2108 int tid = id_to_int(act->get_tid());
2109 ModelAction *uninit = NULL;
2111 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
2112 if (list->empty() && act->is_atomic_var()) {
2113 uninit = new_uninitialized_action(act->get_location());
2114 uninit_id = id_to_int(uninit->get_tid());
2115 list->push_back(uninit);
2117 list->push_back(act);
2119 action_trace->push_back(act);
2121 action_trace->push_front(uninit);
2123 std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, act->get_location());
2124 if (tid >= (int)vec->size())
2125 vec->resize(priv->next_thread_id);
2126 (*vec)[tid].push_back(act);
2128 (*vec)[uninit_id].push_front(uninit);
2130 if ((int)thrd_last_action->size() <= tid)
2131 thrd_last_action->resize(get_num_threads());
2132 (*thrd_last_action)[tid] = act;
2134 (*thrd_last_action)[uninit_id] = uninit;
2136 if (act->is_fence() && act->is_release()) {
2137 if ((int)thrd_last_fence_release->size() <= tid)
2138 thrd_last_fence_release->resize(get_num_threads());
2139 (*thrd_last_fence_release)[tid] = act;
2142 if (act->is_wait()) {
2143 void *mutex_loc=(void *) act->get_value();
2144 get_safe_ptr_action(obj_map, mutex_loc)->push_back(act);
2146 std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, mutex_loc);
2147 if (tid >= (int)vec->size())
2148 vec->resize(priv->next_thread_id);
2149 (*vec)[tid].push_back(act);
2154 * @brief Get the last action performed by a particular Thread
2155 * @param tid The thread ID of the Thread in question
2156 * @return The last action in the thread
2158 ModelAction * ModelChecker::get_last_action(thread_id_t tid) const
2160 int threadid = id_to_int(tid);
2161 if (threadid < (int)thrd_last_action->size())
2162 return (*thrd_last_action)[id_to_int(tid)];
2168 * @brief Get the last fence release performed by a particular Thread
2169 * @param tid The thread ID of the Thread in question
2170 * @return The last fence release in the thread, if one exists; NULL otherwise
2172 ModelAction * ModelChecker::get_last_fence_release(thread_id_t tid) const
2174 int threadid = id_to_int(tid);
2175 if (threadid < (int)thrd_last_fence_release->size())
2176 return (*thrd_last_fence_release)[id_to_int(tid)];
2182 * Gets the last memory_order_seq_cst write (in the total global sequence)
2183 * performed on a particular object (i.e., memory location), not including the
2185 * @param curr The current ModelAction; also denotes the object location to
2187 * @return The last seq_cst write
2189 ModelAction * ModelChecker::get_last_seq_cst_write(ModelAction *curr) const
2191 void *location = curr->get_location();
2192 action_list_t *list = get_safe_ptr_action(obj_map, location);
2193 /* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
2194 action_list_t::reverse_iterator rit;
2195 for (rit = list->rbegin(); rit != list->rend(); rit++)
2196 if ((*rit)->is_write() && (*rit)->is_seqcst() && (*rit) != curr)
2202 * Gets the last memory_order_seq_cst fence (in the total global sequence)
2203 * performed in a particular thread, prior to a particular fence.
2204 * @param tid The ID of the thread to check
2205 * @param before_fence The fence from which to begin the search; if NULL, then
2206 * search for the most recent fence in the thread.
2207 * @return The last prior seq_cst fence in the thread, if exists; otherwise, NULL
2209 ModelAction * ModelChecker::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
2211 /* All fences should have NULL location */
2212 action_list_t *list = get_safe_ptr_action(obj_map, NULL);
2213 action_list_t::reverse_iterator rit = list->rbegin();
2216 for (; rit != list->rend(); rit++)
2217 if (*rit == before_fence)
2220 ASSERT(*rit == before_fence);
2224 for (; rit != list->rend(); rit++)
2225 if ((*rit)->is_fence() && (tid == (*rit)->get_tid()) && (*rit)->is_seqcst())
2231 * Gets the last unlock operation performed on a particular mutex (i.e., memory
2232 * location). This function identifies the mutex according to the current
2233 * action, which is presumed to perform on the same mutex.
2234 * @param curr The current ModelAction; also denotes the object location to
2236 * @return The last unlock operation
2238 ModelAction * ModelChecker::get_last_unlock(ModelAction *curr) const
2240 void *location = curr->get_location();
2241 action_list_t *list = get_safe_ptr_action(obj_map, location);
2242 /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
2243 action_list_t::reverse_iterator rit;
2244 for (rit = list->rbegin(); rit != list->rend(); rit++)
2245 if ((*rit)->is_unlock() || (*rit)->is_wait())
2250 ModelAction * ModelChecker::get_parent_action(thread_id_t tid) const
2252 ModelAction *parent = get_last_action(tid);
2254 parent = get_thread(tid)->get_creation();
2259 * Returns the clock vector for a given thread.
2260 * @param tid The thread whose clock vector we want
2261 * @return Desired clock vector
2263 ClockVector * ModelChecker::get_cv(thread_id_t tid) const
2265 return get_parent_action(tid)->get_cv();
2269 * Resolve a set of Promises with a current write. The set is provided in the
2270 * Node corresponding to @a write.
2271 * @param write The ModelAction that is fulfilling Promises
2272 * @return True if promises were resolved; false otherwise
2274 bool ModelChecker::resolve_promises(ModelAction *write)
2276 bool resolved = false;
2277 std::vector< thread_id_t, ModelAlloc<thread_id_t> > threads_to_check;
2279 for (unsigned int i = 0, promise_index = 0; promise_index < promises->size(); i++) {
2280 Promise *promise = (*promises)[promise_index];
2281 if (write->get_node()->get_promise(i)) {
2282 ModelAction *read = promise->get_action();
2283 if (read->is_rmw()) {
2284 mo_graph->addRMWEdge(write, read);
2286 read_from(read, write);
2287 //First fix up the modification order for actions that happened
2289 r_modification_order(read, write);
2290 //Next fix up the modification order for actions that happened
2292 post_r_modification_order(read, write);
2293 //Make sure the promise's value matches the write's value
2294 ASSERT(promise->get_value() == write->get_value());
2297 promises->erase(promises->begin() + promise_index);
2298 threads_to_check.push_back(read->get_tid());
2305 //Check whether reading these writes has made threads unable to
2308 for(unsigned int i=0;i<threads_to_check.size();i++)
2309 mo_check_promises(threads_to_check[i], write);
2315 * Compute the set of promises that could potentially be satisfied by this
2316 * action. Note that the set computation actually appears in the Node, not in
2318 * @param curr The ModelAction that may satisfy promises
2320 void ModelChecker::compute_promises(ModelAction *curr)
2322 for (unsigned int i = 0; i < promises->size(); i++) {
2323 Promise *promise = (*promises)[i];
2324 const ModelAction *act = promise->get_action();
2325 if (!act->happens_before(curr) &&
2327 !act->could_synchronize_with(curr) &&
2328 !act->same_thread(curr) &&
2329 act->get_location() == curr->get_location() &&
2330 promise->get_value() == curr->get_value()) {
2331 curr->get_node()->set_promise(i, act->is_rmw());
2336 /** Checks promises in response to change in ClockVector Threads. */
2337 void ModelChecker::check_promises(thread_id_t tid, ClockVector *old_cv, ClockVector *merge_cv)
2339 for (unsigned int i = 0; i < promises->size(); i++) {
2340 Promise *promise = (*promises)[i];
2341 const ModelAction *act = promise->get_action();
2342 if ((old_cv == NULL || !old_cv->synchronized_since(act)) &&
2343 merge_cv->synchronized_since(act)) {
2344 if (promise->increment_threads(tid)) {
2345 //Promise has failed
2346 priv->failed_promise = true;
2353 void ModelChecker::check_promises_thread_disabled() {
2354 for (unsigned int i = 0; i < promises->size(); i++) {
2355 Promise *promise = (*promises)[i];
2356 if (promise->check_promise()) {
2357 priv->failed_promise = true;
2363 /** Checks promises in response to addition to modification order for threads.
2365 * pthread is the thread that performed the read that created the promise
2367 * pread is the read that created the promise
2369 * pwrite is either the first write to same location as pread by
2370 * pthread that is sequenced after pread or the value read by the
2371 * first read to the same lcoation as pread by pthread that is
2372 * sequenced after pread..
2374 * 1. If tid=pthread, then we check what other threads are reachable
2375 * through the mode order starting with pwrite. Those threads cannot
2376 * perform a write that will resolve the promise due to modification
2377 * order constraints.
2379 * 2. If the tid is not pthread, we check whether pwrite can reach the
2380 * action write through the modification order. If so, that thread
2381 * cannot perform a future write that will resolve the promise due to
2382 * modificatin order constraints.
2384 * @param tid The thread that either read from the model action
2385 * write, or actually did the model action write.
2387 * @param write The ModelAction representing the relevant write.
2389 void ModelChecker::mo_check_promises(thread_id_t tid, const ModelAction *write)
2391 void *location = write->get_location();
2392 for (unsigned int i = 0; i < promises->size(); i++) {
2393 Promise *promise = (*promises)[i];
2394 const ModelAction *act = promise->get_action();
2396 //Is this promise on the same location?
2397 if ( act->get_location() != location )
2400 //same thread as the promise
2401 if ( act->get_tid()==tid ) {
2403 //do we have a pwrite for the promise, if not, set it
2404 if (promise->get_write() == NULL ) {
2405 promise->set_write(write);
2406 //The pwrite cannot happen before the promise
2407 if (write->happens_before(act) && (write != act)) {
2408 priv->failed_promise = true;
2412 if (mo_graph->checkPromise(write, promise)) {
2413 priv->failed_promise = true;
2418 //Don't do any lookups twice for the same thread
2419 if (promise->has_sync_thread(tid))
2422 if (promise->get_write()&&mo_graph->checkReachable(promise->get_write(), write)) {
2423 if (promise->increment_threads(tid)) {
2424 priv->failed_promise = true;
2432 * Compute the set of writes that may break the current pending release
2433 * sequence. This information is extracted from previou release sequence
2436 * @param curr The current ModelAction. Must be a release sequence fixup
2439 void ModelChecker::compute_relseq_breakwrites(ModelAction *curr)
2441 if (pending_rel_seqs->empty())
2444 struct release_seq *pending = pending_rel_seqs->back();
2445 for (unsigned int i = 0; i < pending->writes.size(); i++) {
2446 const ModelAction *write = pending->writes[i];
2447 curr->get_node()->add_relseq_break(write);
2450 /* NULL means don't break the sequence; just synchronize */
2451 curr->get_node()->add_relseq_break(NULL);
2455 * Build up an initial set of all past writes that this 'read' action may read
2456 * from. This set is determined by the clock vector's "happens before"
2458 * @param curr is the current ModelAction that we are exploring; it must be a
2461 void ModelChecker::build_reads_from_past(ModelAction *curr)
2463 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
2465 ASSERT(curr->is_read());
2467 ModelAction *last_sc_write = NULL;
2469 if (curr->is_seqcst())
2470 last_sc_write = get_last_seq_cst_write(curr);
2472 /* Iterate over all threads */
2473 for (i = 0; i < thrd_lists->size(); i++) {
2474 /* Iterate over actions in thread, starting from most recent */
2475 action_list_t *list = &(*thrd_lists)[i];
2476 action_list_t::reverse_iterator rit;
2477 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2478 ModelAction *act = *rit;
2480 /* Only consider 'write' actions */
2481 if (!act->is_write() || act == curr)
2484 /* Don't consider more than one seq_cst write if we are a seq_cst read. */
2485 bool allow_read = true;
2487 if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
2489 else if (curr->get_sleep_flag() && !curr->is_seqcst() && !sleep_can_read_from(curr, act))
2493 DEBUG("Adding action to may_read_from:\n");
2494 if (DBG_ENABLED()) {
2498 curr->get_node()->add_read_from(act);
2501 /* Include at most one act per-thread that "happens before" curr */
2502 if (act->happens_before(curr))
2507 if (DBG_ENABLED()) {
2508 model_print("Reached read action:\n");
2510 model_print("Printing may_read_from\n");
2511 curr->get_node()->print_may_read_from();
2512 model_print("End printing may_read_from\n");
2516 bool ModelChecker::sleep_can_read_from(ModelAction *curr, const ModelAction *write)
2519 /* UNINIT actions don't have a Node, and they never sleep */
2520 if (write->is_uninitialized())
2522 Node *prevnode = write->get_node()->get_parent();
2524 bool thread_sleep = prevnode->enabled_status(curr->get_tid()) == THREAD_SLEEP_SET;
2525 if (write->is_release() && thread_sleep)
2527 if (!write->is_rmw()) {
2530 if (write->get_reads_from() == NULL)
2532 write=write->get_reads_from();
2537 * @brief Create a new action representing an uninitialized atomic
2538 * @param location The memory location of the atomic object
2539 * @return A pointer to a new ModelAction
2541 ModelAction * ModelChecker::new_uninitialized_action(void *location) const
2543 ModelAction *act = (ModelAction *)snapshot_malloc(sizeof(class ModelAction));
2544 act = new (act) ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, location, 0, model_thread);
2545 act->create_cv(NULL);
2549 static void print_list(action_list_t *list, int exec_num = -1)
2551 action_list_t::iterator it;
2553 model_print("---------------------------------------------------------------------\n");
2555 model_print("Execution %d:\n", exec_num);
2557 unsigned int hash=0;
2559 for (it = list->begin(); it != list->end(); it++) {
2561 hash=hash^(hash<<3)^((*it)->hash());
2563 model_print("HASH %u\n", hash);
2564 model_print("---------------------------------------------------------------------\n");
#if SUPPORT_MOD_ORDER_DUMP
/**
 * @brief Dump the modification-order graph plus reads-from ("rf") and
 * sequenced-before ("sb") edges of the current trace as a Graphviz file
 * @param filename Base name of the output file; ".dot" is appended
 */
void ModelChecker::dumpGraph(char *filename) {
	char buffer[256];
	/* Fix: bound the formatted path (was unbounded sprintf) */
	snprintf(buffer, sizeof(buffer), "%s.dot", filename);
	FILE *file = fopen(buffer, "w");
	/* Fix: don't dereference a NULL FILE* when the file can't be created */
	if (file == NULL) {
		model_print("Error: could not create graph file %s\n", buffer);
		return;
	}
	fprintf(file, "digraph %s {\n", filename);
	mo_graph->dumpNodes(file);

	/* Track the most recent action per thread, for drawing "sb" edges */
	ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());

	for (action_list_t::iterator it = action_trace->begin(); it != action_trace->end(); it++) {
		ModelAction *action = *it;
		if (action->is_read()) {
			fprintf(file, "N%u [label=\"%u, T%u\"];\n", action->get_seq_number(), action->get_seq_number(), action->get_tid());
			if (action->get_reads_from() != NULL)
				fprintf(file, "N%u -> N%u[label=\"rf\", color=red];\n", action->get_seq_number(), action->get_reads_from()->get_seq_number());
		}
		if (thread_array[action->get_tid()] != NULL) {
			fprintf(file, "N%u -> N%u[label=\"sb\", color=blue];\n", thread_array[action->get_tid()]->get_seq_number(), action->get_seq_number());
		}

		thread_array[action->get_tid()] = action;
	}
	fprintf(file, "}\n");
	model_free(thread_array);
	fclose(file);
}
#endif
2595 /** @brief Prints an execution trace summary. */
2596 void ModelChecker::print_summary() const
2598 #if SUPPORT_MOD_ORDER_DUMP
2600 char buffername[100];
2601 sprintf(buffername, "exec%04u", stats.num_total);
2602 mo_graph->dumpGraphToFile(buffername);
2603 sprintf(buffername, "graph%04u", stats.num_total);
2604 dumpGraph(buffername);
2607 if (!isfeasibleprefix())
2608 model_print("INFEASIBLE EXECUTION!\n");
2609 print_list(action_trace, stats.num_total);
2614 * Add a Thread to the system for the first time. Should only be called once
2616 * @param t The Thread to add
2618 void ModelChecker::add_thread(Thread *t)
2620 thread_map->put(id_to_int(t->get_id()), t);
2621 scheduler->add_thread(t);
2625 * Removes a thread from the scheduler.
2626 * @param the thread to remove.
2628 void ModelChecker::remove_thread(Thread *t)
2630 scheduler->remove_thread(t);
2634 * @brief Get a Thread reference by its ID
2635 * @param tid The Thread's ID
2636 * @return A Thread reference
2638 Thread * ModelChecker::get_thread(thread_id_t tid) const
2640 return thread_map->get(id_to_int(tid));
2644 * @brief Get a reference to the Thread in which a ModelAction was executed
2645 * @param act The ModelAction
2646 * @return A Thread reference
2648 Thread * ModelChecker::get_thread(ModelAction *act) const
2650 return get_thread(act->get_tid());
2654 * @brief Check if a Thread is currently enabled
2655 * @param t The Thread to check
2656 * @return True if the Thread is currently enabled
2658 bool ModelChecker::is_enabled(Thread *t) const
2660 return scheduler->is_enabled(t);
2664 * @brief Check if a Thread is currently enabled
2665 * @param tid The ID of the Thread to check
2666 * @return True if the Thread is currently enabled
2668 bool ModelChecker::is_enabled(thread_id_t tid) const
2670 return scheduler->is_enabled(tid);
2674 * Switch from a user-context to the "master thread" context (a.k.a. system
2675 * context). This switch is made with the intention of exploring a particular
2676 * model-checking action (described by a ModelAction object). Must be called
2677 * from a user-thread context.
2679 * @param act The current action that will be explored. May be NULL only if
2680 * trace is exiting via an assertion (see ModelChecker::set_assert and
2681 * ModelChecker::has_asserted).
2682 * @return Return the value returned by the current action
2684 uint64_t ModelChecker::switch_to_master(ModelAction *act)
2687 Thread *old = thread_current();
2688 set_current_action(act);
2689 old->set_state(THREAD_READY);
2690 if (Thread::swap(old, &system_context) < 0) {
2691 perror("swap threads");
2694 return old->get_return_value();
2698 * Takes the next step in the execution, if possible.
2699 * @return Returns true (success) if a step was taken and false otherwise.
2701 bool ModelChecker::take_step() {
2705 Thread *curr = priv->current_action ? get_thread(priv->current_action) : NULL;
2707 if (curr->get_state() == THREAD_READY) {
2708 ASSERT(priv->current_action);
2710 priv->nextThread = check_current_action(priv->current_action);
2711 priv->current_action = NULL;
2713 if (curr->is_blocked() || curr->is_complete())
2714 scheduler->remove_thread(curr);
2719 Thread *next = scheduler->next_thread(priv->nextThread);
2721 /* Infeasible -> don't take any more steps */
2722 if (is_infeasible())
2724 else if (isfeasibleprefix() && have_bug_reports()) {
2729 if (params.bound != 0) {
2730 if (priv->used_sequence_numbers > params.bound) {
2735 DEBUG("(%d, %d)\n", curr ? id_to_int(curr->get_id()) : -1,
2736 next ? id_to_int(next->get_id()) : -1);
2739 * Launch end-of-execution release sequence fixups only when there are:
2741 * (1) no more user threads to run (or when execution replay chooses
2742 * the 'model_thread')
2743 * (2) pending release sequences
2744 * (3) pending assertions (i.e., data races)
2745 * (4) no pending promises
2747 if (!pending_rel_seqs->empty() && (!next || next->is_model_thread()) &&
2748 is_feasible_prefix_ignore_relseq() && !unrealizedraces.empty()) {
2749 model_print("*** WARNING: release sequence fixup action (%zu pending release seuqences) ***\n",
2750 pending_rel_seqs->size());
2751 ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
2752 std::memory_order_seq_cst, NULL, VALUE_NONE,
2754 set_current_action(fixup);
2758 /* next == NULL -> don't take any more steps */
2762 next->set_state(THREAD_RUNNING);
2764 if (next->get_pending() != NULL) {
2765 /* restart a pending action */
2766 set_current_action(next->get_pending());
2767 next->set_pending(NULL);
2768 next->set_state(THREAD_READY);
2772 /* Return false only if swap fails with an error */
2773 return (Thread::swap(&system_context, next) == 0);
2776 /** Wrapper to run the user's main function, with appropriate arguments */
2777 void user_main_wrapper(void *)
2779 user_main(model->params.argc, model->params.argv);
2782 /** @brief Run ModelChecker for the user program */
2783 void ModelChecker::run()
2788 /* Start user program */
2789 add_thread(new Thread(&user_thread, &user_main_wrapper, NULL));
2791 /* Wait for all threads to complete */
2792 while (take_step());
2793 } while (next_execution());