9 #include "snapshot-interface.h"
11 #include "clockvector.h"
12 #include "cyclegraph.h"
15 #include "threads-model.h"
17 #define INITIAL_THREAD_ID 0
	/* Constructor: format and store the " [BUG] ..." banner message,
	 * allocated on the snapshotting heap so it survives rollbacks. */
	bug_message(const char *str) {
		const char *fmt = " [BUG] %s\n";
		/* fmt's two-char "%s" is replaced by str, so strlen(fmt) +
		 * strlen(str) leaves exactly one byte for the terminating NUL */
		msg = (char *)snapshot_malloc(strlen(fmt) + strlen(str));
		sprintf(msg, fmt, str);
27 ~bug_message() { if (msg) snapshot_free(msg); }
30 void print() { printf("%s", msg); }
/**
 * Structure for holding small ModelChecker members that should be snapshotted
 * (i.e., saved and restored along with the user program's state)
 */
struct model_snapshot_members {
	ModelAction *current_action; /* action handed from user- to system-context */
	unsigned int next_thread_id; /* next unused thread ID; also the thread count */
	modelclock_t used_sequence_numbers; /* highest sequence number issued so far */
	ModelAction *next_backtrack; /* cached latest backtracking point */
	std::vector< bug_message *, SnapshotAlloc<bug_message *> > bugs; /* bugs reported during this execution */
	struct execution_stats stats; /* execution statistics (see record_stats()) */
/** @brief Constructor */
ModelChecker::ModelChecker(struct model_params params) :
	/* Initialize default scheduler */
	scheduler(new Scheduler()),
	earliest_diverge(NULL), /* no divergence point recorded yet */
	action_trace(new action_list_t()),
	thread_map(new HashTable<int, Thread *, int>()),
	obj_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
	lock_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
	condvar_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
	obj_thrd_map(new HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4 >()),
	promises(new std::vector< Promise *, SnapshotAlloc<Promise *> >()),
	futurevalues(new std::vector< struct PendingFutureValue, SnapshotAlloc<struct PendingFutureValue> >()),
	pending_rel_seqs(new std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >()),
	/* sized 1 up front: slot for the initial thread's last action */
	thrd_last_action(new std::vector< ModelAction *, SnapshotAlloc<ModelAction *> >(1)),
	node_stack(new NodeStack()),
	mo_graph(new CycleGraph()),
	/* all infeasibility flags start clear */
	failed_promise(false),
	too_many_reads(false),
	bad_synchronization(false)
	/* Allocate this "size" on the snapshotting heap */
	priv = (struct model_snapshot_members *)snapshot_calloc(1, sizeof(*priv));
	/* First thread created will have id INITIAL_THREAD_ID */
	priv->next_thread_id = INITIAL_THREAD_ID;
	/* Initialize a model-checker thread, for special ModelActions */
	model_thread = new Thread(get_next_id());
	thread_map->put(id_to_int(model_thread->get_id()), model_thread);
/** @brief Destructor: tear down all threads, maps, and snapshotted state */
ModelChecker::~ModelChecker()
	/* Threads are indexed densely by ID, so iterate 0..num_threads */
	for (unsigned int i = 0; i < get_num_threads(); i++)
		delete thread_map->get(i);
	delete lock_waiters_map;
	delete condvar_waiters_map;
	/* Promise objects are owned here; free elements before the container */
	for (unsigned int i = 0; i < promises->size(); i++)
		delete (*promises)[i];
	delete pending_rel_seqs;
	delete thrd_last_action;
	/* bug_message objects were heap-allocated in assert_bug() */
	for (unsigned int i = 0; i < priv->bugs.size(); i++)
		delete priv->bugs[i];
/**
 * Look up the action list stored under @a ptr in @a hash, lazily creating
 * an empty list on first access.
 * NOTE(review): the allocation below appears unconditional as written —
 * expected a NULL check on the get() result; verify against upstream.
 */
static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr) {
	action_list_t * tmp=hash->get(ptr);
	tmp=new action_list_t();
/**
 * Look up the per-thread action-list vector stored under @a ptr in @a hash,
 * lazily creating an empty vector on first access.
 * NOTE(review): the allocation below appears unconditional as written —
 * expected a NULL check on the get() result; verify against upstream.
 */
static std::vector<action_list_t> * get_safe_ptr_vect_action(HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4> * hash, void * ptr) {
	std::vector<action_list_t> * tmp=hash->get(ptr);
	tmp=new std::vector<action_list_t>();
/**
 * Restores user program to initial state and resets all model-checker data
 * structures, so the next execution starts from scratch.
 */
void ModelChecker::reset_to_initial_state()
	DEBUG("+++ Resetting to initial state +++\n");
	node_stack->reset_execution();
	/* clear the per-execution infeasibility flags */
	failed_promise = false;
	too_many_reads = false;
	bad_synchronization = false;
	/* roll the snapshotting system back to the pre-execution snapshot */
	snapshotObject->backTrackBeforeStep(0);
/** @return a thread ID for a new Thread (IDs are handed out densely from 0) */
thread_id_t ModelChecker::get_next_id()
	return priv->next_thread_id++;
/** @return the number of user threads created during this execution */
unsigned int ModelChecker::get_num_threads() const
	/* IDs are dense from 0, so the next free ID equals the thread count */
	return priv->next_thread_id;
/** @return The currently executing Thread, as tracked by the scheduler. */
Thread * ModelChecker::get_current_thread()
	return scheduler->get_current_thread();
/** @return a sequence number for a new ModelAction */
modelclock_t ModelChecker::get_next_seq_num()
	/* pre-increment: sequence numbers start at 1, with 0 reserved */
	return ++priv->used_sequence_numbers;
/** @return the Node at the current head of the NodeStack */
Node * ModelChecker::get_curr_node() {
	return node_stack->get_head();
/**
 * @brief Choose the next thread to execute.
 *
 * This function chooses the next thread that should execute. It can force the
 * adjacency of read/write portions of a RMW action, force THREAD_CREATE to be
 * followed by a THREAD_START, or it can enforce execution replay/backtracking.
 * The model-checker may have no preference regarding the next thread (i.e.,
 * when exploring a new execution ordering), in which case this will return
 * NULL.
 * @param curr The current ModelAction. This action might guide the choice of
 * next thread.
 * @return The next thread to run. If the model-checker has no preference, NULL.
 */
Thread * ModelChecker::get_next_thread(ModelAction *curr)
	/* Do not split atomic actions. */
		return thread_current();
	/* The THREAD_CREATE action points to the created Thread */
	else if (curr->get_type() == THREAD_CREATE)
		return (Thread *)curr->get_location();
	/* Have we completed exploring the preselected path? */
	/* Else, we are trying to replay an execution */
	ModelAction *next = node_stack->get_next()->get_action();
	if (next == diverge) {
		/* Remember the earliest divergence point seen so far */
		if (earliest_diverge == NULL || *diverge < *earliest_diverge)
			earliest_diverge=diverge;
		Node *nextnode = next->get_node();
		Node *prevnode = nextnode->get_parent();
		scheduler->update_sleep_set(prevnode);
		/* Reached divergence point; pick the first unexplored alternative,
		 * checked in fixed priority order (misc, promise, read-from,
		 * future value, release-sequence break, then backtrack) */
		if (nextnode->increment_misc()) {
			/* The next node will try to satisfy a different misc_index values. */
			tid = next->get_tid();
			node_stack->pop_restofstack(2);
		} else if (nextnode->increment_promise()) {
			/* The next node will try to satisfy a different set of promises. */
			tid = next->get_tid();
			node_stack->pop_restofstack(2);
		} else if (nextnode->increment_read_from()) {
			/* The next node will read from a different value. */
			tid = next->get_tid();
			node_stack->pop_restofstack(2);
		} else if (nextnode->increment_future_value()) {
			/* The next node will try to read from a different future value. */
			tid = next->get_tid();
			node_stack->pop_restofstack(2);
		} else if (nextnode->increment_relseq_break()) {
			/* The next node will try to resolve a release sequence differently */
			tid = next->get_tid();
			node_stack->pop_restofstack(2);
		/* Make a different thread execute for next step */
		scheduler->add_sleep(thread_map->get(id_to_int(next->get_tid())));
		tid = prevnode->get_next_backtrack();
		/* Make sure the backtracked thread isn't sleeping. */
		node_stack->pop_restofstack(1);
		if (diverge==earliest_diverge) {
			earliest_diverge=prevnode->get_action();
		/* The correct sleep set is in the parent node. */
		DEBUG("*** Divergence point ***\n");
		/* Replay case: simply run the thread recorded in the NodeStack */
		tid = next->get_tid();
	DEBUG("*** ModelChecker chose next thread = %d ***\n", id_to_int(tid));
	ASSERT(tid != THREAD_ID_T_NONE);
	return thread_map->get(id_to_int(tid));
/**
 * We need to know what the next actions of all threads in the sleep
 * set will be. This method computes them and stores the actions at
 * the corresponding thread object's pending action.
 */
void ModelChecker::execute_sleep_set() {
	for(unsigned int i=0;i<get_num_threads();i++) {
		thread_id_t tid=int_to_id(i);
		Thread *thr=get_thread(tid);
		if ( scheduler->get_enabled(thr) == THREAD_SLEEP_SET &&
				thr->get_pending() == NULL ) {
			/* Briefly run the sleeping thread so it deposits its next
			 * action into priv->current_action */
			thr->set_state(THREAD_RUNNING);
			scheduler->next_thread(thr);
			Thread::swap(&system_context, thr);
			/* Tag the action as originating from the sleep set */
			priv->current_action->set_sleep_flag();
			thr->set_pending(priv->current_action);
	/* Hand-off slot consumed; clear it */
	priv->current_action = NULL;
/**
 * Remove from the sleep set any thread whose pending action could
 * synchronize with @a curr (its sleep may no longer be redundant).
 * @param curr The action just performed by the running thread
 */
void ModelChecker::wake_up_sleeping_actions(ModelAction * curr) {
	for(unsigned int i=0;i<get_num_threads();i++) {
		thread_id_t tid=int_to_id(i);
		Thread *thr=get_thread(tid);
		if ( scheduler->get_enabled(thr) == THREAD_SLEEP_SET ) {
			ModelAction *pending_act=thr->get_pending();
			/* skip the read half of a RMW */
			if ((!curr->is_rmwr())&&pending_act->could_synchronize_with(curr)) {
				//Remove this thread from sleep set
				scheduler->remove_sleep(thr);
/**
 * Check if we are in a deadlock. Should only be called at the end of an
 * execution, although it should not give false positives in the middle of an
 * execution (there should be some ENABLED thread).
 *
 * @return True if program is in a deadlock; false otherwise
 */
bool ModelChecker::is_deadlocked() const
	bool blocking_threads = false;
	for (unsigned int i = 0; i < get_num_threads(); i++) {
		thread_id_t tid = int_to_id(i);
		Thread *t = get_thread(tid);
		/* a pending action on a user thread means it is blocked waiting */
		if (!t->is_model_thread() && t->get_pending())
			blocking_threads = true;
	return blocking_threads;
/**
 * Check if this is a complete execution. That is, have all thread completed
 * execution (rather than exiting because sleep sets have forced a redundant
 * execution).
 *
 * @return True if the execution is complete.
 */
bool ModelChecker::is_complete_execution() const
	/* any still-enabled thread means execution did not run to completion */
	for (unsigned int i = 0; i < get_num_threads(); i++)
		if (is_enabled(int_to_id(i)))
/**
 * @brief Assert a bug in the executing program.
 *
 * Use this function to assert any sort of bug in the user program. If the
 * current trace is feasible (actually, a prefix of some feasible execution),
 * then this execution will be aborted, printing the appropriate message. If
 * the current trace is not yet feasible, the error message will be stashed and
 * printed if the execution ever becomes feasible.
 *
 * This function can also be used to immediately trigger the bug; that is, we
 * don't wait for a feasible execution before aborting. Only use the
 * "immediate" option when you know that the infeasibility is justified (e.g.,
 * pending release sequences are not a problem)
 *
 * @param msg Descriptive message for the bug (do not include newline char)
 * @param user_thread Was this assertion triggered from a user thread?
 * @param immediate Should this bug be triggered immediately?
 */
void ModelChecker::assert_bug(const char *msg, bool user_thread, bool immediate)
	/* stash the message; printed later if/when the trace proves feasible */
	priv->bugs.push_back(new bug_message(msg));
	if (immediate || isfeasibleprefix()) {
		/* abort this execution by yielding back to the model-checker */
		switch_to_master(NULL);
/**
 * @brief Assert a bug in the executing program, with a default message
 * @see ModelChecker::assert_bug
 * @param user_thread Was this assertion triggered from a user thread?
 */
void ModelChecker::assert_bug(bool user_thread)
	assert_bug("bug detected", user_thread);
/**
 * @brief Assert a bug in the executing program immediately
 * @see ModelChecker::assert_bug
 * @param msg Descriptive message for the bug (do not include newline char)
 */
void ModelChecker::assert_bug_immediate(const char *msg)
	/* report feasibility up front, since we abort without waiting for it */
	printf("Feasible: %s\n", isfeasibleprefix() ? "yes" : "no");
	assert_bug(msg, false, true);
/** @return True, if any bugs have been reported for this execution */
bool ModelChecker::have_bug_reports() const
	return priv->bugs.size() != 0;
/** @brief Print bug report listing for this execution (if any bugs exist) */
void ModelChecker::print_bugs() const
	if (have_bug_reports()) {
		printf("Bug report: %zu bugs detected\n", priv->bugs.size());
		for (unsigned int i = 0; i < priv->bugs.size(); i++)
			priv->bugs[i]->print();
/**
 * @brief Record end-of-execution stats
 *
 * Must be run when exiting an execution. Records various stats.
 * @see struct execution_stats
 */
void ModelChecker::record_stats()
	/* classify the execution: infeasible takes precedence over buggy,
	 * which takes precedence over complete */
	if (!isfinalfeasible())
		stats.num_infeasible++;
	else if (have_bug_reports())
		stats.num_buggy_executions++;
	else if (is_complete_execution())
		stats.num_complete++;
/** @brief Print execution stats */
void ModelChecker::print_stats() const
	printf("Number of complete, bug-free executions: %d\n", stats.num_complete);
	printf("Number of buggy executions: %d\n", stats.num_buggy_executions);
	printf("Number of infeasible executions: %d\n", stats.num_infeasible);
	printf("Total executions: %d\n", stats.num_total);
	printf("Total nodes created: %d\n", node_stack->get_total_nodes());
/**
 * Queries the model-checker for more executions to explore and, if one
 * exists, resets the model-checker state to execute a new execution.
 *
 * @return If there are more executions to explore, return true. Otherwise,
 * return false.
 */
bool ModelChecker::next_execution()
	/* only report results for executions that finished feasibly */
	if (isfinalfeasible() && (is_complete_execution() || have_bug_reports())) {
		printf("Earliest divergence point since last feasible execution:\n");
		if (earliest_diverge)
			earliest_diverge->print();
			printf("(Not set)\n");
		earliest_diverge = NULL;
		/* all threads blocked at the end of execution => deadlock */
		assert_bug("Deadlock detected");
	} else if (DBG_ENABLED()) {
	/* choose the next divergence point; NULL means exploration is finished
	 * (presumably — the return path is not visible in this fragment) */
	if ((diverge = get_next_backtrack()) == NULL)
		printf("Next execution will diverge at:\n");
	reset_to_initial_state();
/**
 * Find the most recent action in this location's action list that conflicts
 * with @a act; the notion of "conflict" depends on act's type (synchronizing
 * action, conflicting lock, failed trylock, wait/notify pairing).
 * @param act The action to find a conflict for
 */
ModelAction * ModelChecker::get_last_conflict(ModelAction *act)
	switch (act->get_type()) {
		/* linear search: from most recent to oldest */
		action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *prev = *rit;
			if (prev->could_synchronize_with(act))
	case ATOMIC_TRYLOCK: {
		/* linear search: from most recent to oldest */
		action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *prev = *rit;
			if (act->is_conflicting_lock(prev))
	case ATOMIC_UNLOCK: {
		/* linear search: from most recent to oldest */
		action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *prev = *rit;
			/* other threads' failed trylocks conflict with unlock */
			if (!act->same_thread(prev)&&prev->is_failed_trylock())
		/* linear search: from most recent to oldest */
		action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *prev = *rit;
			if (!act->same_thread(prev)&&prev->is_failed_trylock())
			if (!act->same_thread(prev)&&prev->is_notify())
	case ATOMIC_NOTIFY_ALL:
	case ATOMIC_NOTIFY_ONE: {
		/* linear search: from most recent to oldest */
		action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *prev = *rit;
			/* a notify conflicts with another thread's wait */
			if (!act->same_thread(prev)&&prev->is_wait())
/** This method finds backtracking points where we should try to
 * reorder the parameter ModelAction against.
 *
 * @param act The ModelAction to find backtracking points for.
 */
void ModelChecker::set_backtracking(ModelAction *act)
	Thread *t = get_thread(act);
	ModelAction * prev = get_last_conflict(act);
	Node * node = prev->get_node()->get_parent();
	int low_tid, high_tid;
	if (node->is_enabled(t)) {
		/* only act's own thread needs consideration */
		low_tid = id_to_int(act->get_tid());
		high_tid = low_tid+1;
		/* otherwise consider every thread */
		high_tid = get_num_threads();
	for(int i = low_tid; i < high_tid; i++) {
		thread_id_t tid = int_to_id(i);
		/* Make sure this thread can be enabled here. */
		if (i >= node->get_num_threads())
		/* Don't backtrack into a point where the thread is disabled or sleeping. */
		if (node->enabled_status(tid)!=THREAD_ENABLED)
		/* Check if this has been explored already */
		if (node->has_been_explored(tid))
		/* See if fairness allows */
		if (model->params.fairwindow != 0 && !node->has_priority(tid)) {
			/* NOTE(review): loop index 't' shadows 'Thread *t' above */
			for(int t=0;t<node->get_num_threads();t++) {
				thread_id_t tother=int_to_id(t);
				if (node->is_enabled(tother) && node->has_priority(tother)) {
		/* Cache the latest backtracking point */
		if (!priv->next_backtrack || *prev > *priv->next_backtrack)
			priv->next_backtrack = prev;
		/* If this is a new backtracking point, mark the tree */
		if (!node->set_backtrack(tid))
		DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
				id_to_int(prev->get_tid()),
				id_to_int(t->get_id()));
/**
 * Returns last backtracking point. The model checker will explore a different
 * path for this point in the next execution.
 * @return The ModelAction at which the next execution should diverge.
 */
ModelAction * ModelChecker::get_next_backtrack()
	ModelAction *next = priv->next_backtrack;
	/* consume the cached point so it is only handed out once */
	priv->next_backtrack = NULL;
/**
 * Processes a read or rmw model action.
 * @param curr is the read model action to process.
 * @param second_part_of_rmw is boolean that is true is this is the second action of a rmw.
 * @return True if processing this read updates the mo_graph.
 */
bool ModelChecker::process_read(ModelAction *curr, bool second_part_of_rmw)
	uint64_t value = VALUE_NONE;
	bool updated = false;
	const ModelAction *reads_from = curr->get_node()->get_read_from();
	if (reads_from != NULL) {
		/* stage mo_graph edits so an infeasible choice can be undone */
		mo_graph->startChanges();
		value = reads_from->get_value();
		bool r_status = false;
		/* the RMW's write half already did recency/mo work */
		if (!second_part_of_rmw) {
			check_recency(curr, reads_from);
			r_status = r_modification_order(curr, reads_from);
		/* infeasible choice with alternatives remaining: retry this read */
		if (!second_part_of_rmw&&!isfeasible()&&(curr->get_node()->increment_read_from()||curr->get_node()->increment_future_value())) {
			mo_graph->rollbackChanges();
			too_many_reads = false;
		curr->read_from(reads_from);
		mo_graph->commitChanges();
		mo_check_promises(curr->get_tid(), reads_from);
	} else if (!second_part_of_rmw) {
		/* Read from future value */
		value = curr->get_node()->get_future_value();
		modelclock_t expiration = curr->get_node()->get_future_value_expiration();
		curr->read_from(NULL);
		/* promise that some thread will eventually write this value */
		Promise *valuepromise = new Promise(curr, value, expiration);
		promises->push_back(valuepromise);
	/* the value read is the action's return value in the user program */
	get_thread(curr)->set_return_value(value);
/**
 * Processes a lock, trylock, or unlock model action. @param curr is
 * the read model action to process.
 *
 * The try lock operation checks whether the lock is taken. If not,
 * it falls to the normal lock operation case. If so, it returns
 * failure.
 *
 * The lock operation has already been checked that it is enabled, so
 * it just grabs the lock and synchronizes with the previous unlock.
 *
 * The unlock operation has to re-enable all of the threads that are
 * waiting on the lock.
 *
 * @return True if synchronization was updated; false otherwise
 */
bool ModelChecker::process_mutex(ModelAction *curr) {
	std::mutex *mutex=NULL;
	struct std::mutex_state *state=NULL;
	/* locate the mutex: it is the action's location for lock ops, but is
	 * carried in the action's value for wait ops */
	if (curr->is_trylock() || curr->is_lock() || curr->is_unlock()) {
		mutex = (std::mutex *)curr->get_location();
		state = mutex->get_state();
	} else if(curr->is_wait()) {
		mutex = (std::mutex *)curr->get_value();
		state = mutex->get_state();
	switch (curr->get_type()) {
	case ATOMIC_TRYLOCK: {
		bool success = !state->islocked;
		curr->set_try_lock(success);
			get_thread(curr)->set_return_value(0);
		get_thread(curr)->set_return_value(1);
		//otherwise fall into the lock case
		/* lock must not be observed before the mutex was initialized */
		if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
			assert_bug("Lock access before initialization");
		state->islocked = true;
		ModelAction *unlock = get_last_unlock(curr);
		//synchronize with the previous unlock statement
		if (unlock != NULL) {
			curr->synchronize_with(unlock);
	case ATOMIC_UNLOCK: {
		state->islocked = false;
		//wake up the other threads
		action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, curr->get_location());
		//activate all the waiting threads
		for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
			scheduler->wake(get_thread(*rit));
		/* wait: release the mutex first ... */
		state->islocked = false;
		//wake up the other threads
		action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, (void *) curr->get_value());
		//activate all the waiting threads
		for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
			scheduler->wake(get_thread(*rit));
		//check whether we should go to sleep or not...simulate spurious failures
		if (curr->get_node()->get_misc()==0) {
			get_safe_ptr_action(condvar_waiters_map, curr->get_location())->push_back(curr);
			scheduler->sleep(get_current_thread());
	case ATOMIC_NOTIFY_ALL: {
		action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
		//activate all the waiting threads
		for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
			scheduler->wake(get_thread(*rit));
	case ATOMIC_NOTIFY_ONE: {
		action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
		/* the node's misc counter selects which waiter gets woken, so all
		 * choices are explored across executions */
		int wakeupthread=curr->get_node()->get_misc();
		action_list_t::iterator it = waiters->begin();
		advance(it, wakeupthread);
		scheduler->wake(get_thread(*it));
/**
 * Process a write ModelAction
 * @param curr The ModelAction to process
 * @return True if the mo_graph was updated or promises were resolved
 */
bool ModelChecker::process_write(ModelAction *curr)
	bool updated_mod_order = w_modification_order(curr);
	bool updated_promises = resolve_promises(curr);
	/* only flush deferred future values once no promises are outstanding */
	if (promises->size() == 0) {
		for (unsigned int i = 0; i < futurevalues->size(); i++) {
			struct PendingFutureValue pfv = (*futurevalues)[i];
			//Do more ambitious checks now that mo is more complete
			if (mo_may_allow(pfv.writer, pfv.act)&&
					pfv.act->get_node()->add_future_value(pfv.writer->get_value(), pfv.writer->get_seq_number()+params.maxfuturedelay) &&
					(!priv->next_backtrack || *pfv.act > *priv->next_backtrack))
				priv->next_backtrack = pfv.act;
		futurevalues->resize(0);
	mo_graph->commitChanges();
	mo_check_promises(curr->get_tid(), curr);
	get_thread(curr)->set_return_value(VALUE_NONE);
	return updated_mod_order || updated_promises;
/**
 * @brief Process the current action for thread-related activity
 *
 * Performs current-action processing for a THREAD_* ModelAction. Proccesses
 * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
 * synchronization, etc. This function is a no-op for non-THREAD actions
 * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
 *
 * @param curr The current action
 * @return True if synchronization was updated or a thread completed
 */
bool ModelChecker::process_thread_action(ModelAction *curr)
	bool updated = false;
	switch (curr->get_type()) {
	case THREAD_CREATE: {
		Thread *th = (Thread *)curr->get_location();
		th->set_creation(curr);
		/* join: synchronize with the joined thread's last action */
		Thread *blocking = (Thread *)curr->get_location();
		ModelAction *act = get_last_action(blocking->get_id());
		curr->synchronize_with(act);
		updated = true; /* trigger rel-seq checks */
	case THREAD_FINISH: {
		Thread *th = get_thread(curr);
		/* wake every thread blocked joining on this one */
		while (!th->wait_list_empty()) {
			ModelAction *act = th->pop_wait_list();
			scheduler->wake(get_thread(act));
		updated = true; /* trigger rel-seq checks */
		check_promises(curr->get_tid(), NULL, curr->get_cv());
/**
 * @brief Process the current action for release sequence fixup activity
 *
 * Performs model-checker release sequence fixups for the current action,
 * forcing a single pending release sequence to break (with a given, potential
 * "loose" write) or to complete (i.e., synchronize). If a pending release
 * sequence forms a complete release sequence, then we must perform the fixup
 * synchronization, mo_graph additions, etc.
 *
 * @param curr The current action; must be a release sequence fixup action
 * @param work_queue The work queue to which to add work items as they are
 * generated
 */
void ModelChecker::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
	const ModelAction *write = curr->get_node()->get_relseq_break();
	/* take ownership of the most recent pending release sequence */
	struct release_seq *sequence = pending_rel_seqs->back();
	pending_rel_seqs->pop_back();
	ModelAction *acquire = sequence->acquire;
	const ModelAction *rf = sequence->rf;
	const ModelAction *release = sequence->release;
	ASSERT(release->same_thread(rf));
	/*
	 * @todo Forcing a synchronization requires that we set
	 * modification order constraints. For instance, we can't allow
	 * a fixup sequence in which two separate read-acquire
	 * operations read from the same sequence, where the first one
	 * synchronizes and the other doesn't. Essentially, we can't
	 * allow any writes to insert themselves between 'release' and
	 * 'rf'.
	 */
	/* Must synchronize */
	if (!acquire->synchronize_with(release)) {
		set_bad_synchronization();
	/* Re-check all pending release sequences */
	work_queue->push_back(CheckRelSeqWorkEntry(NULL));
	/* Re-check act for mo_graph edges */
	work_queue->push_back(MOEdgeWorkEntry(acquire));
	/* propagate synchronization to later actions */
	action_list_t::reverse_iterator rit = action_trace->rbegin();
	for (; (*rit) != acquire; rit++) {
		ModelAction *propagate = *rit;
		if (acquire->happens_before(propagate)) {
			propagate->synchronize_with(acquire);
			/* Re-check 'propagate' for mo_graph edges */
			work_queue->push_back(MOEdgeWorkEntry(propagate));
	/* Break release sequence with new edges:
	 * release --mo--> write --mo--> rf */
	mo_graph->addEdge(release, write);
	mo_graph->addEdge(write, rf);
	/* See if we have realized a data race */
	if (checkDataRaces())
		assert_bug("Datarace");
/**
 * Initialize the current action by performing one or more of the following
 * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
 * in the NodeStack, manipulating backtracking sets, allocating and
 * initializing clock vectors, and computing the promises to fulfill.
 *
 * @param curr The current action, as passed from the user context; may be
 * freed/invalidated after the execution of this function, with a different
 * action "returned" its place (pass-by-reference)
 * @return True if curr is a newly-explored action; false otherwise
 */
bool ModelChecker::initialize_curr_action(ModelAction **curr)
	ModelAction *newcurr;
	/* the second half of a RMW is merged into the earlier RMWR action */
	if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
		newcurr = process_rmw(*curr);
		if (newcurr->is_rmw())
			compute_promises(newcurr);
		(*curr)->set_seq_number(get_next_seq_num());
		newcurr = node_stack->explore_action(*curr, scheduler->get_enabled());
		/* First restore type and order in case of RMW operation */
		if ((*curr)->is_rmwr())
			newcurr->copy_typeandorder(*curr);
		ASSERT((*curr)->get_location() == newcurr->get_location());
		newcurr->copy_from_new(*curr);
		/* Discard duplicate ModelAction; use action from NodeStack */
		/* Always compute new clock vector */
		newcurr->create_cv(get_parent_action(newcurr->get_tid()));
		return false; /* Action was explored previously */
		/* Always compute new clock vector */
		newcurr->create_cv(get_parent_action(newcurr->get_tid()));
		/*
		 * Perform one-time actions when pushing new ModelAction onto
		 * the NodeStack
		 */
		if (newcurr->is_write())
			compute_promises(newcurr);
		else if (newcurr->is_relseq_fixup())
			compute_relseq_breakwrites(newcurr);
		else if (newcurr->is_wait())
			/* misc value 0/1 models normal vs. spurious wakeup */
			newcurr->get_node()->set_misc_max(2);
		else if (newcurr->is_notify_one()) {
			/* one misc choice per condvar waiter that could be woken */
			newcurr->get_node()->set_misc_max(get_safe_ptr_action(condvar_waiters_map, newcurr->get_location())->size());
		return true; /* This was a new ModelAction */
/**
 * @brief Check whether a model action is enabled.
 *
 * Checks whether a lock or join operation would be successful (i.e., is the
 * lock already locked, or is the joined thread already complete). If not, put
 * the action in a waiter list.
 *
 * @param curr is the ModelAction to check whether it is enabled.
 * @return a bool that indicates whether the action is enabled.
 */
bool ModelChecker::check_action_enabled(ModelAction *curr) {
	if (curr->is_lock()) {
		std::mutex * lock = (std::mutex *)curr->get_location();
		struct std::mutex_state * state = lock->get_state();
		if (state->islocked) {
			//Stick the action in the appropriate waiting queue
			get_safe_ptr_action(lock_waiters_map, curr->get_location())->push_back(curr);
	} else if (curr->get_type() == THREAD_JOIN) {
		Thread *blocking = (Thread *)curr->get_location();
		if (!blocking->is_complete()) {
			/* join target still running: park this action on its wait list */
			blocking->push_wait_list(curr);
/**
 * Stores the ModelAction for the current thread action. Call this
 * immediately before switching from user- to system-context to pass
 * data between them.
 * @param act The ModelAction created by the user-thread action
 */
void ModelChecker::set_current_action(ModelAction *act) {
	priv->current_action = act;
/**
 * This is the heart of the model checker routine. It performs model-checking
 * actions corresponding to a given "current action." Among other processes, it
 * calculates reads-from relationships, updates synchronization clock vectors,
 * forms a memory_order constraints graph, and handles replay/backtrack
 * execution when running permutations of previously-observed executions.
 *
 * @param curr The current action to process
 * @return The next Thread that must be executed. May be NULL if ModelChecker
 * makes no choice (e.g., according to replay execution, combining RMW actions,
 * etc.)
 */
Thread * ModelChecker::check_current_action(ModelAction *curr)
	bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
	if (!check_action_enabled(curr)) {
		/* Make the execution look like we chose to run this action
		 * much later, when a lock/join can succeed */
		get_current_thread()->set_pending(curr);
		scheduler->sleep(get_current_thread());
		return get_next_thread(NULL);
	bool newly_explored = initialize_curr_action(&curr);
	wake_up_sleeping_actions(curr);
	/* Add the action to lists before any other model-checking tasks */
	if (!second_part_of_rmw)
		add_action_to_lists(curr);
	/* Build may_read_from set for newly-created actions */
	if (newly_explored && curr->is_read())
		build_reads_from_past(curr);
	/* Initialize work_queue with the "current action" work */
	work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
	/* drain the work queue; processing one entry may enqueue more */
	while (!work_queue.empty() && !has_asserted()) {
		WorkQueueEntry work = work_queue.front();
		work_queue.pop_front();
		switch (work.type) {
		case WORK_CHECK_CURR_ACTION: {
			ModelAction *act = work.action;
			bool update = false; /* update this location's release seq's */
			bool update_all = false; /* update all release seq's */
			if (process_thread_action(curr))
			if (act->is_read() && process_read(act, second_part_of_rmw))
			if (act->is_write() && process_write(act))
			if (act->is_mutex_op() && process_mutex(act))
			if (act->is_relseq_fixup())
				process_relseq_fixup(curr, &work_queue);
				work_queue.push_back(CheckRelSeqWorkEntry(NULL));
				work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
		case WORK_CHECK_RELEASE_SEQ:
			resolve_release_sequences(work.location, &work_queue);
		case WORK_CHECK_MO_EDGES: {
			/** @todo Complete verification of work_queue */
			ModelAction *act = work.action;
			bool updated = false;
			if (act->is_read()) {
				const ModelAction *rf = act->get_reads_from();
				if (rf != NULL && r_modification_order(act, rf))
			if (act->is_write()) {
				if (w_modification_order(act))
			mo_graph->commitChanges();
				work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
	/* record any remaining unexplored alternatives before moving on */
	check_curr_backtracking(curr);
	set_backtracking(curr);
	return get_next_thread(curr);
/**
 * If the current action (or its parent node) still has unexplored
 * alternatives and it is later than the cached point, make it the
 * next backtracking point.
 * @param curr The action just processed
 */
void ModelChecker::check_curr_backtracking(ModelAction * curr) {
	Node *currnode = curr->get_node();
	Node *parnode = currnode->get_parent();
	/* any non-empty alternative set means this point is worth revisiting */
	if ((!parnode->backtrack_empty() ||
			!currnode->misc_empty() ||
			!currnode->read_from_empty() ||
			!currnode->future_value_empty() ||
			!currnode->promise_empty() ||
			!currnode->relseq_break_empty())
			&& (!priv->next_backtrack ||
			*curr > *priv->next_backtrack)) {
		priv->next_backtrack = curr;
1151 bool ModelChecker::promises_expired() const
1153 for (unsigned int promise_index = 0; promise_index < promises->size(); promise_index++) {
1154 Promise *promise = (*promises)[promise_index];
1155 if (promise->get_expiration()<priv->used_sequence_numbers) {
1162 /** @return whether the current partial trace must be a prefix of a
1163 * feasible trace. */
1164 bool ModelChecker::isfeasibleprefix() const
1166 return promises->size() == 0 && pending_rel_seqs->size() == 0 && isfeasible();
1169 /** @return whether the current partial trace is feasible. */
1170 bool ModelChecker::isfeasible() const
1172 if (DBG_ENABLED() && mo_graph->checkForRMWViolation())
1173 DEBUG("Infeasible: RMW violation\n");
1175 return !mo_graph->checkForRMWViolation() && isfeasibleotherthanRMW();
1178 /** @return whether the current partial trace is feasible other than
1179 * multiple RMW reading from the same store. */
1180 bool ModelChecker::isfeasibleotherthanRMW() const
1182 if (DBG_ENABLED()) {
1183 if (mo_graph->checkForCycles())
1184 DEBUG("Infeasible: modification order cycles\n");
1186 DEBUG("Infeasible: failed promise\n");
1188 DEBUG("Infeasible: too many reads\n");
1189 if (bad_synchronization)
1190 DEBUG("Infeasible: bad synchronization ordering\n");
1191 if (promises_expired())
1192 DEBUG("Infeasible: promises expired\n");
1194 return !mo_graph->checkForCycles() && !failed_promise && !too_many_reads && !bad_synchronization && !promises_expired();
1197 /** Returns whether the current completed trace is feasible. */
1198 bool ModelChecker::isfinalfeasible() const
1200 if (DBG_ENABLED() && promises->size() != 0)
1201 DEBUG("Infeasible: unrevolved promises\n");
1203 return isfeasible() && promises->size() == 0;
1206 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
1207 ModelAction * ModelChecker::process_rmw(ModelAction *act) {
1208 ModelAction *lastread = get_last_action(act->get_tid());
1209 lastread->process_rmw(act);
1210 if (act->is_rmw() && lastread->get_reads_from()!=NULL) {
1211 mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
1212 mo_graph->commitChanges();
/**
 * Checks whether a thread has read from the same write for too many times
 * without seeing the effects of a later write.
 *
 * Basic idea:
 * 1) there must be a different write that we could read from that would
 *    satisfy the modification order,
 * 2) we must have read from the same value in excess of maxreads times, and
 * 3) that other write must have been in the reads_from set for maxreads times.
 *
 * If so, we decide that the execution is no longer feasible.
 *
 * @param curr The current read action
 * @param rf The write that curr reads from
 *
 * NOTE(review): the original numbering jumps repeatedly in this body; early
 * returns, continue/break statements and closing braces were lost in
 * extraction. Keep that in mind when reading the control flow below.
 */
1228 void ModelChecker::check_recency(ModelAction *curr, const ModelAction *rf) {
/* Recency checking is active only when a maxreads bound is configured */
1229 if (params.maxreads != 0) {
1231 if (curr->get_node()->get_read_from_size() <= 1)
1233 //Must make sure that execution is currently feasible... We could
1234 //accidentally clear by rolling back
1237 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1238 int tid = id_to_int(curr->get_tid());
/* Nothing to check if this thread has no actions at this location yet */
1241 if ((int)thrd_lists->size() <= tid)
1243 action_list_t *list = &(*thrd_lists)[tid];
1245 action_list_t::reverse_iterator rit = list->rbegin();
1246 /* Skip past curr */
1247 for (; (*rit) != curr; rit++)
1249 /* go past curr now */
1252 action_list_t::reverse_iterator ritcopy = rit;
1253 //See if we have enough reads from the same value
1255 for (; count < params.maxreads; rit++,count++) {
1256 if (rit==list->rend())
1258 ModelAction *act = *rit;
1259 if (!act->is_read())
1262 if (act->get_reads_from() != rf)
1264 if (act->get_node()->get_read_from_size() <= 1)
/* Look for a different write this read could have observed instead */
1267 for (int i = 0; i<curr->get_node()->get_read_from_size(); i++) {
1269 const ModelAction * write = curr->get_node()->get_read_from_at(i);
1271 //Need a different write
1275 /* Test to see whether this is a feasible write to read from*/
1276 mo_graph->startChanges();
1277 r_modification_order(curr, write);
1278 bool feasiblereadfrom = isfeasible();
/* The mo edges above were only a feasibility probe; undo them */
1279 mo_graph->rollbackChanges();
1281 if (!feasiblereadfrom)
1285 bool feasiblewrite = true;
1286 //now we need to see if this write works for everyone
1288 for (int loop = count; loop>0; loop--,rit++) {
1289 ModelAction *act=*rit;
1290 bool foundvalue = false;
1291 for (int j = 0; j<act->get_node()->get_read_from_size(); j++) {
1292 if (act->get_node()->get_read_from_at(j)==write) {
1298 feasiblewrite = false;
1302 if (feasiblewrite) {
/* Every recent read could have seen the other write: declare the
 * execution infeasible */
1303 too_many_reads = true;
/**
 * Updates the mo_graph with the constraints imposed from the current read.
 *
 * Basic idea is the following: Go through each other thread and find
 * the latest action that happened before our read. Two cases:
 *
 * (1) The action is a write => that write must either occur before
 * the write we read from or be the write we read from.
 *
 * (2) The action is a read => the write that that action read from
 * must occur before the write we read from or be the same write.
 *
 * @param curr The current action. Must be a read.
 * @param rf The action that curr reads from. Must be a write.
 * @return True if modification order edges were added; false otherwise
 *
 * NOTE(review): declarations of the loop index and the update flag, plus
 * several braces/continue statements, were lost in extraction (the original
 * numbering jumps below).
 */
1327 bool ModelChecker::r_modification_order(ModelAction *curr, const ModelAction *rf)
1329 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1332 ASSERT(curr->is_read());
1334 /* Iterate over all threads */
1335 for (i = 0; i < thrd_lists->size(); i++) {
1336 /* Iterate over actions in thread, starting from most recent */
1337 action_list_t *list = &(*thrd_lists)[i];
1338 action_list_t::reverse_iterator rit;
1339 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1340 ModelAction *act = *rit;
/*
 * Include at most one act per-thread that "happens
 * before" curr. Don't consider reflexively.
 */
1346 if (act->happens_before(curr) && act != curr) {
1347 if (act->is_write()) {
/* Case (1): the earlier write must be mo-before the write we read from */
1349 mo_graph->addEdge(act, rf);
1353 const ModelAction *prevreadfrom = act->get_reads_from();
1354 //if the previous read is unresolved, keep going...
1355 if (prevreadfrom == NULL)
/* Case (2): the write observed by the earlier read must be mo-before rf */
1358 if (rf != prevreadfrom) {
1359 mo_graph->addEdge(prevreadfrom, rf);
/**
 * This method fixes up the modification order when we resolve a promise. The
 * basic problem is that actions that occur after the read curr could not
 * properly add items to the modification order for that read.
 *
 * So for each thread, we find the earliest item that happens after
 * the read curr. This is the item we have to fix up with additional
 * constraints. If that action is a write, we add a MO edge between
 * the Action rf and that action. If the action is a read, we add a
 * MO edge between the Action rf, and whatever the read accessed.
 *
 * @param curr is the read ModelAction that we are fixing up MO edges for.
 * @param rf is the write ModelAction that curr reads from.
 */
1386 void ModelChecker::post_r_modification_order(ModelAction *curr, const ModelAction *rf)
1388 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1390 ASSERT(curr->is_read());
1392 /* Iterate over all threads */
1393 for (i = 0; i < thrd_lists->size(); i++) {
1394 /* Iterate over actions in thread, starting from most recent */
1395 action_list_t *list = &(*thrd_lists)[i];
1396 action_list_t::reverse_iterator rit;
1397 ModelAction *lastact = NULL;
1399 /* Find last action that happens after curr that is either not curr or a rmw */
1400 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1401 ModelAction *act = *rit;
1402 if (curr->happens_before(act) && (curr != act || curr->is_rmw())) {
/* NOTE(review): loop body truncated by extraction here (numbering jumps
 * 1402 -> 1408); presumably 'lastact' is recorded before the loop ends --
 * TODO confirm against the full source. */
1408 /* Include at most one act per-thread that "happens before" curr */
1409 if (lastact != NULL) {
1410 if (lastact==curr) {
1411 //Case 1: The resolved read is a RMW, and we need to make sure
1412 //that the write portion of the RMW is mod ordered after rf
1414 mo_graph->addEdge(rf, lastact);
1415 } else if (lastact->is_read()) {
1416 //Case 2: The resolved read is a normal read and the next
1417 //operation is a read, and we need to make sure the value read
1418 //is mod ordered after rf
1420 const ModelAction *postreadfrom = lastact->get_reads_from();
1421 if (postreadfrom != NULL&&rf != postreadfrom)
1422 mo_graph->addEdge(rf, postreadfrom);
1424 //Case 3: The resolved read is a normal read and the next
1425 //operation is a write, and we need to make sure that the
1426 //write is mod ordered after rf
1428 mo_graph->addEdge(rf, lastact);
/**
 * Updates the mo_graph with the constraints imposed from the current write.
 *
 * Basic idea is the following: Go through each other thread and find
 * the latest action that happened before our write. Two cases:
 *
 * (1) The action is a write => that write must occur before the current
 * write.
 *
 * (2) The action is a read => the write that that action read from
 * must occur before the current write.
 *
 * This method also handles two other issues:
 *
 * (I) Sequential Consistency: Making sure that if the current write is
 * seq_cst, that it occurs after the previous seq_cst write.
 *
 * (II) Sending the write back to non-synchronizing reads.
 *
 * @param curr The current action. Must be a write.
 * @return True if modification order edges were added; false otherwise
 *
 * NOTE(review): the loop index declaration, several continue/break
 * statements and closing braces were lost in extraction (numbering jumps
 * below).
 */
1457 bool ModelChecker::w_modification_order(ModelAction *curr)
1459 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1462 ASSERT(curr->is_write());
1464 if (curr->is_seqcst()) {
1465 /* We have to at least see the last sequentially consistent write,
1466 so we are initialized. */
1467 ModelAction *last_seq_cst = get_last_seq_cst(curr);
1468 if (last_seq_cst != NULL) {
/* Constraint (I): seq_cst writes on a location are totally ordered */
1469 mo_graph->addEdge(last_seq_cst, curr);
1474 /* Iterate over all threads */
1475 for (i = 0; i < thrd_lists->size(); i++) {
1476 /* Iterate over actions in thread, starting from most recent */
1477 action_list_t *list = &(*thrd_lists)[i];
1478 action_list_t::reverse_iterator rit;
1479 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1480 ModelAction *act = *rit;
/*
 * 1) If RMW and it actually read from something, then we
 * already have all relevant edges, so just skip to next
 * thread.
 *
 * 2) If RMW and it didn't read from anything, we should
 * whatever edge we can get to speed up convergence.
 *
 * 3) If normal write, we need to look at earlier actions, so
 * continue processing list.
 */
1493 if (curr->is_rmw()) {
1494 if (curr->get_reads_from()!=NULL)
/*
 * Include at most one act per-thread that "happens
 * before" curr.
 */
1506 if (act->happens_before(curr)) {
/*
 * Note: if act is RMW, just add the edge; the following edge
 * should be handled elsewhere:
 *   readfrom(act) --mo--> act
 */
1513 if (act->is_write())
1514 mo_graph->addEdge(act, curr);
1515 else if (act->is_read()) {
1516 //if previous read accessed a null, just keep going
1517 if (act->get_reads_from() == NULL)
1519 mo_graph->addEdge(act->get_reads_from(), curr);
1523 } else if (act->is_read() && !act->could_synchronize_with(curr) &&
1524 !act->same_thread(curr)) {
/* We have an action that:
   (1) did not happen before us
   (2) is a read and we are a write
   (3) cannot synchronize with us
   (4) is in a different thread
   =>
   that read could potentially read from our write. Note that
   these checks are overly conservative at this point; we'll
   do more checks later before actually acting on it. */
1537 if (thin_air_constraint_may_allow(curr, act)) {
/* NOTE(review): the condition below lost its first operand in extraction
 * (numbering jumps 1537 -> 1539). */
1539 (curr->is_rmw() && act->is_rmw() && curr->get_reads_from() == act->get_reads_from() && isfeasibleotherthanRMW())) {
/* Stash this (write, read) pair as a potential future value */
1540 struct PendingFutureValue pfv = {curr,act};
1541 futurevalues->push_back(pfv);
1551 /** Arbitrary reads from the future are not allowed. Section 29.3
1552 * part 9 places some constraints. This method checks one result of constraint
1553 * constraint. Others require compiler support. */
1554 bool ModelChecker::thin_air_constraint_may_allow(const ModelAction * writer, const ModelAction *reader) {
1555 if (!writer->is_rmw())
1558 if (!reader->is_rmw())
1561 for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
1562 if (search == reader)
1564 if (search->get_tid() == reader->get_tid() &&
1565 search->happens_before(reader))
/**
 * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
 * some constraints. This method checks one of the following constraints
 * (others require compiler support):
 *
 *   If X --hb-> Y --mo-> Z, then X should not read from Z.
 *
 * @param writer The write that the reader might read from
 * @param reader The read in question
 * @return whether the modification order may allow the read
 *
 * NOTE(review): the loop index declaration, continue/break statements and
 * the return statements were lost in extraction (numbering jumps below).
 */
1579 bool ModelChecker::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
1581 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, reader->get_location());
1583 /* Iterate over all threads */
1584 for (i = 0; i < thrd_lists->size(); i++) {
/* Earliest write (or write observed by a read) that happens after 'reader' */
1585 const ModelAction *write_after_read = NULL;
1587 /* Iterate over actions in thread, starting from most recent */
1588 action_list_t *list = &(*thrd_lists)[i];
1589 action_list_t::reverse_iterator rit;
1590 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1591 ModelAction *act = *rit;
1593 if (!reader->happens_before(act))
1595 else if (act->is_write())
1596 write_after_read = act;
1597 else if (act->is_read() && act->get_reads_from() != NULL && act != reader) {
1598 write_after_read = act->get_reads_from();
/* reader --hb-> write_after_read --mo-> writer would violate the rule */
1602 if (write_after_read && write_after_read!=writer && mo_graph->checkReachable(write_after_read, writer))
/**
 * Finds the head(s) of the release sequence(s) containing a given ModelAction.
 * The ModelAction under consideration is expected to be taking part in
 * release/acquire synchronization as an object of the "reads from" relation.
 * Note that this can only provide release sequence support for RMW chains
 * which do not read from the future, as those actions cannot be traced until
 * their "promise" is fulfilled. Similarly, we may not even establish the
 * presence of a release sequence with certainty, as some modification order
 * constraints may be decided further in the future. Thus, this function
 * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
 * and a boolean representing certainty.
 *
 * @param rf The action that might be part of a release sequence. Must be a
 * write.
 * @param release_heads A pass-by-reference style return parameter. After
 * execution of this function, release_heads will contain the heads of all the
 * relevant release sequences, if any exist with certainty
 * @param pending A pass-by-reference style return parameter which is only used
 * when returning false (i.e., uncertain). Returns most information regarding
 * an uncertain release sequence, including any write operations that might
 * break the sequence.
 * @return true, if the ModelChecker is certain that release_heads is complete;
 * false otherwise
 *
 * NOTE(review): many structural lines (the RMW-chain loop header, early
 * returns, continue/break statements, closing braces) were lost in
 * extraction; the original numbering jumps frequently below.
 */
1632 bool ModelChecker::release_seq_heads(const ModelAction *rf,
1633 rel_heads_list_t *release_heads,
1634 struct release_seq *pending) const
1636 /* Only check for release sequences if there are no cycles */
1637 if (mo_graph->checkForCycles())
1641 ASSERT(rf->is_write());
/* Walk back through the RMW chain, collecting release heads */
1643 if (rf->is_release())
1644 release_heads->push_back(rf);
1646 break; /* End of RMW chain */
1648 /** @todo Need to be smarter here... In the linux lock
1649 * example, this will run to the beginning of the program for
 * every acquire. */
1651 /** @todo The way to be smarter here is to keep going until 1
1652 * thread has a release preceded by an acquire and you've seen
 * both. */
1655 /* acq_rel RMW is a sufficient stopping condition */
1656 if (rf->is_acquire() && rf->is_release())
1657 return true; /* complete */
1659 rf = rf->get_reads_from();
1662 /* read from future: need to settle this later */
1664 return false; /* incomplete */
1667 if (rf->is_release())
1668 return true; /* complete */
1670 /* else relaxed write; check modification order for contiguous subsequence
1671 * -> rf must be same thread as release */
1672 int tid = id_to_int(rf->get_tid());
1673 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, rf->get_location());
1674 action_list_t *list = &(*thrd_lists)[tid];
1675 action_list_t::const_reverse_iterator rit;
1677 /* Find rf in the thread list */
1678 rit = std::find(list->rbegin(), list->rend(), rf);
1679 ASSERT(rit != list->rend());
1681 /* Find the last write/release */
1682 for (; rit != list->rend(); rit++)
1683 if ((*rit)->is_release())
1685 if (rit == list->rend()) {
1686 /* No write-release in this thread */
1687 return true; /* complete */
1689 ModelAction *release = *rit;
1691 ASSERT(rf->same_thread(release));
1693 pending->writes.clear();
/* Check whether any other thread could insert a write between 'release'
 * and 'rf' in modification order, which would break the sequence */
1695 bool certain = true;
1696 for (unsigned int i = 0; i < thrd_lists->size(); i++) {
1697 if (id_to_int(rf->get_tid()) == (int)i)
1699 list = &(*thrd_lists)[i];
1701 /* Can we ensure no future writes from this thread may break
1702 * the release seq? */
1703 bool future_ordered = false;
1705 ModelAction *last = get_last_action(int_to_id(i));
1706 Thread *th = get_thread(int_to_id(i));
1707 if ((last && rf->happens_before(last)) ||
1710 future_ordered = true;
1712 ASSERT(!th->is_model_thread() || future_ordered);
1714 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1715 const ModelAction *act = *rit;
1716 /* Reach synchronization -> this thread is complete */
1717 if (act->happens_before(release))
1719 if (rf->happens_before(act)) {
1720 future_ordered = true;
1724 /* Only non-RMW writes can break release sequences */
1725 if (!act->is_write() || act->is_rmw())
1728 /* Check modification order */
1729 if (mo_graph->checkReachable(rf, act)) {
1730 /* rf --mo--> act */
1731 future_ordered = true;
1734 if (mo_graph->checkReachable(act, release))
1735 /* act --mo--> release */
1737 if (mo_graph->checkReachable(release, act) &&
1738 mo_graph->checkReachable(act, rf)) {
1739 /* release --mo-> act --mo--> rf */
1740 return true; /* complete */
1742 /* act may break release sequence */
1743 pending->writes.push_back(act);
1746 if (!future_ordered)
1747 certain = false; /* This thread is uncertain */
/* When certain, 'release' heads a genuine release sequence for rf */
1751 release_heads->push_back(release);
1752 pending->writes.clear();
1754 pending->release = release;
1761 * A public interface for getting the release sequence head(s) with which a
1762 * given ModelAction must synchronize. This function only returns a non-empty
1763 * result when it can locate a release sequence head with certainty. Otherwise,
1764 * it may mark the internal state of the ModelChecker so that it will handle
1765 * the release sequence at a later time, causing @a act to update its
1766 * synchronization at some later point in execution.
1767 * @param act The 'acquire' action that may read from a release sequence
1768 * @param release_heads A pass-by-reference return parameter. Will be filled
1769 * with the head(s) of the release sequence(s), if they exists with certainty.
1770 * @see ModelChecker::release_seq_heads
1772 void ModelChecker::get_release_seq_heads(ModelAction *act, rel_heads_list_t *release_heads)
1774 const ModelAction *rf = act->get_reads_from();
1775 struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
1776 sequence->acquire = act;
1778 if (!release_seq_heads(rf, release_heads, sequence)) {
1779 /* add act to 'lazy checking' list */
1780 pending_rel_seqs->push_back(sequence);
1782 snapshot_free(sequence);
/**
 * Attempt to resolve all stashed operations that might synchronize with a
 * release sequence for a given location. This implements the "lazy" portion of
 * determining whether or not a release sequence was contiguous, since not all
 * modification order information is present at the time an action occurs.
 *
 * @param location The location/object that should be checked for release
 * sequence resolutions. A NULL value means to check all locations.
 * @param work_queue The work queue to which to add work items as they are
 * generated
 * @return True if any updates occurred (new synchronization, new mo_graph
 * edges)
 *
 * NOTE(review): several lines (the 'complete' declaration, update
 * bookkeeping, iterator increments, closing braces and the final return)
 * were lost in extraction; the original numbering jumps below.
 */
1799 bool ModelChecker::resolve_release_sequences(void *location, work_queue_t *work_queue)
1801 bool updated = false;
1802 std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >::iterator it = pending_rel_seqs->begin();
1803 while (it != pending_rel_seqs->end()) {
1804 struct release_seq *pending = *it;
1805 ModelAction *act = pending->acquire;
1807 /* Only resolve sequences on the given location, if provided */
1808 if (location && act->get_location() != location) {
1813 const ModelAction *rf = act->get_reads_from();
1814 rel_heads_list_t release_heads;
1816 complete = release_seq_heads(rf, &release_heads, pending);
/* Synchronize the acquire with each newly-certain release head */
1817 for (unsigned int i = 0; i < release_heads.size(); i++) {
1818 if (!act->has_synchronized_with(release_heads[i])) {
1819 if (act->synchronize_with(release_heads[i]))
1822 set_bad_synchronization();
1827 /* Re-check all pending release sequences */
1828 work_queue->push_back(CheckRelSeqWorkEntry(NULL));
1829 /* Re-check act for mo_graph edges */
1830 work_queue->push_back(MOEdgeWorkEntry(act));
1832 /* propagate synchronization to later actions */
1833 action_list_t::reverse_iterator rit = action_trace->rbegin();
1834 for (; (*rit) != act; rit++) {
1835 ModelAction *propagate = *rit;
1836 if (act->happens_before(propagate)) {
1837 propagate->synchronize_with(act);
1838 /* Re-check 'propagate' for mo_graph edges */
1839 work_queue->push_back(MOEdgeWorkEntry(propagate));
/* Completed sequences are removed from the pending list and freed */
1844 it = pending_rel_seqs->erase(it);
1845 snapshot_free(pending);
1851 // If we resolved promises or data races, see if we have realized a data race.
1852 if (checkDataRaces())
1853 assert_bug("Datarace");
/**
 * Performs various bookkeeping operations for the current ModelAction. For
 * instance, adds action to the per-object, per-thread action vector and to the
 * action trace list of all thread actions.
 *
 * @param act is the ModelAction to add.
 */
1865 void ModelChecker::add_action_to_lists(ModelAction *act)
1867 int tid = id_to_int(act->get_tid());
/* The full trace of all actions, in execution order */
1868 action_trace->push_back(act);
/* Per-location action list */
1870 get_safe_ptr_action(obj_map, act->get_location())->push_back(act);
/* Per-location, per-thread lists; grow to cover all known threads */
1872 std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, act->get_location());
1873 if (tid >= (int)vec->size())
1874 vec->resize(priv->next_thread_id);
1875 (*vec)[tid].push_back(act);
1877 if ((int)thrd_last_action->size() <= tid)
1878 thrd_last_action->resize(get_num_threads());
1879 (*thrd_last_action)[tid] = act;
/* A wait is additionally recorded under its mutex's location (the mutex
 * address is carried in the action's value) */
1881 if (act->is_wait()) {
1882 void *mutex_loc=(void *) act->get_value();
1883 get_safe_ptr_action(obj_map, mutex_loc)->push_back(act);
1885 std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, mutex_loc);
1886 if (tid >= (int)vec->size())
1887 vec->resize(priv->next_thread_id);
1888 (*vec)[tid].push_back(act);
1890 if ((int)thrd_last_action->size() <= tid)
1891 thrd_last_action->resize(get_num_threads());
1892 (*thrd_last_action)[tid] = act;
1897 * @brief Get the last action performed by a particular Thread
1898 * @param tid The thread ID of the Thread in question
1899 * @return The last action in the thread
1901 ModelAction * ModelChecker::get_last_action(thread_id_t tid) const
1903 int threadid = id_to_int(tid);
1904 if (threadid < (int)thrd_last_action->size())
1905 return (*thrd_last_action)[id_to_int(tid)];
1911 * Gets the last memory_order_seq_cst write (in the total global sequence)
1912 * performed on a particular object (i.e., memory location), not including the
1914 * @param curr The current ModelAction; also denotes the object location to
1916 * @return The last seq_cst write
1918 ModelAction * ModelChecker::get_last_seq_cst(ModelAction *curr) const
1920 void *location = curr->get_location();
1921 action_list_t *list = get_safe_ptr_action(obj_map, location);
1922 /* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
1923 action_list_t::reverse_iterator rit;
1924 for (rit = list->rbegin(); rit != list->rend(); rit++)
1925 if ((*rit)->is_write() && (*rit)->is_seqcst() && (*rit) != curr)
1931 * Gets the last unlock operation performed on a particular mutex (i.e., memory
1932 * location). This function identifies the mutex according to the current
1933 * action, which is presumed to perform on the same mutex.
1934 * @param curr The current ModelAction; also denotes the object location to
1936 * @return The last unlock operation
1938 ModelAction * ModelChecker::get_last_unlock(ModelAction *curr) const
1940 void *location = curr->get_location();
1941 action_list_t *list = get_safe_ptr_action(obj_map, location);
1942 /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
1943 action_list_t::reverse_iterator rit;
1944 for (rit = list->rbegin(); rit != list->rend(); rit++)
1945 if ((*rit)->is_unlock() || (*rit)->is_wait())
1950 ModelAction * ModelChecker::get_parent_action(thread_id_t tid)
1952 ModelAction *parent = get_last_action(tid);
1954 parent = get_thread(tid)->get_creation();
1959 * Returns the clock vector for a given thread.
1960 * @param tid The thread whose clock vector we want
1961 * @return Desired clock vector
1963 ClockVector * ModelChecker::get_cv(thread_id_t tid)
1965 return get_parent_action(tid)->get_cv();
/**
 * Resolve a set of Promises with a current write. The set is provided in the
 * Node corresponding to @a write.
 * @param write The ModelAction that is fulfilling Promises
 * @return True if promises were resolved; false otherwise
 *
 * NOTE(review): loop-index bookkeeping, brace lines and the final return
 * were lost in extraction (the original numbering jumps below).
 */
1974 bool ModelChecker::resolve_promises(ModelAction *write)
1976 bool resolved = false;
/* Threads whose remaining promises must be re-checked afterwards */
1977 std::vector< thread_id_t, ModelAlloc<thread_id_t> > threads_to_check;
1979 for (unsigned int i = 0, promise_index = 0; promise_index < promises->size(); i++) {
1980 Promise *promise = (*promises)[promise_index];
1981 if (write->get_node()->get_promise(i)) {
1982 ModelAction *read = promise->get_action();
1983 if (read->is_rmw()) {
1984 mo_graph->addRMWEdge(write, read);
1986 read->read_from(write);
1987 //First fix up the modification order for actions that happened
//before the read
1989 r_modification_order(read, write);
1990 //Next fix up the modification order for actions that happened
//after the read
1992 post_r_modification_order(read, write);
1993 //Make sure the promise's value matches the write's value
1994 ASSERT(promise->get_value() == write->get_value());
/* The promise is fulfilled: remove it and remember the reader's thread */
1997 promises->erase(promises->begin() + promise_index);
1998 threads_to_check.push_back(read->get_tid());
2005 //Check whether reading these writes has made threads unable to
//resolve promises
2008 for(unsigned int i=0;i<threads_to_check.size();i++)
2009 mo_check_promises(threads_to_check[i], write);
2015 * Compute the set of promises that could potentially be satisfied by this
2016 * action. Note that the set computation actually appears in the Node, not in
2018 * @param curr The ModelAction that may satisfy promises
2020 void ModelChecker::compute_promises(ModelAction *curr)
2022 for (unsigned int i = 0; i < promises->size(); i++) {
2023 Promise *promise = (*promises)[i];
2024 const ModelAction *act = promise->get_action();
2025 if (!act->happens_before(curr) &&
2027 !act->could_synchronize_with(curr) &&
2028 !act->same_thread(curr) &&
2029 act->get_location() == curr->get_location() &&
2030 promise->get_value() == curr->get_value()) {
2031 curr->get_node()->set_promise(i, act->is_rmw());
2036 /** Checks promises in response to change in ClockVector Threads. */
2037 void ModelChecker::check_promises(thread_id_t tid, ClockVector *old_cv, ClockVector *merge_cv)
2039 for (unsigned int i = 0; i < promises->size(); i++) {
2040 Promise *promise = (*promises)[i];
2041 const ModelAction *act = promise->get_action();
2042 if ((old_cv == NULL || !old_cv->synchronized_since(act)) &&
2043 merge_cv->synchronized_since(act)) {
2044 if (promise->increment_threads(tid)) {
2045 //Promise has failed
2046 failed_promise = true;
2053 void ModelChecker::check_promises_thread_disabled() {
2054 for (unsigned int i = 0; i < promises->size(); i++) {
2055 Promise *promise = (*promises)[i];
2056 if (promise->check_promise()) {
2057 failed_promise = true;
/** Checks promises in response to addition to modification order for threads.
 *
 * Definitions:
 *
 * pthread is the thread that performed the read that created the promise
 *
 * pread is the read that created the promise
 *
 * pwrite is either the first write to the same location as pread by
 * pthread that is sequenced after pread, or the value read by the
 * first read to the same location as pread by pthread that is
 * sequenced after pread.
 *
 * 1. If tid=pthread, then we check what other threads are reachable
 * through the modification order starting with pwrite. Those threads cannot
 * perform a write that will resolve the promise due to modification
 * order constraints.
 *
 * 2. If the tid is not pthread, we check whether pwrite can reach the
 * action write through the modification order. If so, that thread
 * cannot perform a future write that will resolve the promise due to
 * modification order constraints.
 *
 * @param tid The thread that either read from the model action
 * write, or actually did the model action write.
 *
 * @param write The ModelAction representing the relevant write.
 *
 * NOTE(review): early return/continue statements and closing braces were
 * lost in extraction throughout this function (numbering jumps below).
 */
2090 void ModelChecker::mo_check_promises(thread_id_t tid, const ModelAction *write) {
2091 void * location = write->get_location();
2092 for (unsigned int i = 0; i < promises->size(); i++) {
2093 Promise *promise = (*promises)[i];
2094 const ModelAction *act = promise->get_action();
2096 //Is this promise on the same location?
2097 if ( act->get_location() != location )
2100 //same thread as the promise
2101 if ( act->get_tid()==tid ) {
2103 //do we have a pwrite for the promise, if not, set it
2104 if (promise->get_write() == NULL ) {
2105 promise->set_write(write);
2106 //The pwrite cannot happen before the promise
2107 if (write->happens_before(act) && (write != act)) {
2108 failed_promise = true;
/* Case 1: other threads reachable from pwrite cannot resolve the promise */
2112 if (mo_graph->checkPromise(write, promise)) {
2113 failed_promise = true;
2118 //Don't do any lookups twice for the same thread
2119 if (promise->has_sync_thread(tid))
/* Case 2: pwrite reaches this write via modification order */
2122 if (promise->get_write()&&mo_graph->checkReachable(promise->get_write(), write)) {
2123 if (promise->increment_threads(tid)) {
2124 failed_promise = true;
2132 * Compute the set of writes that may break the current pending release
2133 * sequence. This information is extracted from previou release sequence
2136 * @param curr The current ModelAction. Must be a release sequence fixup
2139 void ModelChecker::compute_relseq_breakwrites(ModelAction *curr)
2141 if (pending_rel_seqs->empty())
2144 struct release_seq *pending = pending_rel_seqs->back();
2145 for (unsigned int i = 0; i < pending->writes.size(); i++) {
2146 const ModelAction *write = pending->writes[i];
2147 curr->get_node()->add_relseq_break(write);
2150 /* NULL means don't break the sequence; just synchronize */
2151 curr->get_node()->add_relseq_break(NULL);
/**
 * Build up an initial set of all past writes that this 'read' action may read
 * from. This set is determined by the clock vector's "happens before"
 * relationship.
 * @param curr is the current ModelAction that we are exploring; it must be a
 * 'read' operation.
 *
 * NOTE(review): loop-control declarations, 'initialized' assignments,
 * continue/break statements and closing braces were lost in extraction
 * (the original numbering jumps below).
 */
2161 void ModelChecker::build_reads_from_past(ModelAction *curr)
2163 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
2165 ASSERT(curr->is_read());
/* Most recent seq_cst write to this location (relevant if curr is seq_cst) */
2167 ModelAction *last_seq_cst = NULL;
2169 /* Track whether this object has been initialized */
2170 bool initialized = false;
2172 if (curr->is_seqcst()) {
2173 last_seq_cst = get_last_seq_cst(curr);
2174 /* We have to at least see the last sequentially consistent write,
2175 so we are initialized. */
2176 if (last_seq_cst != NULL)
2180 /* Iterate over all threads */
2181 for (i = 0; i < thrd_lists->size(); i++) {
2182 /* Iterate over actions in thread, starting from most recent */
2183 action_list_t *list = &(*thrd_lists)[i];
2184 action_list_t::reverse_iterator rit;
2185 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2186 ModelAction *act = *rit;
2188 /* Only consider 'write' actions */
2189 if (!act->is_write() || act == curr)
2192 /* Don't consider more than one seq_cst write if we are a seq_cst read. */
2193 if (!curr->is_seqcst() || (!act->is_seqcst() && (last_seq_cst == NULL || !act->happens_before(last_seq_cst))) || act == last_seq_cst) {
/* Readers on the sleep set are further restricted (see sleep_can_read_from) */
2194 if (!curr->get_sleep_flag() || curr->is_seqcst() || sleep_can_read_from(curr, act)) {
2195 DEBUG("Adding action to may_read_from:\n");
2196 if (DBG_ENABLED()) {
2200 curr->get_node()->add_read_from(act);
2204 /* Include at most one act per-thread that "happens before" curr */
2205 if (act->happens_before(curr)) {
/* Reading from an uninitialized atomic is flagged as a bug */
2213 assert_bug("May read from uninitialized atomic");
2215 if (DBG_ENABLED() || !initialized) {
2216 printf("Reached read action:\n");
2218 printf("Printing may_read_from\n");
2219 curr->get_node()->print_may_read_from();
2220 printf("End printing may_read_from\n");
2224 bool ModelChecker::sleep_can_read_from(ModelAction * curr, const ModelAction *write) {
2226 Node *prevnode=write->get_node()->get_parent();
2228 bool thread_sleep=prevnode->enabled_status(curr->get_tid())==THREAD_SLEEP_SET;
2229 if (write->is_release()&&thread_sleep)
2231 if (!write->is_rmw()) {
2234 if (write->get_reads_from()==NULL)
2236 write=write->get_reads_from();
2240 static void print_list(action_list_t *list)
2242 action_list_t::iterator it;
2244 printf("---------------------------------------------------------------------\n");
2246 unsigned int hash=0;
2248 for (it = list->begin(); it != list->end(); it++) {
2250 hash=hash^(hash<<3)^((*it)->hash());
2252 printf("HASH %u\n", hash);
2253 printf("---------------------------------------------------------------------\n");
2256 #if SUPPORT_MOD_ORDER_DUMP
2257 void ModelChecker::dumpGraph(char *filename) {
2259 sprintf(buffer, "%s.dot",filename);
2260 FILE *file=fopen(buffer, "w");
2261 fprintf(file, "digraph %s {\n",filename);
2262 mo_graph->dumpNodes(file);
2263 ModelAction ** thread_array=(ModelAction **)model_calloc(1, sizeof(ModelAction *)*get_num_threads());
2265 for (action_list_t::iterator it = action_trace->begin(); it != action_trace->end(); it++) {
2266 ModelAction *action=*it;
2267 if (action->is_read()) {
2268 fprintf(file, "N%u [label=\"%u, T%u\"];\n", action->get_seq_number(),action->get_seq_number(), action->get_tid());
2269 if (action->get_reads_from()!=NULL)
2270 fprintf(file, "N%u -> N%u[label=\"rf\", color=red];\n", action->get_seq_number(), action->get_reads_from()->get_seq_number());
2272 if (thread_array[action->get_tid()] != NULL) {
2273 fprintf(file, "N%u -> N%u[label=\"sb\", color=blue];\n", thread_array[action->get_tid()]->get_seq_number(), action->get_seq_number());
2276 thread_array[action->get_tid()]=action;
2278 fprintf(file,"}\n");
2279 model_free(thread_array);
2284 void ModelChecker::print_summary()
2286 #if SUPPORT_MOD_ORDER_DUMP
2288 char buffername[100];
2289 sprintf(buffername, "exec%04u", stats.num_total);
2290 mo_graph->dumpGraphToFile(buffername);
2291 sprintf(buffername, "graph%04u", stats.num_total);
2292 dumpGraph(buffername);
2295 if (!isfinalfeasible())
2296 printf("INFEASIBLE EXECUTION!\n");
2297 print_list(action_trace);
2302 * Add a Thread to the system for the first time. Should only be called once
2304 * @param t The Thread to add
2306 void ModelChecker::add_thread(Thread *t)
2308 thread_map->put(id_to_int(t->get_id()), t);
2309 scheduler->add_thread(t);
2313 * Removes a thread from the scheduler.
2314 * @param the thread to remove.
2316 void ModelChecker::remove_thread(Thread *t)
2318 scheduler->remove_thread(t);
2322 * @brief Get a Thread reference by its ID
2323 * @param tid The Thread's ID
2324 * @return A Thread reference
2326 Thread * ModelChecker::get_thread(thread_id_t tid) const
2328 return thread_map->get(id_to_int(tid));
2332 * @brief Get a reference to the Thread in which a ModelAction was executed
2333 * @param act The ModelAction
2334 * @return A Thread reference
2336 Thread * ModelChecker::get_thread(ModelAction *act) const
2338 return get_thread(act->get_tid());
2342 * @brief Check if a Thread is currently enabled
2343 * @param t The Thread to check
2344 * @return True if the Thread is currently enabled
2346 bool ModelChecker::is_enabled(Thread *t) const
2348 return scheduler->is_enabled(t);
2352 * @brief Check if a Thread is currently enabled
2353 * @param tid The ID of the Thread to check
2354 * @return True if the Thread is currently enabled
2356 bool ModelChecker::is_enabled(thread_id_t tid) const
2358 return scheduler->is_enabled(tid);
2362 * Switch from a user-context to the "master thread" context (a.k.a. system
2363 * context). This switch is made with the intention of exploring a particular
2364 * model-checking action (described by a ModelAction object). Must be called
2365 * from a user-thread context.
2367 * @param act The current action that will be explored. May be NULL only if
2368 * trace is exiting via an assertion (see ModelChecker::set_assert and
2369 * ModelChecker::has_asserted).
2370 * @return Return status from the 'swap' call (i.e., success/fail, 0/-1)
2372 int ModelChecker::switch_to_master(ModelAction *act)
2375 Thread *old = thread_current();
2376 set_current_action(act);
2377 old->set_state(THREAD_READY);
2378 return Thread::swap(old, &system_context);
2382 * Takes the next step in the execution, if possible.
2383 * @return Returns true (success) if a step was taken and false otherwise.
2385 bool ModelChecker::take_step() {
2389 Thread *curr = priv->current_action ? get_thread(priv->current_action) : NULL;
2391 if (curr->get_state() == THREAD_READY) {
2392 ASSERT(priv->current_action);
2394 priv->nextThread = check_current_action(priv->current_action);
2395 priv->current_action = NULL;
2397 if (curr->is_blocked() || curr->is_complete())
2398 scheduler->remove_thread(curr);
2403 Thread *next = scheduler->next_thread(priv->nextThread);
2405 /* Infeasible -> don't take any more steps */
2408 else if (isfeasibleprefix() && have_bug_reports()) {
2413 if (params.bound != 0) {
2414 if (priv->used_sequence_numbers > params.bound) {
2419 DEBUG("(%d, %d)\n", curr ? id_to_int(curr->get_id()) : -1,
2420 next ? id_to_int(next->get_id()) : -1);
2423 * Launch end-of-execution release sequence fixups only when there are:
2425 * (1) no more user threads to run (or when execution replay chooses
2426 * the 'model_thread')
2427 * (2) pending release sequences
2428 * (3) pending assertions (i.e., data races)
2429 * (4) no pending promises
2431 if (!pending_rel_seqs->empty() && (!next || next->is_model_thread()) &&
2432 isfinalfeasible() && !unrealizedraces.empty()) {
2433 printf("*** WARNING: release sequence fixup action (%zu pending release seuqences) ***\n",
2434 pending_rel_seqs->size());
2435 ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
2436 std::memory_order_seq_cst, NULL, VALUE_NONE,
2438 set_current_action(fixup);
2442 /* next == NULL -> don't take any more steps */
2446 next->set_state(THREAD_RUNNING);
2448 if (next->get_pending() != NULL) {
2449 /* restart a pending action */
2450 set_current_action(next->get_pending());
2451 next->set_pending(NULL);
2452 next->set_state(THREAD_READY);
2456 /* Return false only if swap fails with an error */
2457 return (Thread::swap(&system_context, next) == 0);
2460 /** Runs the current execution until threre are no more steps to take. */
2461 void ModelChecker::finish_execution() {
2464 while (take_step());