9 #include "snapshot-interface.h"
11 #include "clockvector.h"
12 #include "cyclegraph.h"
15 #include "threads-model.h"
17 #define INITIAL_THREAD_ID 0
22 * Structure for holding small ModelChecker members that should be snapshotted
/* NOTE(review): the leading numbers on each line are extraction artifacts
 * (original file line numbers). The jump 27 -> 29 shows at least one member
 * is missing from this extract — do not assume the struct is complete. */
24 struct model_snapshot_members {
/* Action published by the running user thread; set via set_current_action(),
 * consumed (and cleared) in execute_sleep_set(). */
25 ModelAction *current_action;
/* Next fresh thread id; doubles as the thread count (see get_num_threads()). */
26 unsigned int next_thread_id;
/* Last sequence number handed out by get_next_seq_num(); compared against
 * promise expirations in promises_expired(). */
27 modelclock_t used_sequence_numbers;
/* Latest cached backtracking point (set_backtracking() caches it,
 * get_next_backtrack() consumes it). */
29 ModelAction *next_backtrack;
32 /** @brief Constructor */
/* NOTE(review): jumps in the embedded original line numbers (34->36, 54->56,
 * 56->58, 61->63) show that some initializers and the body's braces are
 * missing from this extract; do not treat this listing as contiguous. */
33 ModelChecker::ModelChecker(struct model_params params) :
34 /* Initialize default scheduler */
36 scheduler(new Scheduler()),
38 num_feasible_executions(0),
40 earliest_diverge(NULL),
41 action_trace(new action_list_t()),
42 thread_map(new HashTable<int, Thread *, int>()),
/* Per-location action lists, keyed by memory location */
43 obj_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
/* Threads blocked on a lock / condition variable, keyed by the object */
44 lock_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
45 condvar_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
/* Per-location, per-thread action lists */
46 obj_thrd_map(new HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4 >()),
/* Snapshot-allocated vectors: this state must be rolled back between executions */
47 promises(new std::vector< Promise *, SnapshotAlloc<Promise *> >()),
48 futurevalues(new std::vector< struct PendingFutureValue, SnapshotAlloc<struct PendingFutureValue> >()),
49 pending_rel_seqs(new std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >()),
50 thrd_last_action(new std::vector< ModelAction *, SnapshotAlloc<ModelAction *> >(1)),
51 node_stack(new NodeStack()),
52 mo_graph(new CycleGraph()),
53 failed_promise(false),
54 too_many_reads(false),
56 bad_synchronization(false)
58 /* Allocate this "size" on the snapshotting heap */
59 priv = (struct model_snapshot_members *)calloc(1, sizeof(*priv));
60 /* First thread created will have id INITIAL_THREAD_ID */
61 priv->next_thread_id = INITIAL_THREAD_ID;
63 /* Initialize a model-checker thread, for special ModelActions */
64 model_thread = new Thread(get_next_id());
65 thread_map->put(id_to_int(model_thread->get_id()), model_thread);
68 /** @brief Destructor */
/* NOTE(review): the embedded line numbers jump (72->77, 78->81, 82->85,
 * 85->87) — several delete statements (e.g. for obj_map, obj_thrd_map,
 * futurevalues, node_stack, mo_graph) are missing from this extract.
 * Do not conclude from this listing which members are or are not freed. */
69 ModelChecker::~ModelChecker()
/* Tear down every user/model thread created during the run */
71 for (unsigned int i = 0; i < get_num_threads(); i++)
72 delete thread_map->get(i);
77 delete lock_waiters_map;
78 delete condvar_waiters_map;
/* Promises are owned by this class (allocated in process_read) */
81 for (unsigned int i = 0; i < promises->size(); i++)
82 delete (*promises)[i];
85 delete pending_rel_seqs;
87 delete thrd_last_action;
93 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr) {
94 action_list_t * tmp=hash->get(ptr);
96 tmp=new action_list_t();
102 static std::vector<action_list_t> * get_safe_ptr_vect_action(HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4> * hash, void * ptr) {
103 std::vector<action_list_t> * tmp=hash->get(ptr);
105 tmp=new std::vector<action_list_t>();
112 * Restores user program to initial state and resets all model-checker data
115 void ModelChecker::reset_to_initial_state()
117 DEBUG("+++ Resetting to initial state +++\n");
/* Discard the per-execution exploration state before rewinding */
118 node_stack->reset_execution();
119 failed_promise = false;
120 too_many_reads = false;
121 bad_synchronization = false;
/* NOTE(review): one statement is missing from the extract here (original
 * line 122) — confirm against upstream before assuming this list of resets
 * is complete. */
/* Roll the snapshotted heap/globals back to the pre-execution snapshot */
123 snapshotObject->backTrackBeforeStep(0);
126 /** @return a thread ID for a new Thread */
127 thread_id_t ModelChecker::get_next_id()
129 return priv->next_thread_id++;
132 /** @return the number of user threads created during this execution */
133 unsigned int ModelChecker::get_num_threads() const
135 return priv->next_thread_id;
138 /** @return The currently executing Thread. */
139 Thread * ModelChecker::get_current_thread()
141 return scheduler->get_current_thread();
144 /** @return a sequence number for a new ModelAction */
145 modelclock_t ModelChecker::get_next_seq_num()
147 return ++priv->used_sequence_numbers;
150 Node * ModelChecker::get_curr_node() {
151 return node_stack->get_head();
155 * @brief Choose the next thread to execute.
157 * This function chooses the next thread that should execute. It can force the
158 * adjacency of read/write portions of a RMW action, force THREAD_CREATE to be
159 * followed by a THREAD_START, or it can enforce execution replay/backtracking.
160 * The model-checker may have no preference regarding the next thread (i.e.,
161 * when exploring a new execution ordering), in which case this will return
163 * @param curr The current ModelAction. This action might guide the choice of
165 * @return The next thread to run. If the model-checker has no preference, NULL.
/* NOTE(review): the embedded original line numbers jump repeatedly in this
 * function (167->172, 177->180, 180->184, 215->217, 223->226, 229->233) —
 * the declaration of `tid`, several conditions/braces, and whole branches
 * are missing from this extract. Read this as a sampled skeleton only. */
167 Thread * ModelChecker::get_next_thread(ModelAction *curr)
172 /* Do not split atomic actions. */
174 return thread_current();
175 /* The THREAD_CREATE action points to the created Thread */
176 else if (curr->get_type() == THREAD_CREATE)
177 return (Thread *)curr->get_location();
180 /* Have we completed exploring the preselected path? */
184 /* Else, we are trying to replay an execution */
185 ModelAction *next = node_stack->get_next()->get_action();
/* Track the earliest divergence point seen since the last feasible execution */
187 if (next == diverge) {
188 if (earliest_diverge == NULL || *diverge < *earliest_diverge)
189 earliest_diverge=diverge;
191 Node *nextnode = next->get_node();
192 Node *prevnode = nextnode->get_parent();
193 scheduler->update_sleep_set(prevnode);
195 /* Reached divergence point */
/* Each increment_*() call below advances one category of unexplored
 * alternative at this node; the first one that still has options wins,
 * and the rest of the stack beyond it is discarded. */
196 if (nextnode->increment_misc()) {
197 /* The next node will try to satisfy a different misc_index values. */
198 tid = next->get_tid();
199 node_stack->pop_restofstack(2);
200 } else if (nextnode->increment_promise()) {
201 /* The next node will try to satisfy a different set of promises. */
202 tid = next->get_tid();
203 node_stack->pop_restofstack(2);
204 } else if (nextnode->increment_read_from()) {
205 /* The next node will read from a different value. */
206 tid = next->get_tid();
207 node_stack->pop_restofstack(2);
208 } else if (nextnode->increment_future_value()) {
209 /* The next node will try to read from a different future value. */
210 tid = next->get_tid();
211 node_stack->pop_restofstack(2);
212 } else if (nextnode->increment_relseq_break()) {
213 /* The next node will try to resolve a release sequence differently */
214 tid = next->get_tid();
215 node_stack->pop_restofstack(2);
/* Fall-through branch (condition missing from extract): no alternative at
 * this node, so take a backtrack point from the parent instead. */
217 /* Make a different thread execute for next step */
218 scheduler->add_sleep(thread_map->get(id_to_int(next->get_tid())));
219 tid = prevnode->get_next_backtrack();
220 /* Make sure the backtracked thread isn't sleeping. */
221 node_stack->pop_restofstack(1);
222 if (diverge==earliest_diverge) {
223 earliest_diverge=prevnode->get_action();
226 /* The correct sleep set is in the parent node. */
229 DEBUG("*** Divergence point ***\n");
233 tid = next->get_tid();
235 DEBUG("*** ModelChecker chose next thread = %d ***\n", id_to_int(tid));
236 ASSERT(tid != THREAD_ID_T_NONE);
237 return thread_map->get(id_to_int(tid));
241 * We need to know what the next actions of all threads in the sleep
242 * set will be. This method computes them and stores the actions at
243 * the corresponding thread object's pending action.
246 void ModelChecker::execute_sleep_set() {
247 for(unsigned int i=0;i<get_num_threads();i++) {
248 thread_id_t tid=int_to_id(i);
249 Thread *thr=get_thread(tid);
250 if ( scheduler->get_enabled(thr) == THREAD_SLEEP_SET &&
251 thr->get_pending() == NULL ) {
252 thr->set_state(THREAD_RUNNING);
253 scheduler->next_thread(thr);
254 Thread::swap(&system_context, thr);
255 priv->current_action->set_sleep_flag();
256 thr->set_pending(priv->current_action);
259 priv->current_action = NULL;
262 void ModelChecker::wake_up_sleeping_actions(ModelAction * curr) {
263 for(unsigned int i=0;i<get_num_threads();i++) {
264 thread_id_t tid=int_to_id(i);
265 Thread *thr=get_thread(tid);
266 if ( scheduler->get_enabled(thr) == THREAD_SLEEP_SET ) {
267 ModelAction *pending_act=thr->get_pending();
268 if ((!curr->is_rmwr())&&pending_act->could_synchronize_with(curr)) {
269 //Remove this thread from sleep set
270 scheduler->remove_sleep(thr);
277 * Check if we are in a deadlock. Should only be called at the end of an
278 * execution, although it should not give false positives in the middle of an
279 * execution (there should be some ENABLED thread).
281 * @return True if program is in a deadlock; false otherwise
283 bool ModelChecker::is_deadlocked() const
285 bool blocking_threads = false;
286 for (unsigned int i = 0; i < get_num_threads(); i++) {
287 thread_id_t tid = int_to_id(i);
290 Thread *t = get_thread(tid);
291 if (!t->is_model_thread() && t->get_pending())
292 blocking_threads = true;
294 return blocking_threads;
298 * Queries the model-checker for more executions to explore and, if one
299 * exists, resets the model-checker state to execute a new execution.
301 * @return If there are more executions to explore, return true. Otherwise,
/* NOTE(review): this extract is heavily sampled — the embedded original
 * line numbers jump from 304 to 311, 315 to 317, 320 to 323, etc. The
 * deadlock check guarding the first printf, statistics printing, the
 * "no more backtracks -> return false" path, and the trailing return are
 * all missing. Treat this listing as an outline only. */
304 bool ModelChecker::next_execution()
311 printf("ERROR: DEADLOCK\n");
312 if (isfinalfeasible()) {
313 printf("Earliest divergence point since last feasible execution:\n");
314 if (earliest_diverge)
315 earliest_diverge->print();
317 printf("(Not set)\n");
/* Feasible execution completed: reset divergence tracking, bump the count */
319 earliest_diverge = NULL;
320 num_feasible_executions++;
323 DEBUG("Number of acquires waiting on pending release sequences: %zu\n",
324 pending_rel_seqs->size());
327 if (isfinalfeasible() || DBG_ENABLED()) {
/* Pick the next divergence point; a NULL result means exploration is done */
332 if ((diverge = get_next_backtrack()) == NULL)
336 printf("Next execution will diverge at:\n");
/* Rewind user program + model-checker state for the next execution */
340 reset_to_initial_state();
/**
 * @brief Find the most recent action that conflicts with @a act, for use as
 * a backtracking point (see set_backtracking()).
 *
 * Each case scans the per-location action list from most recent to oldest
 * and applies a type-specific notion of "conflict".
 *
 * NOTE(review): the extract is missing the leading case labels of the first
 * and fourth cases, every `return prev;` / `break` / closing brace, and the
 * final default/NULL return (original numbering jumps 346->350, 355->361,
 * 378->384, 391->397, 404->415). Only the scan skeletons are visible.
 */
344 ModelAction * ModelChecker::get_last_conflict(ModelAction *act)
346 switch (act->get_type()) {
350 /* linear search: from most recent to oldest */
351 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
352 action_list_t::reverse_iterator rit;
353 for (rit = list->rbegin(); rit != list->rend(); rit++) {
354 ModelAction *prev = *rit;
355 if (prev->could_synchronize_with(act))
361 case ATOMIC_TRYLOCK: {
362 /* linear search: from most recent to oldest */
363 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
364 action_list_t::reverse_iterator rit;
365 for (rit = list->rbegin(); rit != list->rend(); rit++) {
366 ModelAction *prev = *rit;
367 if (act->is_conflicting_lock(prev))
372 case ATOMIC_UNLOCK: {
373 /* linear search: from most recent to oldest */
374 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
375 action_list_t::reverse_iterator rit;
376 for (rit = list->rbegin(); rit != list->rend(); rit++) {
377 ModelAction *prev = *rit;
378 if (!act->same_thread(prev)&&prev->is_failed_trylock())
384 /* linear search: from most recent to oldest */
385 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
386 action_list_t::reverse_iterator rit;
387 for (rit = list->rbegin(); rit != list->rend(); rit++) {
388 ModelAction *prev = *rit;
389 if (!act->same_thread(prev)&&prev->is_failed_trylock())
391 if (!act->same_thread(prev)&&prev->is_notify())
397 case ATOMIC_NOTIFY_ALL:
398 case ATOMIC_NOTIFY_ONE: {
399 /* linear search: from most recent to oldest */
400 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
401 action_list_t::reverse_iterator rit;
402 for (rit = list->rbegin(); rit != list->rend(); rit++) {
403 ModelAction *prev = *rit;
404 if (!act->same_thread(prev)&&prev->is_wait())
415 /** This method finds backtracking points where we should try to
416 * reorder the parameter ModelAction against.
418 * @param the ModelAction to find backtracking points for.
/* NOTE(review): several lines are missing from this extract (original
 * numbering jumps 423->427, 432->435, 435->438, 458->466, 471->473, plus
 * the guards/continues after each check and all closing braces). The
 * `continue`/`break` bodies of the guard conditions are not visible. */
420 void ModelChecker::set_backtracking(ModelAction *act)
422 Thread *t = get_thread(act);
423 ModelAction * prev = get_last_conflict(act);
427 Node * node = prev->get_node()->get_parent();
/* If the conflicting thread was enabled at the conflict point, only its own
 * tid needs consideration; otherwise sweep all threads */
429 int low_tid, high_tid;
430 if (node->is_enabled(t)) {
431 low_tid = id_to_int(act->get_tid());
432 high_tid = low_tid+1;
435 high_tid = get_num_threads();
438 for(int i = low_tid; i < high_tid; i++) {
439 thread_id_t tid = int_to_id(i);
441 /* Make sure this thread can be enabled here. */
442 if (i >= node->get_num_threads())
445 /* Don't backtrack into a point where the thread is disabled or sleeping. */
446 if (node->enabled_status(tid)!=THREAD_ENABLED)
449 /* Check if this has been explored already */
450 if (node->has_been_explored(tid))
453 /* See if fairness allows */
454 if (model->params.fairwindow != 0 && !node->has_priority(tid)) {
456 for(int t=0;t<node->get_num_threads();t++) {
457 thread_id_t tother=int_to_id(t);
458 if (node->is_enabled(tother) && node->has_priority(tother)) {
466 /* Cache the latest backtracking point */
467 if (!priv->next_backtrack || *prev > *priv->next_backtrack)
468 priv->next_backtrack = prev;
470 /* If this is a new backtracking point, mark the tree */
471 if (!node->set_backtrack(tid))
473 DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
474 id_to_int(prev->get_tid()),
475 id_to_int(t->get_id()));
484 * Returns last backtracking point. The model checker will explore a different
485 * path for this point in the next execution.
486 * @return The ModelAction at which the next execution should diverge.
488 ModelAction * ModelChecker::get_next_backtrack()
490 ModelAction *next = priv->next_backtrack;
491 priv->next_backtrack = NULL;
496 * Processes a read or rmw model action.
497 * @param curr is the read model action to process.
498 * @param second_part_of_rmw is boolean that is true is this is the second action of a rmw.
499 * @return True if processing this read updates the mo_graph.
/* NOTE(review): the extract omits the surrounding loop/braces and several
 * returns (original numbering jumps 504->506, 515->519, 521->525, 527->530,
 * 536->538, and stops at 538) — notably whatever `updated` is finally set
 * to / returned is not visible. Treat this as a sampled skeleton. */
501 bool ModelChecker::process_read(ModelAction *curr, bool second_part_of_rmw)
503 uint64_t value = VALUE_NONE;
504 bool updated = false;
/* The Node proposes which prior write this read observes */
506 const ModelAction *reads_from = curr->get_node()->get_read_from();
507 if (reads_from != NULL) {
508 mo_graph->startChanges();
510 value = reads_from->get_value();
511 bool r_status = false;
513 if (!second_part_of_rmw) {
514 check_recency(curr, reads_from);
515 r_status = r_modification_order(curr, reads_from);
/* Infeasible choice: roll back mo changes and retry with the next
 * read-from candidate or future value */
519 if (!second_part_of_rmw&&!isfeasible()&&(curr->get_node()->increment_read_from()||curr->get_node()->increment_future_value())) {
520 mo_graph->rollbackChanges();
521 too_many_reads = false;
525 curr->read_from(reads_from);
526 mo_graph->commitChanges();
527 mo_check_promises(curr->get_tid(), reads_from);
530 } else if (!second_part_of_rmw) {
531 /* Read from future value */
532 value = curr->get_node()->get_future_value();
533 modelclock_t expiration = curr->get_node()->get_future_value_expiration();
534 curr->read_from(NULL);
/* Promise: some later write must eventually supply this value */
535 Promise *valuepromise = new Promise(curr, value, expiration);
536 promises->push_back(valuepromise);
538 get_thread(curr)->set_return_value(value);
544 * Processes a lock, trylock, or unlock model action. @param curr is
545 * the read model action to process.
547 * The try lock operation checks whether the lock is taken. If not,
548 * it falls to the normal lock operation case. If so, it returns
551 * The lock operation has already been checked that it is enabled, so
552 * it just grabs the lock and synchronizes with the previous unlock.
554 * The unlock operation has to re-enable all of the threads that are
555 * waiting on the lock.
557 * @return True if synchronization was updated; false otherwise
/* NOTE(review): this extract is heavily sampled — case labels for LOCK and
 * WAIT, the success/failure branches of TRYLOCK, all `break`s/`return`s,
 * and the closing braces are missing (numbering jumps 574->576, 581->583,
 * 591->596, 603->610, 615->618, 622->626, 630->635, stops at 640).
 * Note: `std::mutex` here has get_state()/mutex_state — this is the
 * model checker's own mutex type, not the standard library's. */
559 bool ModelChecker::process_mutex(ModelAction *curr) {
560 std::mutex *mutex=NULL;
561 struct std::mutex_state *state=NULL;
/* lock/trylock/unlock carry the mutex as the location; wait carries it as
 * the action's value */
563 if (curr->is_trylock() || curr->is_lock() || curr->is_unlock()) {
564 mutex = (std::mutex *)curr->get_location();
565 state = mutex->get_state();
566 } else if(curr->is_wait()) {
567 mutex = (std::mutex *)curr->get_value();
568 state = mutex->get_state();
571 switch (curr->get_type()) {
572 case ATOMIC_TRYLOCK: {
573 bool success = !state->islocked;
574 curr->set_try_lock(success);
576 get_thread(curr)->set_return_value(0);
579 get_thread(curr)->set_return_value(1);
581 //otherwise fall into the lock case
/* Lock case: detect use of a mutex before its initialization via clocks */
583 if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock) {
584 printf("Lock access before initialization\n");
587 state->islocked = true;
588 ModelAction *unlock = get_last_unlock(curr);
589 //synchronize with the previous unlock statement
590 if (unlock != NULL) {
591 curr->synchronize_with(unlock);
596 case ATOMIC_UNLOCK: {
598 state->islocked = false;
599 //wake up the other threads
600 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, curr->get_location());
601 //activate all the waiting threads
602 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
603 scheduler->wake(get_thread(*rit));
/* (wait case — label missing from extract) releases the mutex ... */
610 state->islocked = false;
611 //wake up the other threads
612 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, (void *) curr->get_value());
613 //activate all the waiting threads
614 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
615 scheduler->wake(get_thread(*rit));
618 //check whether we should go to sleep or not...simulate spurious failures
619 if (curr->get_node()->get_misc()==0) {
620 get_safe_ptr_action(condvar_waiters_map, curr->get_location())->push_back(curr);
622 scheduler->sleep(get_current_thread());
626 case ATOMIC_NOTIFY_ALL: {
627 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
628 //activate all the waiting threads
629 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
630 scheduler->wake(get_thread(*rit));
635 case ATOMIC_NOTIFY_ONE: {
636 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
/* The Node's misc index selects WHICH waiter to wake, so all choices of
 * waiter get explored across executions */
637 int wakeupthread=curr->get_node()->get_misc();
638 action_list_t::iterator it = waiters->begin();
639 advance(it, wakeupthread);
640 scheduler->wake(get_thread(*it));
652 * Process a write ModelAction
653 * @param curr The ModelAction to process
654 * @return True if the mo_graph was updated or promises were resolved
656 bool ModelChecker::process_write(ModelAction *curr)
658 bool updated_mod_order = w_modification_order(curr);
659 bool updated_promises = resolve_promises(curr);
661 if (promises->size() == 0) {
662 for (unsigned int i = 0; i < futurevalues->size(); i++) {
663 struct PendingFutureValue pfv = (*futurevalues)[i];
664 //Do more ambitious checks now that mo is more complete
665 if (mo_may_allow(pfv.writer, pfv.act)&&
666 pfv.act->get_node()->add_future_value(pfv.writer->get_value(), pfv.writer->get_seq_number()+params.maxfuturedelay) &&
667 (!priv->next_backtrack || *pfv.act > *priv->next_backtrack))
668 priv->next_backtrack = pfv.act;
670 futurevalues->resize(0);
673 mo_graph->commitChanges();
674 mo_check_promises(curr->get_tid(), curr);
676 get_thread(curr)->set_return_value(VALUE_NONE);
677 return updated_mod_order || updated_promises;
681 * @brief Process the current action for thread-related activity
683 * Performs current-action processing for a THREAD_* ModelAction. Processes
684 * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
685 * synchronization, etc. This function is a no-op for non-THREAD actions
686 * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
688 * @param curr The current action
689 * @return True if synchronization was updated or a thread completed
/* NOTE(review): the extract omits the THREAD_JOIN case label (the block at
 * orig. 702-705 matches a join: synchronize with the target's last action),
 * all `break`s, th->complete()-style completion (orig. 713-714 missing),
 * the default case, and the final `return updated;`. Numbering jumps
 * 698->702, 705->708, 712->715, 715->719. */
691 bool ModelChecker::process_thread_action(ModelAction *curr)
693 bool updated = false;
695 switch (curr->get_type()) {
696 case THREAD_CREATE: {
697 Thread *th = (Thread *)curr->get_location();
698 th->set_creation(curr);
/* (presumably THREAD_JOIN — label missing from extract; confirm) */
702 Thread *blocking = (Thread *)curr->get_location();
703 ModelAction *act = get_last_action(blocking->get_id());
704 curr->synchronize_with(act);
705 updated = true; /* trigger rel-seq checks */
708 case THREAD_FINISH: {
709 Thread *th = get_thread(curr);
/* Wake every thread that was blocked joining on this one */
710 while (!th->wait_list_empty()) {
711 ModelAction *act = th->pop_wait_list();
712 scheduler->wake(get_thread(act));
715 updated = true; /* trigger rel-seq checks */
/* (presumably THREAD_START handling — context missing from extract) */
719 check_promises(curr->get_tid(), NULL, curr->get_cv());
730 * @brief Process the current action for release sequence fixup activity
732 * Performs model-checker release sequence fixups for the current action,
733 * forcing a single pending release sequence to break (with a given, potential
734 * "loose" write) or to complete (i.e., synchronize). If a pending release
735 * sequence forms a complete release sequence, then we must perform the fixup
736 * synchronization, mo_graph additions, etc.
738 * @param curr The current action; must be a release sequence fixup action
739 * @param work_queue The work queue to which to add work items as they are
/* NOTE(review): the extract omits the branch structure (the condition that
 * chooses "complete the sequence" vs "break it" — numbering jumps 754->758,
 * 763->767, 784->788) and the function's closing braces. */
742 void ModelChecker::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
/* `write` == NULL (no relseq break chosen) vs. a concrete "loose" write */
744 const ModelAction *write = curr->get_node()->get_relseq_break();
745 struct release_seq *sequence = pending_rel_seqs->back();
746 pending_rel_seqs->pop_back();
748 ModelAction *acquire = sequence->acquire;
749 const ModelAction *rf = sequence->rf;
750 const ModelAction *release = sequence->release;
754 ASSERT(release->same_thread(rf));
758 * @todo Forcing a synchronization requires that we set
759 * modification order constraints. For instance, we can't allow
760 * a fixup sequence in which two separate read-acquire
761 * operations read from the same sequence, where the first one
762 * synchronizes and the other doesn't. Essentially, we can't
763 * allow any writes to insert themselves between 'release' and
767 /* Must synchronize */
768 if (!acquire->synchronize_with(release)) {
769 set_bad_synchronization();
772 /* Re-check all pending release sequences */
773 work_queue->push_back(CheckRelSeqWorkEntry(NULL));
774 /* Re-check act for mo_graph edges */
775 work_queue->push_back(MOEdgeWorkEntry(acquire));
777 /* propagate synchronization to later actions */
778 action_list_t::reverse_iterator rit = action_trace->rbegin();
779 for (; (*rit) != acquire; rit++) {
780 ModelAction *propagate = *rit;
781 if (acquire->happens_before(propagate)) {
782 propagate->synchronize_with(acquire);
783 /* Re-check 'propagate' for mo_graph edges */
784 work_queue->push_back(MOEdgeWorkEntry(propagate));
788 /* Break release sequence with new edges:
789 * release --mo--> write --mo--> rf */
790 mo_graph->addEdge(release, write);
791 mo_graph->addEdge(write, rf);
/* NOTE(review): the two lines below sit after a gap (orig. 792-793
 * missing); whether they belong to this function or the next one cannot
 * be determined from this extract — confirm upstream. */
794 /* See if we have realized a data race */
795 if (checkDataRaces())
800 * Initialize the current action by performing one or more of the following
801 * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
802 * in the NodeStack, manipulating backtracking sets, allocating and
803 * initializing clock vectors, and computing the promises to fulfill.
805 * @param curr The current action, as passed from the user context; may be
806 * freed/invalidated after the execution of this function, with a different
807 * action "returned" its place (pass-by-reference)
808 * @return True if curr is a newly-explored action; false otherwise
/* NOTE(review): this extract is sampled — the assignment `*curr = newcurr`,
 * the early `return false` of the RMW-merge path, the "(*curr) == newcurr"
 * comparison that distinguishes replay from new exploration, and closing
 * braces are missing (numbering jumps 815->818, 819->825, 836->839,
 * 843->847, 848->850, 860->862). */
810 bool ModelChecker::initialize_curr_action(ModelAction **curr)
812 ModelAction *newcurr;
/* Second half of an RMW: fold it into the earlier RMWR action */
814 if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
815 newcurr = process_rmw(*curr);
818 if (newcurr->is_rmw())
819 compute_promises(newcurr);
825 (*curr)->set_seq_number(get_next_seq_num());
/* NodeStack either replays an existing Node or records a new one */
827 newcurr = node_stack->explore_action(*curr, scheduler->get_enabled());
829 /* First restore type and order in case of RMW operation */
830 if ((*curr)->is_rmwr())
831 newcurr->copy_typeandorder(*curr);
833 ASSERT((*curr)->get_location() == newcurr->get_location());
834 newcurr->copy_from_new(*curr);
836 /* Discard duplicate ModelAction; use action from NodeStack */
839 /* Always compute new clock vector */
840 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
843 return false; /* Action was explored previously */
847 /* Always compute new clock vector */
848 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
850 * Perform one-time actions when pushing new ModelAction onto
/* One-time setup per new action: writes seed promises, fixups compute the
 * candidate break-writes, wait gets 2 misc choices (sleep / spurious wake),
 * notify_one gets one misc choice per current condvar waiter */
853 if (newcurr->is_write())
854 compute_promises(newcurr);
855 else if (newcurr->is_relseq_fixup())
856 compute_relseq_breakwrites(newcurr);
857 else if (newcurr->is_wait())
858 newcurr->get_node()->set_misc_max(2);
859 else if (newcurr->is_notify_one()) {
860 newcurr->get_node()->set_misc_max(get_safe_ptr_action(condvar_waiters_map, newcurr->get_location())->size());
862 return true; /* This was a new ModelAction */
867 * @brief Check whether a model action is enabled.
869 * Checks whether a lock or join operation would be successful (i.e., is the
870 * lock already locked, or is the joined thread already complete). If not, put
871 * the action in a waiter list.
873 * @param curr is the ModelAction to check whether it is enabled.
874 * @return a bool that indicates whether the action is enabled.
876 bool ModelChecker::check_action_enabled(ModelAction *curr) {
877 if (curr->is_lock()) {
878 std::mutex * lock = (std::mutex *)curr->get_location();
879 struct std::mutex_state * state = lock->get_state();
880 if (state->islocked) {
881 //Stick the action in the appropriate waiting queue
882 get_safe_ptr_action(lock_waiters_map, curr->get_location())->push_back(curr);
885 } else if (curr->get_type() == THREAD_JOIN) {
886 Thread *blocking = (Thread *)curr->get_location();
887 if (!blocking->is_complete()) {
888 blocking->push_wait_list(curr);
897 * Stores the ModelAction for the current thread action. Call this
898 * immediately before switching from user- to system-context to pass
900 * @param act The ModelAction created by the user-thread action
902 void ModelChecker::set_current_action(ModelAction *act) {
903 priv->current_action = act;
907 * This is the heart of the model checker routine. It performs model-checking
908 * actions corresponding to a given "current action." Among other processes, it
909 * calculates reads-from relationships, updates synchronization clock vectors,
910 * forms a memory_order constraints graph, and handles replay/backtrack
911 * execution when running permutations of previously-observed executions.
913 * @param curr The current action to process
914 * @return The next Thread that must be executed. May be NULL if ModelChecker
915 * makes no choice (e.g., according to replay execution, combining RMW actions,
/* NOTE(review): sampled extract — the `switch (work.type)` statement, the
 * `updated = true;` / `update = true;` / `update_all = true;` bodies of the
 * one-line ifs, the conditions guarding the CheckRelSeq pushes, `break`s,
 * and all closing braces are missing (numbering jumps 921->923, 947->950,
 * 955->958, 968->971, 996->1005, among others). */
918 Thread * ModelChecker::check_current_action(ModelAction *curr)
921 bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
923 if (!check_action_enabled(curr)) {
924 /* Make the execution look like we chose to run this action
925 * much later, when a lock/join can succeed */
926 get_current_thread()->set_pending(curr);
927 scheduler->sleep(get_current_thread());
928 return get_next_thread(NULL);
931 bool newly_explored = initialize_curr_action(&curr);
933 wake_up_sleeping_actions(curr);
935 /* Add the action to lists before any other model-checking tasks */
936 if (!second_part_of_rmw)
937 add_action_to_lists(curr);
939 /* Build may_read_from set for newly-created actions */
940 if (newly_explored && curr->is_read())
941 build_reads_from_past(curr);
943 /* Initialize work_queue with the "current action" work */
944 work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
/* Fixed-point loop: each processing step may enqueue follow-up checks */
945 while (!work_queue.empty()) {
946 WorkQueueEntry work = work_queue.front();
947 work_queue.pop_front();
950 case WORK_CHECK_CURR_ACTION: {
951 ModelAction *act = work.action;
952 bool update = false; /* update this location's release seq's */
953 bool update_all = false; /* update all release seq's */
955 if (process_thread_action(curr))
958 if (act->is_read() && process_read(act, second_part_of_rmw))
961 if (act->is_write() && process_write(act))
964 if (act->is_mutex_op() && process_mutex(act))
967 if (act->is_relseq_fixup())
968 process_relseq_fixup(curr, &work_queue)
971 work_queue.push_back(CheckRelSeqWorkEntry(NULL));
973 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
976 case WORK_CHECK_RELEASE_SEQ:
977 resolve_release_sequences(work.location, &work_queue);
979 case WORK_CHECK_MO_EDGES: {
980 /** @todo Complete verification of work_queue */
981 ModelAction *act = work.action;
982 bool updated = false;
984 if (act->is_read()) {
985 const ModelAction *rf = act->get_reads_from();
986 if (rf != NULL && r_modification_order(act, rf))
989 if (act->is_write()) {
990 if (w_modification_order(act))
993 mo_graph->commitChanges();
996 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
/* After the queue drains: record backtracking points, then pick a thread */
1005 check_curr_backtracking(curr);
1006 set_backtracking(curr);
1007 return get_next_thread(curr);
1010 void ModelChecker::check_curr_backtracking(ModelAction * curr) {
1011 Node *currnode = curr->get_node();
1012 Node *parnode = currnode->get_parent();
1014 if ((!parnode->backtrack_empty() ||
1015 !currnode->misc_empty() ||
1016 !currnode->read_from_empty() ||
1017 !currnode->future_value_empty() ||
1018 !currnode->promise_empty() ||
1019 !currnode->relseq_break_empty())
1020 && (!priv->next_backtrack ||
1021 *curr > *priv->next_backtrack)) {
1022 priv->next_backtrack = curr;
1026 bool ModelChecker::promises_expired() {
1027 for (unsigned int promise_index = 0; promise_index < promises->size(); promise_index++) {
1028 Promise *promise = (*promises)[promise_index];
1029 if (promise->get_expiration()<priv->used_sequence_numbers) {
1036 /** @return whether the current partial trace must be a prefix of a
1037 * feasible trace. */
1038 bool ModelChecker::isfeasibleprefix() {
1039 return promises->size() == 0 && pending_rel_seqs->size() == 0 && isfeasible();
1042 /** @return whether the current partial trace is feasible. */
1043 bool ModelChecker::isfeasible() {
1044 if (DBG_ENABLED() && mo_graph->checkForRMWViolation())
1045 DEBUG("Infeasible: RMW violation\n");
1047 return !mo_graph->checkForRMWViolation() && isfeasibleotherthanRMW();
1050 /** @return whether the current partial trace is feasible other than
1051 * multiple RMW reading from the same store. */
1052 bool ModelChecker::isfeasibleotherthanRMW() {
1053 if (DBG_ENABLED()) {
1054 if (mo_graph->checkForCycles())
1055 DEBUG("Infeasible: modification order cycles\n");
1057 DEBUG("Infeasible: failed promise\n");
1059 DEBUG("Infeasible: too many reads\n");
1060 if (bad_synchronization)
1061 DEBUG("Infeasible: bad synchronization ordering\n");
1062 if (promises_expired())
1063 DEBUG("Infeasible: promises expired\n");
1065 return !mo_graph->checkForCycles() && !failed_promise && !too_many_reads && !bad_synchronization && !promises_expired();
1068 /** Returns whether the current completed trace is feasible. */
1069 bool ModelChecker::isfinalfeasible() {
1070 if (DBG_ENABLED() && promises->size() != 0)
1071 DEBUG("Infeasible: unrevolved promises\n");
1073 return isfeasible() && promises->size() == 0;
1076 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
1077 ModelAction * ModelChecker::process_rmw(ModelAction *act) {
1078 ModelAction *lastread = get_last_action(act->get_tid());
1079 lastread->process_rmw(act);
1080 if (act->is_rmw() && lastread->get_reads_from()!=NULL) {
1081 mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
1082 mo_graph->commitChanges();
1088 * Checks whether a thread has read from the same write for too many times
1089 * without seeing the effects of a later write.
1092 * 1) there must a different write that we could read from that would satisfy the modification order,
1093 * 2) we must have read from the same value in excess of maxreads times, and
1094 * 3) that other write must have been in the reads_from set for maxreads times.
1096 * If so, we decide that the execution is no longer feasible.
/* NOTE(review): heavily sampled extract — the early `return`s after each
 * guard, the `int count` declaration, loop bodies/`continue`s, the
 * `rit = ritcopy` rewind before the per-write re-scan, and all closing
 * braces are missing (numbering jumps 1099->1101, 1104->1107, 1117->1119,
 * 1129->1132, 1141->1145, 1151->1155, 1162->1168, 1168->1172). */
1098 void ModelChecker::check_recency(ModelAction *curr, const ModelAction *rf) {
/* Entire check is gated on the user-configured maxreads parameter */
1099 if (params.maxreads != 0) {
1101 if (curr->get_node()->get_read_from_size() <= 1)
1103 //Must make sure that execution is currently feasible... We could
1104 //accidentally clear by rolling back
1107 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1108 int tid = id_to_int(curr->get_tid());
1111 if ((int)thrd_lists->size() <= tid)
1113 action_list_t *list = &(*thrd_lists)[tid];
1115 action_list_t::reverse_iterator rit = list->rbegin();
1116 /* Skip past curr */
1117 for (; (*rit) != curr; rit++)
1119 /* go past curr now */
/* Saved so the later "does this write work for everyone" scan can restart
 * from the same position */
1122 action_list_t::reverse_iterator ritcopy = rit;
1123 //See if we have enough reads from the same value
1125 for (; count < params.maxreads; rit++,count++) {
1126 if (rit==list->rend())
1128 ModelAction *act = *rit;
1129 if (!act->is_read())
1132 if (act->get_reads_from() != rf)
1134 if (act->get_node()->get_read_from_size() <= 1)
/* For each alternative write in curr's may-read-from set... */
1137 for (int i = 0; i<curr->get_node()->get_read_from_size(); i++) {
1139 const ModelAction * write = curr->get_node()->get_read_from_at(i);
1141 //Need a different write
1145 /* Test to see whether this is a feasible write to read from*/
1146 mo_graph->startChanges();
1147 r_modification_order(curr, write);
1148 bool feasiblereadfrom = isfeasible();
1149 mo_graph->rollbackChanges();
1151 if (!feasiblereadfrom)
1155 bool feasiblewrite = true;
1156 //new we need to see if this write works for everyone
1158 for (int loop = count; loop>0; loop--,rit++) {
1159 ModelAction *act=*rit;
1160 bool foundvalue = false;
1161 for (int j = 0; j<act->get_node()->get_read_from_size(); j++) {
1162 if (act->get_node()->get_read_from_at(j)==write) {
1168 feasiblewrite = false;
/* All maxreads reads could have observed `write` instead: too many reads */
1172 if (feasiblewrite) {
1173 too_many_reads = true;
/**
 * Updates the mo_graph with the constraints imposed from the current
 * read.
 *
 * Basic idea is the following: Go through each other thread and find
 * the lastest action that happened before our read. Two cases:
 *
 * (1) The action is a write => that write must either occur before
 * the write we read from or be the write we read from.
 *
 * (2) The action is a read => the write that that action read from
 * must occur before the write we read from or be the same write.
 *
 * @param curr The current action. Must be a read.
 * @param rf The action that curr reads from. Must be a write.
 * @return True if modification order edges were added; false otherwise
 *
 * NOTE(review): elided in this chunk — the opening brace, the loop index
 * declaration, the 'added' accumulator, some continues/breaks, and the
 * final 'return added;'. Verify against the full source.
 */
bool ModelChecker::r_modification_order(ModelAction *curr, const ModelAction *rf)
	std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
	ASSERT(curr->is_read());

	/* Iterate over all threads */
	for (i = 0; i < thrd_lists->size(); i++) {
		/* Iterate over actions in thread, starting from most recent */
		action_list_t *list = &(*thrd_lists)[i];
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *act = *rit;

			/*
			 * Include at most one act per-thread that "happens
			 * before" curr. Don't consider reflexively.
			 */
			if (act->happens_before(curr) && act != curr) {
				if (act->is_write()) {
					/* Case (1): prior write is mo-before rf */
					mo_graph->addEdge(act, rf);
					/* Case (2): the write the prior read saw is mo-before rf */
					const ModelAction *prevreadfrom = act->get_reads_from();
					//if the previous read is unresolved, keep going...
					if (prevreadfrom == NULL)
					if (rf != prevreadfrom) {
						mo_graph->addEdge(prevreadfrom, rf);
/** This method fixes up the modification order when we resolve a
 * promises. The basic problem is that actions that occur after the
 * read curr could not property add items to the modification order
 * for our read.
 *
 * So for each thread, we find the earliest item that happens after
 * the read curr. This is the item we have to fix up with additional
 * constraints. If that action is write, we add a MO edge between
 * the Action rf and that action. If the action is a read, we add a
 * MO edge between the Action rf, and whatever the read accessed.
 *
 * @param curr is the read ModelAction that we are fixing up MO edges for.
 * @param rf is the write ModelAction that curr reads from.
 *
 * NOTE(review): elided in this chunk — opening brace, loop index
 * declaration, the 'lastact = act' assignment / loop break, and closing
 * braces. Verify against the full source.
 */
void ModelChecker::post_r_modification_order(ModelAction *curr, const ModelAction *rf)
	std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
	ASSERT(curr->is_read());

	/* Iterate over all threads */
	for (i = 0; i < thrd_lists->size(); i++) {
		/* Iterate over actions in thread, starting from most recent */
		action_list_t *list = &(*thrd_lists)[i];
		action_list_t::reverse_iterator rit;
		ModelAction *lastact = NULL;

		/* Find last action that happens after curr that is either not curr or a rmw */
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *act = *rit;
			if (curr->happens_before(act) && (curr != act || curr->is_rmw())) {

		/* Include at most one act per-thread that "happens before" curr */
		if (lastact != NULL) {
			if (lastact==curr) {
				//Case 1: The resolved read is a RMW, and we need to make sure
				//that the write portion of the RMW mod order after rf
				mo_graph->addEdge(rf, lastact);
			} else if (lastact->is_read()) {
				//Case 2: The resolved read is a normal read and the next
				//operation is a read, and we need to make sure the value read
				//is mod ordered after rf
				const ModelAction *postreadfrom = lastact->get_reads_from();
				if (postreadfrom != NULL&&rf != postreadfrom)
					mo_graph->addEdge(rf, postreadfrom);
				//Case 3: The resolved read is a normal read and the next
				//operation is a write, and we need to make sure that the
				//write is mod ordered after rf
				mo_graph->addEdge(rf, lastact);
/**
 * Updates the mo_graph with the constraints imposed from the current write.
 *
 * Basic idea is the following: Go through each other thread and find
 * the lastest action that happened before our write. Two cases:
 *
 * (1) The action is a write => that write must occur before
 * the current write.
 *
 * (2) The action is a read => the write that that action read from
 * must occur before the current write.
 *
 * This method also handles two other issues:
 *
 * (I) Sequential Consistency: Making sure that if the current write is
 * seq_cst, that it occurs after the previous seq_cst write.
 *
 * (II) Sending the write back to non-synchronizing reads.
 *
 * @param curr The current action. Must be a write.
 * @return True if modification order edges were added; false otherwise
 *
 * NOTE(review): elided in this chunk — opening brace, loop index and
 * 'added' declarations, break/continue lines, part of the future-value
 * condition ('if (isfeasible() ||'), and the final return. Verify
 * against the full source before changing logic.
 */
bool ModelChecker::w_modification_order(ModelAction *curr)
	std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
	ASSERT(curr->is_write());

	if (curr->is_seqcst()) {
		/* We have to at least see the last sequentially consistent write,
			 so we are initialized. */
		ModelAction *last_seq_cst = get_last_seq_cst(curr);
		if (last_seq_cst != NULL) {
			mo_graph->addEdge(last_seq_cst, curr);

	/* Iterate over all threads */
	for (i = 0; i < thrd_lists->size(); i++) {
		/* Iterate over actions in thread, starting from most recent */
		action_list_t *list = &(*thrd_lists)[i];
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *act = *rit;

			/*
			 * 1) If RMW and it actually read from something, then we
			 * already have all relevant edges, so just skip to next
			 * thread.
			 *
			 * 2) If RMW and it didn't read from anything, we should
			 * whatever edge we can get to speed up convergence.
			 *
			 * 3) If normal write, we need to look at earlier actions, so
			 * continue processing list.
			 */
			if (curr->is_rmw()) {
				if (curr->get_reads_from()!=NULL)

			/*
			 * Include at most one act per-thread that "happens
			 * before" curr
			 */
			if (act->happens_before(curr)) {
				/*
				 * Note: if act is RMW, just add edge:
				 *   act --mo--> curr
				 * The following edge should be handled elsewhere:
				 *   readfrom(act) --mo--> act
				 */
				if (act->is_write())
					mo_graph->addEdge(act, curr);
				else if (act->is_read()) {
					//if previous read accessed a null, just keep going
					if (act->get_reads_from() == NULL)
					mo_graph->addEdge(act->get_reads_from(), curr);
			} else if (act->is_read() && !act->could_synchronize_with(curr) &&
			                             !act->same_thread(curr)) {
				/* We have an action that:
				   (1) did not happen before us
				   (2) is a read and we are a write
				   (3) cannot synchronize with us
				   (4) is in a different thread
				   =>
				   that read could potentially read from our write. Note that
				   these checks are overly conservative at this point, we'll
				   do more checks before actually removing the
				   pendingfuturevalue.
				 */
				if (thin_air_constraint_may_allow(curr, act)) {
					(curr->is_rmw() && act->is_rmw() && curr->get_reads_from() == act->get_reads_from() && isfeasibleotherthanRMW())) {
						/* Stash the write so the read may later read from it */
						struct PendingFutureValue pfv = {curr,act};
						futurevalues->push_back(pfv);
1421 /** Arbitrary reads from the future are not allowed. Section 29.3
1422 * part 9 places some constraints. This method checks one result of constraint
1423 * constraint. Others require compiler support. */
1424 bool ModelChecker::thin_air_constraint_may_allow(const ModelAction * writer, const ModelAction *reader) {
1425 if (!writer->is_rmw())
1428 if (!reader->is_rmw())
1431 for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
1432 if (search == reader)
1434 if (search->get_tid() == reader->get_tid() &&
1435 search->happens_before(reader))
/**
 * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
 * some constraints. This method checks one the following constraint (others
 * require compiler support):
 *
 *   If X --hb-> Y --mo-> Z, then X should not read from Z.
 *
 * NOTE(review): elided in this chunk — opening brace, loop index
 * declaration, the loop-exit 'break', the 'return false;' under the
 * reachability check, and the final 'return true;'. Verify upstream.
 */
bool ModelChecker::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
	std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, reader->get_location());

	/* Iterate over all threads */
	for (i = 0; i < thrd_lists->size(); i++) {
		/* The latest write in this thread that 'reader' happens-before */
		const ModelAction *write_after_read = NULL;

		/* Iterate over actions in thread, starting from most recent */
		action_list_t *list = &(*thrd_lists)[i];
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *act = *rit;

			if (!reader->happens_before(act))
			else if (act->is_write())
				write_after_read = act;
			else if (act->is_read() && act->get_reads_from() != NULL && act != reader) {
				write_after_read = act->get_reads_from();

		/* writer mo-after a write that is hb-after reader -> disallowed */
		if (write_after_read && write_after_read!=writer && mo_graph->checkReachable(write_after_read, writer))
/**
 * Finds the head(s) of the release sequence(s) containing a given ModelAction.
 * The ModelAction under consideration is expected to be taking part in
 * release/acquire synchronization as an object of the "reads from" relation.
 * Note that this can only provide release sequence support for RMW chains
 * which do not read from the future, as those actions cannot be traced until
 * their "promise" is fulfilled. Similarly, we may not even establish the
 * presence of a release sequence with certainty, as some modification order
 * constraints may be decided further in the future. Thus, this function
 * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
 * and a boolean representing certainty.
 *
 * @param rf The action that might be part of a release sequence. Must be a
 * write.
 * @param release_heads A pass-by-reference style return parameter. After
 * execution of this function, release_heads will contain the heads of all the
 * relevant release sequences, if any exists with certainty
 * @param pending A pass-by-reference style return parameter which is only used
 * when returning false (i.e., uncertain). Returns most information regarding
 * an uncertain release sequence, including any write operations that might
 * break the sequence.
 * @return true, if the ModelChecker is certain that release_heads is complete;
 * false otherwise
 *
 * NOTE(review): elided in this chunk — the RMW-chain loop header, several
 * break/continue/return lines, and closing braces. The comments below mark
 * the visible logic only; verify control flow against the full source.
 */
bool ModelChecker::release_seq_heads(const ModelAction *rf,
                rel_heads_list_t *release_heads,
                struct release_seq *pending) const
	/* Only check for release sequences if there are no cycles */
	if (mo_graph->checkForCycles())
		ASSERT(rf->is_write());

		/* Every release write along the RMW chain heads a sequence */
		if (rf->is_release())
			release_heads->push_back(rf);
			break; /* End of RMW chain */

		/** @todo Need to be smarter here...  In the linux lock
		 * example, this will run to the beginning of the program for
		 * every acquire. */
		/** @todo The way to be smarter here is to keep going until 1
		 * thread has a release preceded by an acquire and you've seen
		 *	 both. */

		/* acq_rel RMW is a sufficient stopping condition */
		if (rf->is_acquire() && rf->is_release())
			return true; /* complete */

		rf = rf->get_reads_from();
		/* read from future: need to settle this later */
		return false; /* incomplete */

	if (rf->is_release())
		return true; /* complete */

	/* else relaxed write; check modification order for contiguous subsequence
	 * -> rf must be same thread as release */
	int tid = id_to_int(rf->get_tid());
	std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, rf->get_location());
	action_list_t *list = &(*thrd_lists)[tid];
	action_list_t::const_reverse_iterator rit;

	/* Find rf in the thread list */
	rit = std::find(list->rbegin(), list->rend(), rf);
	ASSERT(rit != list->rend());

	/* Find the last write/release */
	for (; rit != list->rend(); rit++)
		if ((*rit)->is_release())
	if (rit == list->rend()) {
		/* No write-release in this thread */
		return true; /* complete */
	ModelAction *release = *rit;

	ASSERT(rf->same_thread(release));

	pending->writes.clear();

	bool certain = true;
	for (unsigned int i = 0; i < thrd_lists->size(); i++) {
		/* rf's own thread cannot break its own release sequence */
		if (id_to_int(rf->get_tid()) == (int)i)
		list = &(*thrd_lists)[i];

		/* Can we ensure no future writes from this thread may break
		 * the release seq? */
		bool future_ordered = false;

		ModelAction *last = get_last_action(int_to_id(i));
		Thread *th = get_thread(int_to_id(i));
		if ((last && rf->happens_before(last)) ||
			future_ordered = true;

		ASSERT(!th->is_model_thread() || future_ordered);

		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			const ModelAction *act = *rit;
			/* Reach synchronization -> this thread is complete */
			if (act->happens_before(release))
			if (rf->happens_before(act)) {
				future_ordered = true;

			/* Only non-RMW writes can break release sequences */
			if (!act->is_write() || act->is_rmw())

			/* Check modification order */
			if (mo_graph->checkReachable(rf, act)) {
				/* rf --mo--> act */
				future_ordered = true;
			if (mo_graph->checkReachable(act, release))
				/* act --mo--> release */
			if (mo_graph->checkReachable(release, act) &&
					mo_graph->checkReachable(act, rf)) {
				/* release --mo-> act --mo--> rf */
				return true; /* complete */
			/* act may break release sequence */
			pending->writes.push_back(act);

		if (!future_ordered)
			certain = false; /* This thread is uncertain */

	/* certain: publish the release head; otherwise leave 'pending'
	 * populated for lazy re-checking */
	release_heads->push_back(release);
	pending->writes.clear();
	pending->release = release;
/**
 * A public interface for getting the release sequence head(s) with which a
 * given ModelAction must synchronize. This function only returns a non-empty
 * result when it can locate a release sequence head with certainty. Otherwise,
 * it may mark the internal state of the ModelChecker so that it will handle
 * the release sequence at a later time, causing @a act to update its
 * synchronization at some later point in execution.
 * @param act The 'acquire' action that may read from a release sequence
 * @param release_heads A pass-by-reference return parameter. Will be filled
 * with the head(s) of the release sequence(s), if they exists with certainty.
 * @see ModelChecker::release_seq_heads
 */
void ModelChecker::get_release_seq_heads(ModelAction *act, rel_heads_list_t *release_heads)
	const ModelAction *rf = act->get_reads_from();
	/* Allocate on the snapshotting heap so it is rolled back with the state */
	struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
	sequence->acquire = act;

	if (!release_seq_heads(rf, release_heads, sequence)) {
		/* add act to 'lazy checking' list */
		pending_rel_seqs->push_back(sequence);
		/* NOTE(review): an 'else' branch appears elided here — the free
		 * below should only run when the sequence was fully resolved */
		snapshot_free(sequence);
/**
 * Attempt to resolve all stashed operations that might synchronize with a
 * release sequence for a given location. This implements the "lazy" portion of
 * determining whether or not a release sequence was contiguous, since not all
 * modification order information is present at the time an action occurs.
 *
 * @param location The location/object that should be checked for release
 * sequence resolutions. A NULL value means to check all locations.
 * @param work_queue The work queue to which to add work items as they are
 * generated
 * @return True if any updates occurred (new synchronization, new mo_graph
 * edges)
 *
 * NOTE(review): elided in this chunk — 'bool complete;' declaration,
 * 'updated = true;' assignments, loop-advance/continue lines, closing
 * braces, and the final 'return updated;'. Verify against full source.
 */
bool ModelChecker::resolve_release_sequences(void *location, work_queue_t *work_queue)
	bool updated = false;
	std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >::iterator it = pending_rel_seqs->begin();
	while (it != pending_rel_seqs->end()) {
		struct release_seq *pending = *it;
		ModelAction *act = pending->acquire;

		/* Only resolve sequences on the given location, if provided */
		if (location && act->get_location() != location) {

		const ModelAction *rf = act->get_reads_from();
		rel_heads_list_t release_heads;
		complete = release_seq_heads(rf, &release_heads, pending);
		for (unsigned int i = 0; i < release_heads.size(); i++) {
			if (!act->has_synchronized_with(release_heads[i])) {
				if (act->synchronize_with(release_heads[i]))
					/* sync failed: promises/ordering made this illegal */
					set_bad_synchronization();

			/* Re-check all pending release sequences */
			work_queue->push_back(CheckRelSeqWorkEntry(NULL));
			/* Re-check act for mo_graph edges */
			work_queue->push_back(MOEdgeWorkEntry(act));

			/* propagate synchronization to later actions */
			action_list_t::reverse_iterator rit = action_trace->rbegin();
			for (; (*rit) != act; rit++) {
				ModelAction *propagate = *rit;
				if (act->happens_before(propagate)) {
					propagate->synchronize_with(act);
					/* Re-check 'propagate' for mo_graph edges */
					work_queue->push_back(MOEdgeWorkEntry(propagate));

			/* Release sequence resolved: drop the pending entry
			 * (erase returns the next valid iterator) */
			it = pending_rel_seqs->erase(it);
			snapshot_free(pending);

	// If we resolved promises or data races, see if we have realized a data race.
	if (checkDataRaces()) {
/**
 * Performs various bookkeeping operations for the current ModelAction. For
 * instance, adds action to the per-object, per-thread action vector and to the
 * action trace list of all thread actions.
 *
 * @param act is the ModelAction to add.
 *
 * NOTE(review): the wait-handling branch below duplicates the bookkeeping
 * above almost verbatim (keyed on the mutex location instead of the action
 * location) — a candidate for extraction into a helper once the elided
 * closing braces in this chunk are confirmed.
 */
void ModelChecker::add_action_to_lists(ModelAction *act)
	int tid = id_to_int(act->get_tid());
	/* Global trace of all actions, in execution order */
	action_trace->push_back(act);

	/* Per-location list */
	get_safe_ptr_action(obj_map, act->get_location())->push_back(act);

	/* Per-location, per-thread list (resized to hold all known threads) */
	std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, act->get_location());
	if (tid >= (int)vec->size())
		vec->resize(priv->next_thread_id);
	(*vec)[tid].push_back(act);

	/* Track each thread's most recent action */
	if ((int)thrd_last_action->size() <= tid)
		thrd_last_action->resize(get_num_threads());
	(*thrd_last_action)[tid] = act;

	if (act->is_wait()) {
		/* A wait also acts on its mutex; the mutex location is stashed
		 * in the action's value field */
		void *mutex_loc=(void *) act->get_value();
		get_safe_ptr_action(obj_map, mutex_loc)->push_back(act);

		std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, mutex_loc);
		if (tid >= (int)vec->size())
			vec->resize(priv->next_thread_id);
		(*vec)[tid].push_back(act);

		if ((int)thrd_last_action->size() <= tid)
			thrd_last_action->resize(get_num_threads());
		(*thrd_last_action)[tid] = act;
1768 * @brief Get the last action performed by a particular Thread
1769 * @param tid The thread ID of the Thread in question
1770 * @return The last action in the thread
1772 ModelAction * ModelChecker::get_last_action(thread_id_t tid) const
1774 int threadid = id_to_int(tid);
1775 if (threadid < (int)thrd_last_action->size())
1776 return (*thrd_last_action)[id_to_int(tid)];
1782 * Gets the last memory_order_seq_cst write (in the total global sequence)
1783 * performed on a particular object (i.e., memory location), not including the
1785 * @param curr The current ModelAction; also denotes the object location to
1787 * @return The last seq_cst write
1789 ModelAction * ModelChecker::get_last_seq_cst(ModelAction *curr) const
1791 void *location = curr->get_location();
1792 action_list_t *list = get_safe_ptr_action(obj_map, location);
1793 /* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
1794 action_list_t::reverse_iterator rit;
1795 for (rit = list->rbegin(); rit != list->rend(); rit++)
1796 if ((*rit)->is_write() && (*rit)->is_seqcst() && (*rit) != curr)
1802 * Gets the last unlock operation performed on a particular mutex (i.e., memory
1803 * location). This function identifies the mutex according to the current
1804 * action, which is presumed to perform on the same mutex.
1805 * @param curr The current ModelAction; also denotes the object location to
1807 * @return The last unlock operation
1809 ModelAction * ModelChecker::get_last_unlock(ModelAction *curr) const
1811 void *location = curr->get_location();
1812 action_list_t *list = get_safe_ptr_action(obj_map, location);
1813 /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
1814 action_list_t::reverse_iterator rit;
1815 for (rit = list->rbegin(); rit != list->rend(); rit++)
1816 if ((*rit)->is_unlock() || (*rit)->is_wait())
1821 ModelAction * ModelChecker::get_parent_action(thread_id_t tid)
1823 ModelAction *parent = get_last_action(tid);
1825 parent = get_thread(tid)->get_creation();
1830 * Returns the clock vector for a given thread.
1831 * @param tid The thread whose clock vector we want
1832 * @return Desired clock vector
1834 ClockVector * ModelChecker::get_cv(thread_id_t tid)
1836 return get_parent_action(tid)->get_cv();
/**
 * Resolve a set of Promises with a current write. The set is provided in the
 * Node corresponding to @a write.
 * @param write The ModelAction that is fulfilling Promises
 * @return True if promises were resolved; false otherwise
 *
 * NOTE(review): elided in this chunk — opening brace, the 'delete promise' /
 * 'resolved = true' / index-advance lines, closing braces, and the final
 * 'return resolved;'. Verify against the full source.
 */
bool ModelChecker::resolve_promises(ModelAction *write)
	bool resolved = false;
	std::vector< thread_id_t, ModelAlloc<thread_id_t> > threads_to_check;

	/* promise_index only advances past promises this write does NOT fulfill */
	for (unsigned int i = 0, promise_index = 0; promise_index < promises->size(); i++) {
		Promise *promise = (*promises)[promise_index];
		if (write->get_node()->get_promise(i)) {
			ModelAction *read = promise->get_action();
			if (read->is_rmw()) {
				mo_graph->addRMWEdge(write, read);
			read->read_from(write);
			//First fix up the modification order for actions that happened
			//before the read
			r_modification_order(read, write);
			//Next fix up the modification order for actions that happened
			//after the read.
			post_r_modification_order(read, write);
			//Make sure the promise's value matches the write's value
			ASSERT(promise->get_value() == write->get_value());
			promises->erase(promises->begin() + promise_index);
			threads_to_check.push_back(read->get_tid());

	//Check whether reading these writes has made threads unable to
	//resolve promises
	for(unsigned int i=0;i<threads_to_check.size();i++)
		mo_check_promises(threads_to_check[i], write);
/**
 * Compute the set of promises that could potentially be satisfied by this
 * action. Note that the set computation actually appears in the Node, not in
 * ModelChecker.
 * @param curr The ModelAction that may satisfy promises
 *
 * NOTE(review): one condition line of the conjunction appears elided in this
 * chunk (likely an is_read()-style check); verify against the full source.
 */
void ModelChecker::compute_promises(ModelAction *curr)
	for (unsigned int i = 0; i < promises->size(); i++) {
		Promise *promise = (*promises)[i];
		const ModelAction *act = promise->get_action();
		/* curr may satisfy a promise only if it is a plain same-location
		 * write of the promised value, unordered w.r.t. the reader */
		if (!act->happens_before(curr) &&
				!act->could_synchronize_with(curr) &&
				!act->same_thread(curr) &&
				act->get_location() == curr->get_location() &&
				promise->get_value() == curr->get_value()) {
			curr->get_node()->set_promise(i, act->is_rmw());
/** Checks promises in response to change in ClockVector Threads.
 *
 * A thread that has newly synchronized with the promised read can no longer
 * fulfill the promise; if no eligible thread remains, the promise fails and
 * the execution becomes infeasible.
 * @param tid The thread whose clock vector changed
 * @param old_cv The clock vector prior to the merge (may be NULL)
 * @param merge_cv The clock vector after the merge
 */
void ModelChecker::check_promises(thread_id_t tid, ClockVector *old_cv, ClockVector *merge_cv)
	for (unsigned int i = 0; i < promises->size(); i++) {
		Promise *promise = (*promises)[i];
		const ModelAction *act = promise->get_action();
		/* Only threads that have just become synchronized with 'act' */
		if ((old_cv == NULL || !old_cv->synchronized_since(act)) &&
				merge_cv->synchronized_since(act)) {
			if (promise->increment_threads(tid)) {
				//Promise has failed
				failed_promise = true;
1924 void ModelChecker::check_promises_thread_disabled() {
1925 for (unsigned int i = 0; i < promises->size(); i++) {
1926 Promise *promise = (*promises)[i];
1927 if (promise->check_promise()) {
1928 failed_promise = true;
/** Checks promises in response to addition to modification order for threads.
 *
 * Definitions:
 *
 * pthread is the thread that performed the read that created the promise
 *
 * pread is the read that created the promise
 *
 * pwrite is either the first write to same location as pread by
 * pthread that is sequenced after pread or the value read by the
 * first read to the same location as pread by pthread that is
 * sequenced after pread.
 *
 * 1. If tid=pthread, then we check what other threads are reachable
 * through the modification order starting with pwrite. Those threads cannot
 * perform a write that will resolve the promise due to modification
 * order constraints.
 *
 * 2. If the tid is not pthread, we check whether pwrite can reach the
 * action write through the modification order. If so, that thread
 * cannot perform a future write that will resolve the promise due to
 * modification order constraints.
 *
 * @param tid The thread that either read from the model action
 * write, or actually did the model action write.
 *
 * @param write The ModelAction representing the relevant write.
 *
 * NOTE(review): elided in this chunk — several continue/return lines and
 * closing braces. Verify control flow against the full source.
 */
void ModelChecker::mo_check_promises(thread_id_t tid, const ModelAction *write) {
	void * location = write->get_location();
	for (unsigned int i = 0; i < promises->size(); i++) {
		Promise *promise = (*promises)[i];
		const ModelAction *act = promise->get_action();

		//Is this promise on the same location?
		if ( act->get_location() != location )

		//same thread as the promise
		if ( act->get_tid()==tid ) {

			//do we have a pwrite for the promise, if not, set it
			if (promise->get_write() == NULL ) {
				promise->set_write(write);
				//The pwrite cannot happen before the promise
				if (write->happens_before(act) && (write != act)) {
					failed_promise = true;
			/* pwrite's mo-successors cannot fulfill the promise */
			if (mo_graph->checkPromise(write, promise)) {
				failed_promise = true;

		//Don't do any lookups twice for the same thread
		if (promise->has_sync_thread(tid))

		/* case 2: this thread's write is mo-after pwrite */
		if (promise->get_write()&&mo_graph->checkReachable(promise->get_write(), write)) {
			if (promise->increment_threads(tid)) {
				failed_promise = true;
/**
 * Compute the set of writes that may break the current pending release
 * sequence. This information is extracted from previous release sequence
 * calculations.
 *
 * @param curr The current ModelAction. Must be a release sequence fixup
 * action.
 */
void ModelChecker::compute_relseq_breakwrites(ModelAction *curr)
	/* Nothing to fix up if no release sequences are pending */
	if (pending_rel_seqs->empty())

	struct release_seq *pending = pending_rel_seqs->back();
	/* Offer each potentially-breaking write as a backtracking choice */
	for (unsigned int i = 0; i < pending->writes.size(); i++) {
		const ModelAction *write = pending->writes[i];
		curr->get_node()->add_relseq_break(write);

	/* NULL means don't break the sequence; just synchronize */
	curr->get_node()->add_relseq_break(NULL);
/**
 * Build up an initial set of all past writes that this 'read' action may read
 * from. This set is determined by the clock vector's "happens before"
 * relationship.
 * @param curr is the current ModelAction that we are exploring; it must be a
 * 'read' operation.
 *
 * NOTE(review): elided in this chunk — opening brace, loop index
 * declaration, 'initialized = true;' assignments, per-action print calls,
 * break/continue lines, and closing braces. Verify against full source.
 */
void ModelChecker::build_reads_from_past(ModelAction *curr)
	std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
	ASSERT(curr->is_read());

	ModelAction *last_seq_cst = NULL;

	/* Track whether this object has been initialized */
	bool initialized = false;

	if (curr->is_seqcst()) {
		last_seq_cst = get_last_seq_cst(curr);
		/* We have to at least see the last sequentially consistent write,
			 so we are initialized. */
		if (last_seq_cst != NULL)

	/* Iterate over all threads */
	for (i = 0; i < thrd_lists->size(); i++) {
		/* Iterate over actions in thread, starting from most recent */
		action_list_t *list = &(*thrd_lists)[i];
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *act = *rit;

			/* Only consider 'write' actions */
			if (!act->is_write() || act == curr)

			/* Don't consider more than one seq_cst write if we are a seq_cst read. */
			if (!curr->is_seqcst() || (!act->is_seqcst() && (last_seq_cst == NULL || !act->happens_before(last_seq_cst))) || act == last_seq_cst) {
				/* a sleeping read is further restricted in its choices */
				if (!curr->get_sleep_flag() || curr->is_seqcst() || sleep_can_read_from(curr, act)) {
					DEBUG("Adding action to may_read_from:\n");
					if (DBG_ENABLED()) {
					curr->get_node()->add_read_from(act);

			/* Include at most one act per-thread that "happens before" curr */
			if (act->happens_before(curr)) {

	/** @todo Need a more informative way of reporting errors. */
	printf("ERROR: may read from uninitialized atomic\n");

	if (DBG_ENABLED() || !initialized) {
		printf("Reached read action:\n");
		printf("Printing may_read_from\n");
		curr->get_node()->print_may_read_from();
		printf("End printing may_read_from\n");
/** @brief May a sleeping (sleep-set) read take its value from this write?
 *
 * Walks backwards along the write's RMW reads-from chain looking for a
 * release write performed while this thread was in the sleep set.
 * @param curr The sleeping read
 * @param write The candidate write
 *
 * NOTE(review): elided in this chunk — the enclosing loop header over the
 * RMW chain and the 'return true/false' lines. Verify against full source.
 */
bool ModelChecker::sleep_can_read_from(ModelAction * curr, const ModelAction *write) {
		Node *prevnode=write->get_node()->get_parent();

		/* was this thread asleep when 'write' was chosen? */
		bool thread_sleep=prevnode->enabled_status(curr->get_tid())==THREAD_SLEEP_SET;
		if (write->is_release()&&thread_sleep)
		if (!write->is_rmw()) {
		if (write->get_reads_from()==NULL)
		/* step back along the RMW chain */
		write=write->get_reads_from();
2114 static void print_list(action_list_t *list)
2116 action_list_t::iterator it;
2118 printf("---------------------------------------------------------------------\n");
2120 unsigned int hash=0;
2122 for (it = list->begin(); it != list->end(); it++) {
2124 hash=hash^(hash<<3)^((*it)->hash());
2126 printf("HASH %u\n", hash);
2127 printf("---------------------------------------------------------------------\n");
#if SUPPORT_MOD_ORDER_DUMP
/** @brief Dump the modification order plus rf/sb edges as a Graphviz file.
 *
 * Writes "<filename>.dot" containing the mo_graph nodes, red "rf" edges
 * from each read to its write, and blue "sb" (sequenced-before) edges
 * linking consecutive actions of each thread.
 * @param filename Base name (without extension) for the .dot output
 *
 * NOTE(review): elided in this chunk — the 'buffer' declaration, closing
 * braces, and (presumably) the fclose(file) call. Verify upstream.
 */
void ModelChecker::dumpGraph(char *filename) {
	sprintf(buffer, "%s.dot",filename);
	FILE *file=fopen(buffer, "w");
	fprintf(file, "digraph %s {\n",filename);
	mo_graph->dumpNodes(file);
	/* last-seen action per thread, for drawing sb edges */
	ModelAction ** thread_array=(ModelAction **)model_calloc(1, sizeof(ModelAction *)*get_num_threads());

	for (action_list_t::iterator it = action_trace->begin(); it != action_trace->end(); it++) {
		ModelAction *action=*it;
		if (action->is_read()) {
			fprintf(file, "N%u [label=\"%u, T%u\"];\n", action->get_seq_number(),action->get_seq_number(), action->get_tid());
			if (action->get_reads_from()!=NULL)
				fprintf(file, "N%u -> N%u[label=\"rf\", color=red];\n", action->get_seq_number(), action->get_reads_from()->get_seq_number());
		if (thread_array[action->get_tid()] != NULL) {
			fprintf(file, "N%u -> N%u[label=\"sb\", color=blue];\n", thread_array[action->get_tid()]->get_seq_number(), action->get_seq_number());

		thread_array[action->get_tid()]=action;
	fprintf(file,"}\n");
	model_free(thread_array);
/** @brief Print execution statistics and the current action trace. */
void ModelChecker::print_summary()
	printf("Number of executions: %d\n", num_executions);
	printf("Number of feasible executions: %d\n", num_feasible_executions);
	printf("Total nodes created: %d\n", node_stack->get_total_nodes());

#if SUPPORT_MOD_ORDER_DUMP
	/* Dump per-execution mo/sb graphs when enabled at build time */
	char buffername[100];
	sprintf(buffername, "exec%04u", num_executions);
	mo_graph->dumpGraphToFile(buffername);
	sprintf(buffername, "graph%04u", num_executions);
	dumpGraph(buffername);

	if (!isfinalfeasible())
		printf("INFEASIBLE EXECUTION!\n");
	print_list(action_trace);
2181 * Add a Thread to the system for the first time. Should only be called once
2183 * @param t The Thread to add
2185 void ModelChecker::add_thread(Thread *t)
2187 thread_map->put(id_to_int(t->get_id()), t);
2188 scheduler->add_thread(t);
2192 * Removes a thread from the scheduler.
2193 * @param the thread to remove.
2195 void ModelChecker::remove_thread(Thread *t)
2197 scheduler->remove_thread(t);
2201 * @brief Get a Thread reference by its ID
2202 * @param tid The Thread's ID
2203 * @return A Thread reference
2205 Thread * ModelChecker::get_thread(thread_id_t tid) const
2207 return thread_map->get(id_to_int(tid));
2211 * @brief Get a reference to the Thread in which a ModelAction was executed
2212 * @param act The ModelAction
2213 * @return A Thread reference
2215 Thread * ModelChecker::get_thread(ModelAction *act) const
2217 return get_thread(act->get_tid());
2221 * @brief Check if a Thread is currently enabled
2222 * @param t The Thread to check
2223 * @return True if the Thread is currently enabled
2225 bool ModelChecker::is_enabled(Thread *t) const
2227 return scheduler->is_enabled(t);
2231 * @brief Check if a Thread is currently enabled
2232 * @param tid The ID of the Thread to check
2233 * @return True if the Thread is currently enabled
2235 bool ModelChecker::is_enabled(thread_id_t tid) const
2237 return scheduler->is_enabled(tid);
2241 * Switch from a user-context to the "master thread" context (a.k.a. system
2242 * context). This switch is made with the intention of exploring a particular
2243 * model-checking action (described by a ModelAction object). Must be called
2244 * from a user-thread context.
2246 * @param act The current action that will be explored. May be NULL only if
2247 * trace is exiting via an assertion (see ModelChecker::set_assert and
2248 * ModelChecker::has_asserted).
2249 * @return Return status from the 'swap' call (i.e., success/fail, 0/-1)
2251 int ModelChecker::switch_to_master(ModelAction *act)
2254 Thread *old = thread_current();
2255 set_current_action(act);
2256 old->set_state(THREAD_READY);
2257 return Thread::swap(old, &system_context);
2261 * Takes the next step in the execution, if possible.
2262 * @return Returns true (success) if a step was taken and false otherwise.
2264 bool ModelChecker::take_step() {
2268 Thread *curr = priv->current_action ? get_thread(priv->current_action) : NULL;
2270 if (curr->get_state() == THREAD_READY) {
2271 ASSERT(priv->current_action);
2273 priv->nextThread = check_current_action(priv->current_action);
2274 priv->current_action = NULL;
2276 if (curr->is_blocked() || curr->is_complete())
2277 scheduler->remove_thread(curr);
2282 Thread *next = scheduler->next_thread(priv->nextThread);
2284 /* Infeasible -> don't take any more steps */
2288 if (params.bound != 0) {
2289 if (priv->used_sequence_numbers > params.bound) {
2294 DEBUG("(%d, %d)\n", curr ? id_to_int(curr->get_id()) : -1,
2295 next ? id_to_int(next->get_id()) : -1);
2298 * Launch end-of-execution release sequence fixups only when there are:
2300 * (1) no more user threads to run (or when execution replay chooses
2301 * the 'model_thread')
2302 * (2) pending release sequences
2303 * (3) pending assertions (i.e., data races)
2304 * (4) no pending promises
2306 if (!pending_rel_seqs->empty() && (!next || next->is_model_thread()) &&
2307 isfinalfeasible() && !unrealizedraces.empty()) {
2308 printf("*** WARNING: release sequence fixup action (%zu pending release seuqences) ***\n",
2309 pending_rel_seqs->size());
2310 ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
2311 std::memory_order_seq_cst, NULL, VALUE_NONE,
2313 set_current_action(fixup);
2317 /* next == NULL -> don't take any more steps */
2321 next->set_state(THREAD_RUNNING);
2323 if (next->get_pending() != NULL) {
2324 /* restart a pending action */
2325 set_current_action(next->get_pending());
2326 next->set_pending(NULL);
2327 next->set_state(THREAD_READY);
2331 /* Return false only if swap fails with an error */
2332 return (Thread::swap(&system_context, next) == 0);
2335 /** Runs the current execution until threre are no more steps to take. */
2336 void ModelChecker::finish_execution() {
2339 while (take_step());