10 #include "nodestack.h"
13 #include "clockvector.h"
14 #include "cyclegraph.h"
17 #include "threads-model.h"
18 #include "bugmessage.h"
20 #define INITIAL_THREAD_ID 0
// NOTE(review): this listing is elided — original line numbers are embedded in each
// line and several original lines are missing; code is kept byte-identical.
23 * Structure for holding small ModelChecker members that should be snapshotted
25 struct model_snapshot_members {
// Constructor: initialize thread-id counter, sequence counter, and failure flags.
26 model_snapshot_members() :
27 /* First thread created will have id INITIAL_THREAD_ID */
28 next_thread_id(INITIAL_THREAD_ID),
29 used_sequence_numbers(0),
32 failed_promise(false),
33 too_many_reads(false),
34 no_valid_reads(false),
35 bad_synchronization(false),
// Destructor: free each heap-allocated bug_message held in 'bugs'.
39 ~model_snapshot_members() {
40 for (unsigned int i = 0; i < bugs.size(); i++)
// Snapshotted per-execution state:
45 unsigned int next_thread_id;
46 modelclock_t used_sequence_numbers;
47 ModelAction *next_backtrack;
48 SnapVector<bug_message *> bugs;
52 /** @brief Incorrectly-ordered synchronization was made */
53 bool bad_synchronization;
59 /** @brief Constructor */
60 ModelExecution::ModelExecution(ModelChecker *m,
61 struct model_params *params,
63 NodeStack *node_stack) :
69 obj_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
70 condvar_waiters_map(),
76 thrd_last_fence_release(),
77 node_stack(node_stack),
78 priv(new struct model_snapshot_members()),
79 mo_graph(new CycleGraph())
81 /* Initialize a model-checker thread, for special ModelActions */
// get_next_id() starts at INITIAL_THREAD_ID, so the model thread takes id 0.
82 model_thread = new Thread(get_next_id());
83 thread_map.put(id_to_int(model_thread->get_id()), model_thread);
// Let the scheduler call back into this execution engine.
84 scheduler->register_engine(this);
87 /** @brief Destructor */
88 ModelExecution::~ModelExecution()
// Delete every Thread in the map (indices 0..num_threads-1, including the
// model thread created in the constructor).
90 for (unsigned int i = 0; i < get_num_threads(); i++)
91 delete thread_map.get(i);
// Promise cleanup follows (loop body elided in this listing).
95 for (unsigned int i = 0; i < promises.size(); i++)
// Forward to the owning ModelChecker's execution counter.
102 int ModelExecution::get_execution_number() const
104 return model->get_execution_number();
// Look up the action list for 'ptr', lazily creating an empty list on first
// access so callers never receive NULL.
107 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr)
109 action_list_t *tmp = hash->get(ptr);
111 tmp = new action_list_t();
// Same lazy-initialization pattern as get_safe_ptr_action, but for the
// per-thread action-list vectors.
117 static SnapVector<action_list_t> * get_safe_ptr_vect_action(HashTable<void *, SnapVector<action_list_t> *, uintptr_t, 4> * hash, void * ptr)
119 SnapVector<action_list_t> *tmp = hash->get(ptr);
121 tmp = new SnapVector<action_list_t>();
// Return the list of actions performed by thread 'tid' on object 'obj', or
// (presumably) NULL when no such per-thread list exists — the fall-through
// return is elided in this listing.
127 action_list_t * ModelExecution::get_actions_on_obj(void * obj, thread_id_t tid) const
129 SnapVector<action_list_t> *wrv = obj_thrd_map.get(obj);
132 unsigned int thread=id_to_int(tid);
// Bounds-check: the vector only has entries for threads seen on this object.
133 if (thread < wrv->size())
134 return &(*wrv)[thread];
139 /** @return a thread ID for a new Thread */
140 thread_id_t ModelExecution::get_next_id()
// Post-increment: ids are handed out sequentially starting at INITIAL_THREAD_ID.
142 return priv->next_thread_id++;
145 /** @return the number of user threads created during this execution */
146 unsigned int ModelExecution::get_num_threads() const
// next_thread_id doubles as the count, since ids are dense from 0.
148 return priv->next_thread_id;
151 /** @return a sequence number for a new ModelAction */
152 modelclock_t ModelExecution::get_next_seq_num()
// Pre-increment: sequence numbers start at 1 (0 is the initial value).
154 return ++priv->used_sequence_numbers;
158 * @brief Should the current action wake up a given thread?
160 * @param curr The current action
161 * @param thread The thread that we might wake up
162 * @return True, if we should wake up the sleeping thread; false otherwise
164 bool ModelExecution::should_wake_up(const ModelAction *curr, const Thread *thread) const
// 'asleep' is the action the sleeping thread is blocked on.
166 const ModelAction *asleep = thread->get_pending();
167 /* Don't allow partial RMW to wake anyone up */
170 /* Synchronizing actions may have been backtracked */
171 if (asleep->could_synchronize_with(curr))
173 /* All acquire/release fences and fence-acquire/store-release */
174 if (asleep->is_fence() && asleep->is_acquire() && curr->is_release())
176 /* Fence-release + store can awake load-acquire on the same location */
177 if (asleep->is_read() && asleep->is_acquire() && curr->same_var(asleep) && curr->is_write()) {
178 ModelAction *fence_release = get_last_fence_release(curr->get_tid());
// Wake only if the sleeper's last action precedes the writer's fence-release.
179 if (fence_release && *(get_last_action(thread->get_id())) < *fence_release)
// Scan every thread in the sleep set and remove those that 'curr' may
// interact with, per should_wake_up().
185 void ModelExecution::wake_up_sleeping_actions(ModelAction *curr)
187 for (unsigned int i = 0; i < get_num_threads(); i++) {
188 Thread *thr = get_thread(int_to_id(i));
189 if (scheduler->is_sleep_set(thr)) {
190 if (should_wake_up(curr, thr))
191 /* Remove this thread from sleep set */
192 scheduler->remove_sleep(thr);
197 /** @brief Alert the model-checker that an incorrectly-ordered
198 * synchronization was made */
199 void ModelExecution::set_bad_synchronization()
// Sticky flag in the snapshotted state; consulted elsewhere (not visible here).
201 priv->bad_synchronization = true;
// Record a bug report for this execution; the feasibility check below decides
// further handling (elided in this listing).
204 bool ModelExecution::assert_bug(const char *msg)
206 priv->bugs.push_back(new bug_message(msg));
208 if (isfeasibleprefix()) {
215 /** @return True, if any bugs have been reported for this execution */
216 bool ModelExecution::have_bug_reports() const
218 return priv->bugs.size() != 0;
// Accessor for the recorded bug list (body elided in this listing).
221 SnapVector<bug_message *> * ModelExecution::get_bugs() const
227 * Check whether the current trace has triggered an assertion which should halt
230 * @return True, if the execution should be aborted; false otherwise
232 bool ModelExecution::has_asserted() const
234 return priv->asserted;
238 * Trigger a trace assertion which should cause this execution to be halted.
239 * This can be due to a detected bug or due to an infeasibility that should
242 void ModelExecution::set_assert()
// Sticky: once set, check_current_action's work loop stops (see has_asserted()).
244 priv->asserted = true;
248 * Check if we are in a deadlock. Should only be called at the end of an
249 * execution, although it should not give false positives in the middle of an
250 * execution (there should be some ENABLED thread).
252 * @return True if program is in a deadlock; false otherwise
254 bool ModelExecution::is_deadlocked() const
256 bool blocking_threads = false;
257 for (unsigned int i = 0; i < get_num_threads(); i++) {
258 thread_id_t tid = int_to_id(i);
261 Thread *t = get_thread(tid);
// A user thread with a pending action is blocked waiting on something.
262 if (!t->is_model_thread() && t->get_pending())
263 blocking_threads = true;
264 return blocking_threads;
269 * Check if this is a complete execution. That is, have all thread completed
270 * execution (rather than exiting because sleep sets have forced a redundant
273 * @return True if the execution is complete.
275 bool ModelExecution::is_complete_execution() const
// Complete iff no thread remains enabled (early-return on first enabled thread;
// the returns themselves are elided in this listing).
277 for (unsigned int i = 0; i < get_num_threads(); i++)
278 if (is_enabled(int_to_id(i)))
284 * @brief Find the last fence-related backtracking conflict for a ModelAction
286 * This function performs the search for the most recent conflicting action
287 * against which we should perform backtracking, as affected by fence
288 * operations. This includes pairs of potentially-synchronizing actions which
289 * occur due to fence-acquire or fence-release, and hence should be explored in
290 * the opposite execution order.
292 * @param act The current action
293 * @return The most recent action which conflicts with act due to fences
295 ModelAction * ModelExecution::get_last_fence_conflict(ModelAction *act) const
297 /* Only perform release/acquire fence backtracking for stores */
298 if (!act->is_write())
301 /* Find a fence-release (or, act is a release) */
302 ModelAction *last_release;
303 if (act->is_release())
306 last_release = get_last_fence_release(act->get_tid());
310 /* Skip past the release */
// Walk the full trace in reverse until we reach 'last_release'; the assert
// guarantees the release actually appears in the trace.
311 const action_list_t *list = &action_trace;
312 action_list_t::const_reverse_iterator rit;
313 for (rit = list->rbegin(); rit != list->rend(); rit++)
314 if (*rit == last_release)
316 ASSERT(rit != list->rend());
321 * load --sb-> fence-acquire */
// Per-thread candidates: the latest acquire-fence and a prior same-variable load.
322 ModelVector<ModelAction *> acquire_fences(get_num_threads(), NULL);
323 ModelVector<ModelAction *> prior_loads(get_num_threads(), NULL);
324 bool found_acquire_fences = false;
325 for ( ; rit != list->rend(); rit++) {
326 ModelAction *prev = *rit;
// Same-thread actions cannot be backtracking conflicts.
327 if (act->same_thread(prev))
330 int tid = id_to_int(prev->get_tid());
332 if (prev->is_read() && act->same_var(prev)) {
333 if (prev->is_acquire()) {
334 /* Found most recent load-acquire, don't need
335 * to search for more fences */
336 if (!found_acquire_fences)
339 prior_loads[tid] = prev;
// Record only the most recent acquire-fence per thread (reverse walk).
342 if (prev->is_acquire() && prev->is_fence() && !acquire_fences[tid]) {
343 found_acquire_fences = true;
344 acquire_fences[tid] = prev;
// Choose the latest fence (by execution order) that also has a prior load.
348 ModelAction *latest_backtrack = NULL;
349 for (unsigned int i = 0; i < acquire_fences.size(); i++)
350 if (acquire_fences[i] && prior_loads[i])
351 if (!latest_backtrack || *latest_backtrack < *acquire_fences[i])
352 latest_backtrack = acquire_fences[i];
353 return latest_backtrack;
357 * @brief Find the last backtracking conflict for a ModelAction
359 * This function performs the search for the most recent conflicting action
360 * against which we should perform backtracking. This primary includes pairs of
361 * synchronizing actions which should be explored in the opposite execution
364 * @param act The current action
365 * @return The most recent action which conflicts with act
367 ModelAction * ModelExecution::get_last_conflict(ModelAction *act) const
// Dispatch on action type; each case scans this location's action list
// backwards (most recent first) for the appropriate kind of conflict.
369 switch (act->get_type()) {
370 /* case ATOMIC_FENCE: fences don't directly cause backtracking */
374 ModelAction *ret = NULL;
376 /* linear search: from most recent to oldest */
377 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
378 action_list_t::reverse_iterator rit;
379 for (rit = list->rbegin(); rit != list->rend(); rit++) {
380 ModelAction *prev = *rit;
381 if (prev->could_synchronize_with(act)) {
// Also consider fence-induced conflicts for this action.
387 ModelAction *ret2 = get_last_fence_conflict(act);
397 case ATOMIC_TRYLOCK: {
398 /* linear search: from most recent to oldest */
399 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
400 action_list_t::reverse_iterator rit;
401 for (rit = list->rbegin(); rit != list->rend(); rit++) {
402 ModelAction *prev = *rit;
403 if (act->is_conflicting_lock(prev))
408 case ATOMIC_UNLOCK: {
409 /* linear search: from most recent to oldest */
// An unlock conflicts with another thread's failed trylock.
410 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
411 action_list_t::reverse_iterator rit;
412 for (rit = list->rbegin(); rit != list->rend(); rit++) {
413 ModelAction *prev = *rit;
414 if (!act->same_thread(prev) && prev->is_failed_trylock())
420 /* linear search: from most recent to oldest */
// (Presumably the ATOMIC_WAIT case — label elided in this listing.)
421 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
422 action_list_t::reverse_iterator rit;
423 for (rit = list->rbegin(); rit != list->rend(); rit++) {
424 ModelAction *prev = *rit;
425 if (!act->same_thread(prev) && prev->is_failed_trylock())
427 if (!act->same_thread(prev) && prev->is_notify())
433 case ATOMIC_NOTIFY_ALL:
434 case ATOMIC_NOTIFY_ONE: {
435 /* linear search: from most recent to oldest */
// A notify conflicts with another thread's wait on the same condvar.
436 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
437 action_list_t::reverse_iterator rit;
438 for (rit = list->rbegin(); rit != list->rend(); rit++) {
439 ModelAction *prev = *rit;
440 if (!act->same_thread(prev) && prev->is_wait())
451 /** This method finds backtracking points where we should try to
452 * reorder the parameter ModelAction against.
454 * @param the ModelAction to find backtracking points for.
456 void ModelExecution::set_backtracking(ModelAction *act)
458 Thread *t = get_thread(act);
// Find the most recent conflicting action; 'node' is its parent choice point.
459 ModelAction *prev = get_last_conflict(act);
463 Node *node = prev->get_node()->get_parent();
465 /* See Dynamic Partial Order Reduction (addendum), POPL '05 */
// If act's thread was enabled at the conflict point, only that thread needs a
// backtracking mark; otherwise consider all threads.
466 int low_tid, high_tid;
467 if (node->enabled_status(t->get_id()) == THREAD_ENABLED) {
468 low_tid = id_to_int(act->get_tid());
469 high_tid = low_tid + 1;
472 high_tid = get_num_threads();
475 for (int i = low_tid; i < high_tid; i++) {
476 thread_id_t tid = int_to_id(i);
478 /* Make sure this thread can be enabled here. */
479 if (i >= node->get_num_threads())
482 /* See Dynamic Partial Order Reduction (addendum), POPL '05 */
483 /* Don't backtrack into a point where the thread is disabled or sleeping. */
484 if (node->enabled_status(tid) != THREAD_ENABLED)
487 /* Check if this has been explored already */
488 if (node->has_been_explored(tid))
491 /* See if fairness allows */
// Under a fairness window, skip tid if some enabled thread has priority.
492 if (params->fairwindow != 0 && !node->has_priority(tid)) {
494 for (int t = 0; t < node->get_num_threads(); t++) {
495 thread_id_t tother = int_to_id(t);
496 if (node->is_enabled(tother) && node->has_priority(tother)) {
505 /* See if CHESS-like yield fairness allows */
506 if (params->yieldon) {
508 for (int t = 0; t < node->get_num_threads(); t++) {
509 thread_id_t tother = int_to_id(t);
510 if (node->is_enabled(tother) && node->has_priority_over(tid, tother)) {
519 /* Cache the latest backtracking point */
520 set_latest_backtrack(prev);
522 /* If this is a new backtracking point, mark the tree */
523 if (!node->set_backtrack(tid))
525 DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
526 id_to_int(prev->get_tid()),
527 id_to_int(t->get_id()));
536 * @brief Cache the a backtracking point as the "most recent", if eligible
538 * Note that this does not prepare the NodeStack for this backtracking
539 * operation, it only caches the action on a per-execution basis
541 * @param act The operation at which we should explore a different next action
542 * (i.e., backtracking point)
543 * @return True, if this action is now the most recent backtracking point;
546 bool ModelExecution::set_latest_backtrack(ModelAction *act)
// Replace the cached point only when 'act' is later in execution order.
548 if (!priv->next_backtrack || *act > *priv->next_backtrack) {
549 priv->next_backtrack = act;
556 * Returns last backtracking point. The model checker will explore a different
557 * path for this point in the next execution.
558 * @return The ModelAction at which the next execution should diverge.
560 ModelAction * ModelExecution::get_next_backtrack()
// Consume-once semantics: clear the cached point after reading it.
562 ModelAction *next = priv->next_backtrack;
563 priv->next_backtrack = NULL;
568 * Processes a read model action.
569 * @param curr is the read model action to process.
570 * @return True if processing this read updates the mo_graph.
572 bool ModelExecution::process_read(ModelAction *curr)
574 Node *node = curr->get_node();
576 bool updated = false;
// Three read-from strategies, chosen by the NodeStack exploration state.
577 switch (node->get_read_from_status()) {
578 case READ_FROM_PAST: {
// Read from an already-executed write.
579 const ModelAction *rf = node->get_read_from_past();
582 mo_graph->startChanges();
584 ASSERT(!is_infeasible());
// Recency (too-many-reads) check; on failure try the next read-from candidate,
// rolling back the speculative mo_graph changes.
585 if (!check_recency(curr, rf)) {
586 if (node->increment_read_from()) {
587 mo_graph->rollbackChanges();
590 priv->too_many_reads = true;
594 updated = r_modification_order(curr, rf);
596 mo_graph->commitChanges();
597 mo_check_promises(curr, true);
600 case READ_FROM_PROMISE: {
// Read from a previously-created promised (future) value.
601 Promise *promise = curr->get_node()->get_read_from_promise();
602 if (promise->add_reader(curr))
603 priv->failed_promise = true;
604 curr->set_read_from_promise(promise);
605 mo_graph->startChanges();
606 if (!check_recency(curr, promise))
607 priv->too_many_reads = true;
608 updated = r_modification_order(curr, promise);
609 mo_graph->commitChanges();
612 case READ_FROM_FUTURE: {
613 /* Read from future value */
// Create a fresh Promise for a value not yet written.
614 struct future_value fv = node->get_future_value();
615 Promise *promise = new Promise(this, curr, fv);
616 curr->set_read_from_promise(promise);
617 promises.push_back(promise);
618 mo_graph->startChanges();
619 updated = r_modification_order(curr, promise);
620 mo_graph->commitChanges();
// Propagate the read value back to the user thread.
626 get_thread(curr)->set_return_value(curr->get_return_value());
632 * Processes a lock, trylock, or unlock model action. @param curr is
633 * the read model action to process.
635 * The try lock operation checks whether the lock is taken. If not,
636 * it falls to the normal lock operation case. If so, it returns
639 * The lock operation has already been checked that it is enabled, so
640 * it just grabs the lock and synchronizes with the previous unlock.
642 * The unlock operation has to re-enable all of the threads that are
643 * waiting on the lock.
645 * @return True if synchronization was updated; false otherwise
647 bool ModelExecution::process_mutex(ModelAction *curr)
649 std::mutex *mutex = curr->get_mutex();
650 struct std::mutex_state *state = NULL;
653 state = mutex->get_state();
655 switch (curr->get_type()) {
656 case ATOMIC_TRYLOCK: {
// Trylock succeeds iff the mutex is currently free; return value 1/0 to user.
657 bool success = !state->locked;
658 curr->set_try_lock(success);
660 get_thread(curr)->set_return_value(0);
663 get_thread(curr)->set_return_value(1);
665 //otherwise fall into the lock case
// Detect use of the mutex before its initialization (clock comparison against
// the allocation clock).
667 if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
668 assert_bug("Lock access before initialization");
// Mark the mutex owned by the acquiring thread.
669 state->locked = get_thread(curr);
670 ModelAction *unlock = get_last_unlock(curr);
671 //synchronize with the previous unlock statement
672 if (unlock != NULL) {
673 synchronize(unlock, curr);
679 case ATOMIC_UNLOCK: {
680 /* wake up the other threads */
681 for (unsigned int i = 0; i < get_num_threads(); i++) {
682 Thread *t = get_thread(int_to_id(i));
683 Thread *curr_thrd = get_thread(curr);
684 if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
688 /* unlock the lock - after checking who was waiting on it */
689 state->locked = NULL;
// Plain unlock ends here; ATOMIC_WAIT continues below.
691 if (!curr->is_wait())
692 break; /* The rest is only for ATOMIC_WAIT */
694 /* Should we go to sleep? (simulate spurious failures) */
// misc == 0 selects the "really wait" branch; other values model spurious wakeup.
695 if (curr->get_node()->get_misc() == 0) {
696 get_safe_ptr_action(&condvar_waiters_map, curr->get_location())->push_back(curr);
698 scheduler->sleep(get_thread(curr));
702 case ATOMIC_NOTIFY_ALL: {
703 action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
704 //activate all the waiting threads
705 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
706 scheduler->wake(get_thread(*rit));
711 case ATOMIC_NOTIFY_ONE: {
// misc selects WHICH waiter is woken, so all choices get explored.
712 action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
713 int wakeupthread = curr->get_node()->get_misc();
714 action_list_t::iterator it = waiters->begin();
715 advance(it, wakeupthread);
716 scheduler->wake(get_thread(*it));
728 * @brief Check if the current pending promises allow a future value to be sent
730 * If one of the following is true:
731 * (a) there are no pending promises
732 * (b) the reader and writer do not cross any promises
733 * Then, it is safe to pass a future value back now.
735 * Otherwise, we must save the pending future value until (a) or (b) is true
737 * @param writer The operation which sends the future value. Must be a write.
738 * @param reader The operation which will observe the value. Must be a read.
739 * @return True if the future value can be sent now; false if it must wait.
741 bool ModelExecution::promises_may_allow(const ModelAction *writer,
742 const ModelAction *reader) const
// Fast path (a): no pending promises at all.
744 if (promises.empty())
// Check (b) against each promise's first reader, newest promise first.
746 for (int i = promises.size() - 1; i >= 0; i--) {
747 ModelAction *pr = promises[i]->get_reader(0);
748 //reader is after promise...doesn't cross any promise
751 //writer is after promise, reader before...bad...
759 * @brief Add a future value to a reader
761 * This function performs a few additional checks to ensure that the future
762 * value can be feasibly observed by the reader
764 * @param writer The operation whose value is sent. Must be a write.
765 * @param reader The read operation which may read the future value. Must be a read.
767 void ModelExecution::add_future_value(const ModelAction *writer, ModelAction *reader)
769 /* Do more ambitious checks now that mo is more complete */
770 if (!mo_may_allow(writer, reader))
773 Node *node = reader->get_node();
775 /* Find an ancestor thread which exists at the time of the reader */
// Walk up the parent chain until the thread id is one the reader's node knows.
776 Thread *write_thread = get_thread(writer);
777 while (id_to_int(write_thread->get_id()) >= node->get_num_threads())
778 write_thread = write_thread->get_parent();
// Package value, expiration (seq + maxfuturedelay), and sending thread.
780 struct future_value fv = {
781 writer->get_write_value(),
782 writer->get_seq_number() + params->maxfuturedelay,
783 write_thread->get_id(),
// A newly-added future value is a backtracking point at the reader.
785 if (node->add_future_value(fv))
786 set_latest_backtrack(reader);
790 * Process a write ModelAction
791 * @param curr The ModelAction to process
792 * @return True if the mo_graph was updated or promises were resolved
794 bool ModelExecution::process_write(ModelAction *curr)
796 /* Readers to which we may send our future value */
797 ModelVector<ModelAction *> send_fv;
799 const ModelAction *earliest_promise_reader;
800 bool updated_promises = false;
// Update modification order; w_modification_order also collects candidate
// future-value readers into send_fv.
802 bool updated_mod_order = w_modification_order(curr, &send_fv);
803 Promise *promise = pop_promise_to_resolve(curr);
806 earliest_promise_reader = promise->get_reader(0);
807 updated_promises = resolve_promise(curr, promise);
809 earliest_promise_reader = NULL;
811 for (unsigned int i = 0; i < send_fv.size(); i++) {
812 ModelAction *read = send_fv[i];
814 /* Don't send future values to reads after the Promise we resolve */
815 if (!earliest_promise_reader || *read < *earliest_promise_reader) {
816 /* Check if future value can be sent immediately */
817 if (promises_may_allow(curr, read)) {
818 add_future_value(curr, read);
// Otherwise defer it until promises no longer block it.
820 futurevalues.push_back(PendingFutureValue(curr, read));
825 /* Check the pending future values */
// Reverse index loop so erase() doesn't disturb unvisited entries.
826 for (int i = (int)futurevalues.size() - 1; i >= 0; i--) {
827 struct PendingFutureValue pfv = futurevalues[i];
828 if (promises_may_allow(pfv.writer, pfv.reader)) {
829 add_future_value(pfv.writer, pfv.reader);
830 futurevalues.erase(futurevalues.begin() + i);
834 mo_graph->commitChanges();
835 mo_check_promises(curr, false);
837 get_thread(curr)->set_return_value(VALUE_NONE);
838 return updated_mod_order || updated_promises;
842 * Process a fence ModelAction
843 * @param curr The ModelAction to process
844 * @return True if synchronization was updated
846 bool ModelExecution::process_fence(ModelAction *curr)
849 * fence-relaxed: no-op
850 * fence-release: only log the occurence (not in this function), for
851 * use in later synchronization
852 * fence-acquire (this function): search for hypothetical release
854 * fence-seq-cst: MO constraints formed in {r,w}_modification_order
856 bool updated = false;
857 if (curr->is_acquire()) {
858 action_list_t *list = &action_trace;
859 action_list_t::reverse_iterator rit;
860 /* Find X : is_read(X) && X --sb-> curr */
861 for (rit = list->rbegin(); rit != list->rend(); rit++) {
862 ModelAction *act = *rit;
// Only same-thread (sequenced-before) actions are relevant.
865 if (act->get_tid() != curr->get_tid())
867 /* Stop at the beginning of the thread */
868 if (act->is_thread_start())
870 /* Stop once we reach a prior fence-acquire */
871 if (act->is_fence() && act->is_acquire())
875 /* read-acquire will find its own release sequences */
876 if (act->is_acquire())
879 /* Establish hypothetical release sequences */
// Synchronize the fence with every release head reachable from this read.
880 rel_heads_list_t release_heads;
881 get_release_seq_heads(curr, act, &release_heads);
882 for (unsigned int i = 0; i < release_heads.size(); i++)
883 synchronize(release_heads[i], curr);
884 if (release_heads.size() != 0)
892 * @brief Process the current action for thread-related activity
894 * Performs current-action processing for a THREAD_* ModelAction. Proccesses
895 * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
896 * synchronization, etc. This function is a no-op for non-THREAD actions
897 * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
899 * @param curr The current action
900 * @return True if synchronization was updated or a thread completed
902 bool ModelExecution::process_thread_action(ModelAction *curr)
904 bool updated = false;
906 switch (curr->get_type()) {
907 case THREAD_CREATE: {
// Materialize the new Thread from the parameters packed in the action's value.
908 thrd_t *thrd = (thrd_t *)curr->get_location();
909 struct thread_params *params = (struct thread_params *)curr->get_value();
910 Thread *th = new Thread(get_next_id(), thrd, params->func, params->arg, get_thread(curr));
912 th->set_creation(curr);
913 /* Promises can be satisfied by children */
914 for (unsigned int i = 0; i < promises.size(); i++) {
915 Promise *promise = promises[i];
916 if (promise->thread_is_available(curr->get_tid()))
917 promise->add_thread(th->get_id());
// (Presumably the THREAD_JOIN case — label elided in this listing.)
922 Thread *blocking = curr->get_thread_operand();
923 ModelAction *act = get_last_action(blocking->get_id());
924 synchronize(act, curr);
925 updated = true; /* trigger rel-seq checks */
928 case THREAD_FINISH: {
929 Thread *th = get_thread(curr);
930 /* Wake up any joining threads */
931 for (unsigned int i = 0; i < get_num_threads(); i++) {
932 Thread *waiting = get_thread(int_to_id(i));
933 if (waiting->waiting_on() == th &&
934 waiting->get_pending()->is_thread_join())
935 scheduler->wake(waiting);
938 /* Completed thread can't satisfy promises */
939 for (unsigned int i = 0; i < promises.size(); i++) {
940 Promise *promise = promises[i];
941 if (promise->thread_is_available(th->get_id()))
942 if (promise->eliminate_thread(th->get_id()))
// Eliminating the last eligible thread fails the promise.
943 priv->failed_promise = true;
945 updated = true; /* trigger rel-seq checks */
949 check_promises(curr->get_tid(), NULL, curr->get_cv());
960 * @brief Process the current action for release sequence fixup activity
962 * Performs model-checker release sequence fixups for the current action,
963 * forcing a single pending release sequence to break (with a given, potential
964 * "loose" write) or to complete (i.e., synchronize). If a pending release
965 * sequence forms a complete release sequence, then we must perform the fixup
966 * synchronization, mo_graph additions, etc.
968 * @param curr The current action; must be a release sequence fixup action
969 * @param work_queue The work queue to which to add work items as they are
972 void ModelExecution::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
// The node encodes the chosen "break" write; NULL presumably means "complete".
974 const ModelAction *write = curr->get_node()->get_relseq_break();
975 struct release_seq *sequence = pending_rel_seqs.back();
976 pending_rel_seqs.pop_back();
978 ModelAction *acquire = sequence->acquire;
979 const ModelAction *rf = sequence->rf;
980 const ModelAction *release = sequence->release;
984 ASSERT(release->same_thread(rf));
988 * @todo Forcing a synchronization requires that we set
989 * modification order constraints. For instance, we can't allow
990 * a fixup sequence in which two separate read-acquire
991 * operations read from the same sequence, where the first one
992 * synchronizes and the other doesn't. Essentially, we can't
993 * allow any writes to insert themselves between 'release' and
997 /* Must synchronize */
998 if (!synchronize(release, acquire))
1000 /* Re-check all pending release sequences */
1001 work_queue->push_back(CheckRelSeqWorkEntry(NULL));
1002 /* Re-check act for mo_graph edges */
1003 work_queue->push_back(MOEdgeWorkEntry(acquire));
1005 /* propagate synchronization to later actions */
// Walk the trace backwards from the end down to 'acquire', synchronizing with
// every action that happens-after it.
1006 action_list_t::reverse_iterator rit = action_trace.rbegin();
1007 for (; (*rit) != acquire; rit++) {
1008 ModelAction *propagate = *rit;
1009 if (acquire->happens_before(propagate)) {
1010 synchronize(acquire, propagate);
1011 /* Re-check 'propagate' for mo_graph edges */
1012 work_queue->push_back(MOEdgeWorkEntry(propagate));
1016 /* Break release sequence with new edges:
1017 * release --mo--> write --mo--> rf */
1018 mo_graph->addEdge(release, write);
1019 mo_graph->addEdge(write, rf);
1022 /* See if we have realized a data race */
1027 * Initialize the current action by performing one or more of the following
1028 * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
1029 * in the NodeStack, manipulating backtracking sets, allocating and
1030 * initializing clock vectors, and computing the promises to fulfill.
1032 * @param curr The current action, as passed from the user context; may be
1033 * freed/invalidated after the execution of this function, with a different
1034 * action "returned" its place (pass-by-reference)
1035 * @return True if curr is a newly-explored action; false otherwise
1037 bool ModelExecution::initialize_curr_action(ModelAction **curr)
1039 ModelAction *newcurr;
// Second half of an RMW: merge with the earlier RMWR instead of exploring anew.
1041 if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
1042 newcurr = process_rmw(*curr);
1045 if (newcurr->is_rmw())
1046 compute_promises(newcurr);
// Otherwise: stamp a sequence number and step the NodeStack.
1052 (*curr)->set_seq_number(get_next_seq_num());
1054 newcurr = node_stack->explore_action(*curr, scheduler->get_enabled_array());
1056 /* First restore type and order in case of RMW operation */
1057 if ((*curr)->is_rmwr())
1058 newcurr->copy_typeandorder(*curr);
1060 ASSERT((*curr)->get_location() == newcurr->get_location());
1061 newcurr->copy_from_new(*curr);
1063 /* Discard duplicate ModelAction; use action from NodeStack */
1066 /* Always compute new clock vector */
1067 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1070 return false; /* Action was explored previously */
1074 /* Always compute new clock vector */
1075 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1077 /* Assign most recent release fence */
1078 newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
1081 * Perform one-time actions when pushing new ModelAction onto
// First-time-only setup, dispatched by the action's kind.
1084 if (newcurr->is_write())
1085 compute_promises(newcurr);
1086 else if (newcurr->is_relseq_fixup())
1087 compute_relseq_breakwrites(newcurr);
1088 else if (newcurr->is_wait())
// Two "misc" choices model a wait that may spuriously fail (see process_mutex).
1089 newcurr->get_node()->set_misc_max(2);
1090 else if (newcurr->is_notify_one()) {
// One "misc" choice per current waiter: which thread the notify wakes.
1091 newcurr->get_node()->set_misc_max(get_safe_ptr_action(&condvar_waiters_map, newcurr->get_location())->size());
1093 return true; /* This was a new ModelAction */
1098 * @brief Establish reads-from relation between two actions
1100 * Perform basic operations involved with establishing a concrete rf relation,
1101 * including setting the ModelAction data and checking for release sequences.
1103 * @param act The action that is reading (must be a read)
1104 * @param rf The action from which we are reading (must be a write)
1106 * @return True if this read established synchronization
1108 bool ModelExecution::read_from(ModelAction *act, const ModelAction *rf)
1111 ASSERT(rf->is_write());
1113 act->set_read_from(rf);
// Acquire reads synchronize with every head of the release sequence(s)
// that the write belongs to.
1114 if (act->is_acquire()) {
1115 rel_heads_list_t release_heads;
1116 get_release_seq_heads(act, act, &release_heads);
1117 int num_heads = release_heads.size();
1118 for (unsigned int i = 0; i < release_heads.size(); i++)
1119 if (!synchronize(release_heads[i], act))
1121 return num_heads > 0;
1127 * @brief Synchronizes two actions
1129 * When A synchronizes with B (or A --sw-> B), B inherits A's clock vector.
1130 * This function performs the synchronization as well as providing other hooks
1131 * for other checks along with synchronization.
1133 * @param first The left-hand side of the synchronizes-with relation
1134 * @param second The right-hand side of the synchronizes-with relation
1135 * @return True if the synchronization was successful (i.e., was consistent
1136 * with the execution order); false otherwise
1138 bool ModelExecution::synchronize(const ModelAction *first, ModelAction *second)
// A synchronizes-with edge pointing backwards in execution order is invalid.
1140 if (*second < *first) {
1141 set_bad_synchronization();
// Re-check promises against the merged clocks, then merge the clock vectors.
1144 check_promises(first->get_tid(), second->get_cv(), first->get_cv());
1145 return second->synchronize_with(first);
1149 * Check promises and eliminate potentially-satisfying threads when a thread is
1150 * blocked (e.g., join, lock). A thread which is waiting on another thread can
1151 * no longer satisfy a promise generated from that thread.
1153 * @param blocker The thread on which a thread is waiting
1154 * @param waiting The waiting thread
1156 void ModelExecution::thread_blocking_check_promises(Thread *blocker, Thread *waiting)
1158 for (unsigned int i = 0; i < promises.size(); i++) {
1159 Promise *promise = promises[i];
// Skip promises the waiting thread could not satisfy anyway.
1160 if (!promise->thread_is_available(waiting->get_id()))
1162 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
1163 ModelAction *reader = promise->get_reader(j);
// Only promises whose reader belongs to the blocker matter here.
1164 if (reader->get_tid() != blocker->get_id())
1166 if (promise->eliminate_thread(waiting->get_id())) {
1167 /* Promise has failed */
1168 priv->failed_promise = true;
1170 /* Only eliminate the 'waiting' thread once */
1178 * @brief Check whether a model action is enabled.
1180 * Checks whether a lock or join operation would be successful (i.e., is the
1181 * lock already locked, or is the joined thread already complete). If not, put
1182 * the action in a waiter list.
1184 * @param curr is the ModelAction to check whether it is enabled.
1185 * @return a bool that indicates whether the action is enabled.
1187 bool ModelExecution::check_action_enabled(ModelAction *curr) {
1188 if (curr->is_lock()) {
// A lock is disabled while the mutex is held (handling elided in this listing).
1189 std::mutex *lock = curr->get_mutex();
1190 struct std::mutex_state *state = lock->get_state();
1193 } else if (curr->is_thread_join()) {
1194 Thread *blocking = curr->get_thread_operand();
1195 if (!blocking->is_complete()) {
// Blocking on a live thread: prune promises the waiter can no longer satisfy.
1196 thread_blocking_check_promises(blocking, get_thread(curr));
1205 * This is the heart of the model checker routine. It performs model-checking
1206 * actions corresponding to a given "current action." Among other processes, it
1207 * calculates reads-from relationships, updates synchronization clock vectors,
1208 * forms a memory_order constraints graph, and handles replay/backtrack
1209 * execution when running permutations of previously-observed executions.
1211 * @param curr The current action to process
1212 * @return The ModelAction that is actually executed; may be different than
1213 * curr; may be NULL, if the current action is not enabled to run
/*
 * NOTE(review): this listing drops many lines of the work-queue state machine
 * (numbering jumps, e.g. 1250 -> 1253, 1266 -> 1269, 1300 -> 1309): the
 * update/update_all assignments after each process_* call, the `break`s
 * between switch cases, and the closing braces are absent. The visible lines
 * are kept byte-identical; restore the gaps from upstream before compiling.
 */
1215 ModelAction * ModelExecution::check_current_action(ModelAction *curr)
1218 bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
1219 bool newly_explored = initialize_curr_action(&curr);
1223 wake_up_sleeping_actions(curr);
1225 /* Compute fairness information for CHESS yield algorithm */
1226 if (params->yieldon) {
1227 curr->get_node()->update_yield(scheduler);
1230 /* Add the action to lists before any other model-checking tasks */
1231 if (!second_part_of_rmw)
1232 add_action_to_lists(curr);
1234 /* Build may_read_from set for newly-created actions */
1235 if (newly_explored && curr->is_read())
1236 build_may_read_from(curr);
1238 /* Initialize work_queue with the "current action" work */
1239 work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
1240 while (!work_queue.empty() && !has_asserted()) {
1241 WorkQueueEntry work = work_queue.front();
1242 work_queue.pop_front();
1244 switch (work.type) {
1245 case WORK_CHECK_CURR_ACTION: {
1246 ModelAction *act = work.action;
1247 bool update = false; /* update this location's release seq's */
1248 bool update_all = false; /* update all release seq's */
/* Dispatch 'act' to the per-kind processing routines; each returns
 * whether release sequences must be re-examined */
1250 if (process_thread_action(curr))
1253 if (act->is_read() && !second_part_of_rmw && process_read(act))
1256 if (act->is_write() && process_write(act))
1259 if (act->is_fence() && process_fence(act))
1262 if (act->is_mutex_op() && process_mutex(act))
1265 if (act->is_relseq_fixup())
1266 process_relseq_fixup(curr, &work_queue)
1269 work_queue.push_back(CheckRelSeqWorkEntry(NULL));
1271 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1274 case WORK_CHECK_RELEASE_SEQ:
1275 resolve_release_sequences(work.location, &work_queue);
1277 case WORK_CHECK_MO_EDGES: {
1278 /** @todo Complete verification of work_queue */
1279 ModelAction *act = work.action;
1280 bool updated = false;
1282 if (act->is_read()) {
1283 const ModelAction *rf = act->get_reads_from();
1284 const Promise *promise = act->get_reads_from_promise();
/* Re-derive read-side modification-order edges for either a
 * concrete reads-from or a promised (future) value */
1286 if (r_modification_order(act, rf))
1288 } else if (promise) {
1289 if (r_modification_order(act, promise))
1293 if (act->is_write()) {
1294 if (w_modification_order(act, NULL))
1297 mo_graph->commitChanges();
1300 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
/* After the queue drains, record backtracking points for replay */
1309 check_curr_backtracking(curr);
1310 set_backtracking(curr);
1314 void ModelExecution::check_curr_backtracking(ModelAction *curr)
1316 Node *currnode = curr->get_node();
1317 Node *parnode = currnode->get_parent();
1319 if ((parnode && !parnode->backtrack_empty()) ||
1320 !currnode->misc_empty() ||
1321 !currnode->read_from_empty() ||
1322 !currnode->promise_empty() ||
1323 !currnode->relseq_break_empty()) {
1324 set_latest_backtrack(curr);
1328 bool ModelExecution::promises_expired() const
1330 for (unsigned int i = 0; i < promises.size(); i++) {
1331 Promise *promise = promises[i];
1332 if (promise->get_expiration() < priv->used_sequence_numbers)
1339 * This is the strongest feasibility check available.
1340 * @return whether the current trace (partial or complete) must be a prefix of
1343 bool ModelExecution::isfeasibleprefix() const
1345 return pending_rel_seqs.size() == 0 && is_feasible_prefix_ignore_relseq();
1349 * Print disagnostic information about an infeasible execution
1350 * @param prefix A string to prefix the output with; if NULL, then a default
1351 * message prefix will be provided
1353 void ModelExecution::print_infeasibility(const char *prefix) const
1357 if (mo_graph->checkForCycles())
1358 ptr += sprintf(ptr, "[mo cycle]");
1359 if (priv->failed_promise)
1360 ptr += sprintf(ptr, "[failed promise]");
1361 if (priv->too_many_reads)
1362 ptr += sprintf(ptr, "[too many reads]");
1363 if (priv->no_valid_reads)
1364 ptr += sprintf(ptr, "[no valid reads-from]");
1365 if (priv->bad_synchronization)
1366 ptr += sprintf(ptr, "[bad sw ordering]");
1367 if (promises_expired())
1368 ptr += sprintf(ptr, "[promise expired]");
1369 if (promises.size() != 0)
1370 ptr += sprintf(ptr, "[unresolved promise]");
1372 model_print("%s: %s\n", prefix ? prefix : "Infeasible", buf);
1376 * Returns whether the current completed trace is feasible, except for pending
1377 * release sequences.
1379 bool ModelExecution::is_feasible_prefix_ignore_relseq() const
1381 return !is_infeasible() && promises.size() == 0;
1385 * Check if the current partial trace is infeasible. Does not check any
1386 * end-of-execution flags, which might rule out the execution. Thus, this is
1387 * useful only for ruling an execution as infeasible.
1388 * @return whether the current partial trace is infeasible.
1390 bool ModelExecution::is_infeasible() const
1392 return mo_graph->checkForCycles() ||
1393 priv->no_valid_reads ||
1394 priv->failed_promise ||
1395 priv->too_many_reads ||
1396 priv->bad_synchronization ||
1400 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
1401 ModelAction * ModelExecution::process_rmw(ModelAction *act) {
1402 ModelAction *lastread = get_last_action(act->get_tid());
1403 lastread->process_rmw(act);
1404 if (act->is_rmw()) {
1405 if (lastread->get_reads_from())
1406 mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
1408 mo_graph->addRMWEdge(lastread->get_reads_from_promise(), lastread);
1409 mo_graph->commitChanges();
1415 * A helper function for ModelExecution::check_recency, to check if the current
1416 * thread is able to read from a different write/promise for 'params.maxreads'
1417 * number of steps and if that write/promise should become visible (i.e., is
1418 * ordered later in the modification order). This helps model memory liveness.
1420 * @param curr The current action. Must be a read.
1421 * @param rf The write/promise from which we plan to read
1422 * @param other_rf The write/promise from which we may read
1423 * @return True if we were able to read from other_rf for params.maxreads steps
1425 template <typename T, typename U>
1426 bool ModelExecution::should_read_instead(const ModelAction *curr, const T *rf, const U *other_rf) const
1428 /* Need a different write/promise */
1429 if (other_rf->equals(rf))
1432 /* Only look for "newer" writes/promises */
1433 if (!mo_graph->checkReachable(rf, other_rf))
1436 SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
1437 action_list_t *list = &(*thrd_lists)[id_to_int(curr->get_tid())];
1438 action_list_t::reverse_iterator rit = list->rbegin();
1439 ASSERT((*rit) == curr);
1440 /* Skip past curr */
1443 /* Does this write/promise work for everyone? */
1444 for (int i = 0; i < params->maxreads; i++, rit++) {
1445 ModelAction *act = *rit;
1446 if (!act->may_read_from(other_rf))
1453 * Checks whether a thread has read from the same write or Promise for too many
1454 * times without seeing the effects of a later write/Promise.
1457 * 1) there must a different write/promise that we could read from,
1458 * 2) we must have read from the same write/promise in excess of maxreads times,
1459 * 3) that other write/promise must have been in the reads_from set for maxreads times, and
1460 * 4) that other write/promise must be mod-ordered after the write/promise we are reading.
1462 * If so, we decide that the execution is no longer feasible.
1464 * @param curr The current action. Must be a read.
1465 * @param rf The ModelAction/Promise from which we might read.
1466 * @return True if the read should succeed; false otherwise
1468 template <typename T>
1469 bool ModelExecution::check_recency(ModelAction *curr, const T *rf) const
1471 if (!params->maxreads)
1474 //NOTE: Next check is just optimization, not really necessary....
1475 if (curr->get_node()->get_read_from_past_size() +
1476 curr->get_node()->get_read_from_promise_size() <= 1)
1479 SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
1480 int tid = id_to_int(curr->get_tid());
1481 ASSERT(tid < (int)thrd_lists->size());
1482 action_list_t *list = &(*thrd_lists)[tid];
1483 action_list_t::reverse_iterator rit = list->rbegin();
1484 ASSERT((*rit) == curr);
1485 /* Skip past curr */
1488 action_list_t::reverse_iterator ritcopy = rit;
1489 /* See if we have enough reads from the same value */
1490 for (int count = 0; count < params->maxreads; ritcopy++, count++) {
1491 if (ritcopy == list->rend())
1493 ModelAction *act = *ritcopy;
1494 if (!act->is_read())
1496 if (act->get_reads_from_promise() && !act->get_reads_from_promise()->equals(rf))
1498 if (act->get_reads_from() && !act->get_reads_from()->equals(rf))
1500 if (act->get_node()->get_read_from_past_size() +
1501 act->get_node()->get_read_from_promise_size() <= 1)
1504 for (int i = 0; i < curr->get_node()->get_read_from_past_size(); i++) {
1505 const ModelAction *write = curr->get_node()->get_read_from_past(i);
1506 if (should_read_instead(curr, rf, write))
1507 return false; /* liveness failure */
1509 for (int i = 0; i < curr->get_node()->get_read_from_promise_size(); i++) {
1510 const Promise *promise = curr->get_node()->get_read_from_promise(i);
1511 if (should_read_instead(curr, rf, promise))
1512 return false; /* liveness failure */
1518 * @brief Updates the mo_graph with the constraints imposed from the current
1521 * Basic idea is the following: Go through each other thread and find
1522 * the last action that happened before our read. Two cases:
1524 * -# The action is a write: that write must either occur before
1525 * the write we read from or be the write we read from.
1526 * -# The action is a read: the write that that action read from
1527 * must occur before the write we read from or be the same write.
1529 * @param curr The current action. Must be a read.
1530 * @param rf The ModelAction or Promise that curr reads from. Must be a write.
1531 * @return True if modification order edges were added; false otherwise
/*
 * NOTE(review): this listing is missing several lines (numbering jumps, e.g.
 * 1545 -> 1547, 1563 -> 1568, 1599 -> 1604): the declarations of 'i' and
 * 'added', the break/continue statements after each addEdge branch, the
 * closing braces, and the final 'return added;' are absent. Lines below are
 * kept byte-identical; restore the gaps from upstream before compiling.
 */
1533 template <typename rf_type>
1534 bool ModelExecution::r_modification_order(ModelAction *curr, const rf_type *rf)
1536 SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
1539 ASSERT(curr->is_read());
1541 /* Last SC fence in the current thread */
1542 ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1543 ModelAction *last_sc_write = NULL;
1544 if (curr->is_seqcst())
1545 last_sc_write = get_last_seq_cst_write(curr);
1547 /* Iterate over all threads */
1548 for (i = 0; i < thrd_lists->size(); i++) {
1549 /* Last SC fence in thread i */
1550 ModelAction *last_sc_fence_thread_local = NULL;
1551 if (int_to_id((int)i) != curr->get_tid())
1552 last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(i), NULL);
1554 /* Last SC fence in thread i, before last SC fence in current thread */
1555 ModelAction *last_sc_fence_thread_before = NULL;
1556 if (last_sc_fence_local)
1557 last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1559 /* Iterate over actions in thread, starting from most recent */
1560 action_list_t *list = &(*thrd_lists)[i];
1561 action_list_t::reverse_iterator rit;
1562 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1563 ModelAction *act = *rit;
1568 /* Don't want to add reflexive edges on 'rf' */
1569 if (act->equals(rf)) {
1570 if (act->happens_before(curr))
1576 if (act->is_write()) {
1577 /* C++, Section 29.3 statement 5 */
1578 if (curr->is_seqcst() && last_sc_fence_thread_local &&
1579 *act < *last_sc_fence_thread_local) {
1580 added = mo_graph->addEdge(act, rf) || added;
1583 /* C++, Section 29.3 statement 4 */
1584 else if (act->is_seqcst() && last_sc_fence_local &&
1585 *act < *last_sc_fence_local) {
1586 added = mo_graph->addEdge(act, rf) || added;
1589 /* C++, Section 29.3 statement 6 */
1590 else if (last_sc_fence_thread_before &&
1591 *act < *last_sc_fence_thread_before) {
1592 added = mo_graph->addEdge(act, rf) || added;
1597 /* C++, Section 29.3 statement 3 (second subpoint) */
1598 if (curr->is_seqcst() && last_sc_write && act == last_sc_write) {
1599 added = mo_graph->addEdge(act, rf) || added;
1604 * Include at most one act per-thread that "happens
1607 if (act->happens_before(curr)) {
1608 if (act->is_write()) {
1609 added = mo_graph->addEdge(act, rf) || added;
1611 const ModelAction *prevrf = act->get_reads_from();
1612 const Promise *prevrf_promise = act->get_reads_from_promise();
1614 if (!prevrf->equals(rf))
1615 added = mo_graph->addEdge(prevrf, rf) || added;
1616 } else if (!prevrf_promise->equals(rf)) {
1617 added = mo_graph->addEdge(prevrf_promise, rf) || added;
1626 * All compatible, thread-exclusive promises must be ordered after any
1627 * concrete loads from the same thread
1629 for (unsigned int i = 0; i < promises.size(); i++)
1630 if (promises[i]->is_compatible_exclusive(curr))
1631 added = mo_graph->addEdge(rf, promises[i]) || added;
1637 * Updates the mo_graph with the constraints imposed from the current write.
1639 * Basic idea is the following: Go through each other thread and find
1640 * the latest action that happened before our write. Two cases:
1642 * (1) The action is a write => that write must occur before
1645 * (2) The action is a read => the write that that action read from
1646 * must occur before the current write.
1648 * This method also handles two other issues:
1650 * (I) Sequential Consistency: Making sure that if the current write is
1651 * seq_cst, that it occurs after the previous seq_cst write.
1653 * (II) Sending the write back to non-synchronizing reads.
1655 * @param curr The current action. Must be a write.
1656 * @param send_fv A vector for stashing reads to which we may pass our future
1657 * value. If NULL, then don't record any future values.
1658 * @return True if modification order edges were added; false otherwise
/*
 * NOTE(review): numbering gaps (e.g. 1672 -> 1676, 1704 -> 1712, 1757 -> 1764)
 * indicate this listing dropped the 'i'/'added' declarations, the break and
 * continue statements for each branch, closing braces, and the final
 * 'return added;'. Visible lines are byte-identical; restore the gaps from
 * upstream before compiling.
 */
1660 bool ModelExecution::w_modification_order(ModelAction *curr, ModelVector<ModelAction *> *send_fv)
1662 SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
1665 ASSERT(curr->is_write());
1667 if (curr->is_seqcst()) {
1668 /* We have to at least see the last sequentially consistent write,
1669 so we are initialized. */
1670 ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
1671 if (last_seq_cst != NULL) {
1672 added = mo_graph->addEdge(last_seq_cst, curr) || added;
1676 /* Last SC fence in the current thread */
1677 ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1679 /* Iterate over all threads */
1680 for (i = 0; i < thrd_lists->size(); i++) {
1681 /* Last SC fence in thread i, before last SC fence in current thread */
1682 ModelAction *last_sc_fence_thread_before = NULL;
1683 if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
1684 last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1686 /* Iterate over actions in thread, starting from most recent */
1687 action_list_t *list = &(*thrd_lists)[i];
1688 action_list_t::reverse_iterator rit;
1689 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1690 ModelAction *act = *rit;
1693 * 1) If RMW and it actually read from something, then we
1694 * already have all relevant edges, so just skip to next
1697 * 2) If RMW and it didn't read from anything, we should add
1698 * whatever edge we can get to speed up convergence.
1700 * 3) If normal write, we need to look at earlier actions, so
1701 * continue processing list.
1703 if (curr->is_rmw()) {
1704 if (curr->get_reads_from() != NULL)
1712 /* C++, Section 29.3 statement 7 */
1713 if (last_sc_fence_thread_before && act->is_write() &&
1714 *act < *last_sc_fence_thread_before) {
1715 added = mo_graph->addEdge(act, curr) || added;
1720 * Include at most one act per-thread that "happens
1723 if (act->happens_before(curr)) {
1725 * Note: if act is RMW, just add edge:
1727 * The following edge should be handled elsewhere:
1728 * readfrom(act) --mo--> act
1730 if (act->is_write())
1731 added = mo_graph->addEdge(act, curr) || added;
1732 else if (act->is_read()) {
1733 //if previous read accessed a null, just keep going
1734 if (act->get_reads_from() == NULL)
1736 added = mo_graph->addEdge(act->get_reads_from(), curr) || added;
1739 } else if (act->is_read() && !act->could_synchronize_with(curr) &&
1740 !act->same_thread(curr)) {
1741 /* We have an action that:
1742 (1) did not happen before us
1743 (2) is a read and we are a write
1744 (3) cannot synchronize with us
1745 (4) is in a different thread
1747 that read could potentially read from our write. Note that
1748 these checks are overly conservative at this point, we'll
1749 do more checks before actually removing the
1753 if (send_fv && thin_air_constraint_may_allow(curr, act)) {
1754 if (!is_infeasible())
1755 send_fv->push_back(act);
1756 else if (curr->is_rmw() && act->is_rmw() && curr->get_reads_from() && curr->get_reads_from() == act->get_reads_from())
1757 add_future_value(curr, act);
1764 * All compatible, thread-exclusive promises must be ordered after any
1765 * concrete stores to the same thread, or else they can be merged with
1768 for (unsigned int i = 0; i < promises.size(); i++)
1769 if (promises[i]->is_compatible_exclusive(curr))
1770 added = mo_graph->addEdge(curr, promises[i]) || added;
1775 /** Arbitrary reads from the future are not allowed. Section 29.3
1776 * part 9 places some constraints. This method checks one result of constraint
1777 * constraint. Others require compiler support. */
1778 bool ModelExecution::thin_air_constraint_may_allow(const ModelAction *writer, const ModelAction *reader) const
1780 if (!writer->is_rmw())
1783 if (!reader->is_rmw())
1786 for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
1787 if (search == reader)
1789 if (search->get_tid() == reader->get_tid() &&
1790 search->happens_before(reader))
1798 * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
1799 * some constraints. This method checks one the following constraint (others
1800 * require compiler support):
1802 * If X --hb-> Y --mo-> Z, then X should not read from Z.
1804 bool ModelExecution::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
1806 SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(reader->get_location());
1808 /* Iterate over all threads */
1809 for (i = 0; i < thrd_lists->size(); i++) {
1810 const ModelAction *write_after_read = NULL;
1812 /* Iterate over actions in thread, starting from most recent */
1813 action_list_t *list = &(*thrd_lists)[i];
1814 action_list_t::reverse_iterator rit;
1815 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1816 ModelAction *act = *rit;
1818 /* Don't disallow due to act == reader */
1819 if (!reader->happens_before(act) || reader == act)
1821 else if (act->is_write())
1822 write_after_read = act;
1823 else if (act->is_read() && act->get_reads_from() != NULL)
1824 write_after_read = act->get_reads_from();
1827 if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
1834 * Finds the head(s) of the release sequence(s) containing a given ModelAction.
1835 * The ModelAction under consideration is expected to be taking part in
1836 * release/acquire synchronization as an object of the "reads from" relation.
1837 * Note that this can only provide release sequence support for RMW chains
1838 * which do not read from the future, as those actions cannot be traced until
1839 * their "promise" is fulfilled. Similarly, we may not even establish the
1840 * presence of a release sequence with certainty, as some modification order
1841 * constraints may be decided further in the future. Thus, this function
1842 * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
1843 * and a boolean representing certainty.
1845 * @param rf The action that might be part of a release sequence. Must be a
1847 * @param release_heads A pass-by-reference style return parameter. After
1848 * execution of this function, release_heads will contain the heads of all the
1849 * relevant release sequences, if any exists with certainty
1850 * @param pending A pass-by-reference style return parameter which is only used
1851 * when returning false (i.e., uncertain). Returns most information regarding
1852 * an uncertain release sequence, including any write operations that might
1853 * break the sequence.
1854 * @return true, if the ModelExecution is certain that release_heads is complete;
/*
 * NOTE(review): this listing drops many lines (numbering jumps, e.g.
 * 1884 -> 1887, 1948 -> 1951, 1995 -> end): the early 'return true' after the
 * cycle check, the RMW-chain/future-read branch structure, the break/continue
 * statements inside the scan loops, the closing braces, and the final
 * 'return certain;' are absent. Visible lines are kept byte-identical;
 * restore the gaps from upstream before compiling.
 */
1857 bool ModelExecution::release_seq_heads(const ModelAction *rf,
1858 rel_heads_list_t *release_heads,
1859 struct release_seq *pending) const
1861 /* Only check for release sequences if there are no cycles */
1862 if (mo_graph->checkForCycles())
1865 for ( ; rf != NULL; rf = rf->get_reads_from()) {
1866 ASSERT(rf->is_write());
1868 if (rf->is_release())
1869 release_heads->push_back(rf);
1870 else if (rf->get_last_fence_release())
1871 release_heads->push_back(rf->get_last_fence_release());
1873 break; /* End of RMW chain */
1875 /** @todo Need to be smarter here... In the linux lock
1876 * example, this will run to the beginning of the program for
1878 /** @todo The way to be smarter here is to keep going until 1
1879 * thread has a release preceded by an acquire and you've seen
1882 /* acq_rel RMW is a sufficient stopping condition */
1883 if (rf->is_acquire() && rf->is_release())
1884 return true; /* complete */
1887 /* read from future: need to settle this later */
1889 return false; /* incomplete */
1892 if (rf->is_release())
1893 return true; /* complete */
1895 /* else relaxed write
1896 * - check for fence-release in the same thread (29.8, stmt. 3)
1897 * - check modification order for contiguous subsequence
1898 * -> rf must be same thread as release */
1900 const ModelAction *fence_release = rf->get_last_fence_release();
1901 /* Synchronize with a fence-release unconditionally; we don't need to
1902 * find any more "contiguous subsequence..." for it */
1904 release_heads->push_back(fence_release);
1906 int tid = id_to_int(rf->get_tid());
1907 SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(rf->get_location());
1908 action_list_t *list = &(*thrd_lists)[tid];
1909 action_list_t::const_reverse_iterator rit;
1911 /* Find rf in the thread list */
1912 rit = std::find(list->rbegin(), list->rend(), rf);
1913 ASSERT(rit != list->rend());
1915 /* Find the last {write,fence}-release */
1916 for (; rit != list->rend(); rit++) {
1917 if (fence_release && *(*rit) < *fence_release)
1919 if ((*rit)->is_release())
1922 if (rit == list->rend()) {
1923 /* No write-release in this thread */
1924 return true; /* complete */
1925 } else if (fence_release && *(*rit) < *fence_release) {
1926 /* The fence-release is more recent (and so, "stronger") than
1927 * the most recent write-release */
1928 return true; /* complete */
1929 } /* else, need to establish contiguous release sequence */
1930 ModelAction *release = *rit;
1932 ASSERT(rf->same_thread(release));
1934 pending->writes.clear();
1936 bool certain = true;
1937 for (unsigned int i = 0; i < thrd_lists->size(); i++) {
1938 if (id_to_int(rf->get_tid()) == (int)i)
1940 list = &(*thrd_lists)[i];
1942 /* Can we ensure no future writes from this thread may break
1943 * the release seq? */
1944 bool future_ordered = false;
1946 ModelAction *last = get_last_action(int_to_id(i));
1947 Thread *th = get_thread(int_to_id(i));
1948 if ((last && rf->happens_before(last)) ||
1951 future_ordered = true;
1953 ASSERT(!th->is_model_thread() || future_ordered);
1955 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1956 const ModelAction *act = *rit;
1957 /* Reach synchronization -> this thread is complete */
1958 if (act->happens_before(release))
1960 if (rf->happens_before(act)) {
1961 future_ordered = true;
1965 /* Only non-RMW writes can break release sequences */
1966 if (!act->is_write() || act->is_rmw())
1969 /* Check modification order */
1970 if (mo_graph->checkReachable(rf, act)) {
1971 /* rf --mo--> act */
1972 future_ordered = true;
1975 if (mo_graph->checkReachable(act, release))
1976 /* act --mo--> release */
1978 if (mo_graph->checkReachable(release, act) &&
1979 mo_graph->checkReachable(act, rf)) {
1980 /* release --mo-> act --mo--> rf */
1981 return true; /* complete */
1983 /* act may break release sequence */
1984 pending->writes.push_back(act);
1987 if (!future_ordered)
1988 certain = false; /* This thread is uncertain */
1992 release_heads->push_back(release);
1993 pending->writes.clear();
1995 pending->release = release;
2002 * An interface for getting the release sequence head(s) with which a
2003 * given ModelAction must synchronize. This function only returns a non-empty
2004 * result when it can locate a release sequence head with certainty. Otherwise,
2005 * it may mark the internal state of the ModelExecution so that it will handle
2006 * the release sequence at a later time, causing @a acquire to update its
2007 * synchronization at some later point in execution.
2009 * @param acquire The 'acquire' action that may synchronize with a release
2011 * @param read The read action that may read from a release sequence; this may
2012 * be the same as acquire, or else an earlier action in the same thread (i.e.,
2013 * when 'acquire' is a fence-acquire)
2014 * @param release_heads A pass-by-reference return parameter. Will be filled
2015 * with the head(s) of the release sequence(s), if they exists with certainty.
2016 * @see ModelExecution::release_seq_heads
2018 void ModelExecution::get_release_seq_heads(ModelAction *acquire,
2019 ModelAction *read, rel_heads_list_t *release_heads)
2021 const ModelAction *rf = read->get_reads_from();
2022 struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
2023 sequence->acquire = acquire;
2024 sequence->read = read;
2026 if (!release_seq_heads(rf, release_heads, sequence)) {
2027 /* add act to 'lazy checking' list */
2028 pending_rel_seqs.push_back(sequence);
2030 snapshot_free(sequence);
2035 * Attempt to resolve all stashed operations that might synchronize with a
2036 * release sequence for a given location. This implements the "lazy" portion of
2037 * determining whether or not a release sequence was contiguous, since not all
2038 * modification order information is present at the time an action occurs.
2040 * @param location The location/object that should be checked for release
2041 * sequence resolutions. A NULL value means to check all locations.
2042 * @param work_queue The work queue to which to add work items as they are
2044 * @return True if any updates occurred (new synchronization, new mo_graph
/*
 * NOTE(review): numbering gaps (e.g. 2057 -> 2062, 2068 -> 2072, 2085 -> 2090)
 * show this listing dropped the 'it++/continue' for the skipped-location case,
 * the 'bool complete' declaration, the 'updated = true' bookkeeping, the
 * branch that only erases completed sequences, closing braces, and the final
 * 'return updated;'. Visible lines are byte-identical; restore the gaps from
 * upstream before compiling.
 */
2047 bool ModelExecution::resolve_release_sequences(void *location, work_queue_t *work_queue)
2049 bool updated = false;
2050 SnapVector<struct release_seq *>::iterator it = pending_rel_seqs.begin();
2051 while (it != pending_rel_seqs.end()) {
2052 struct release_seq *pending = *it;
2053 ModelAction *acquire = pending->acquire;
2054 const ModelAction *read = pending->read;
2056 /* Only resolve sequences on the given location, if provided */
2057 if (location && read->get_location() != location) {
2062 const ModelAction *rf = read->get_reads_from();
2063 rel_heads_list_t release_heads;
2065 complete = release_seq_heads(rf, &release_heads, pending);
2066 for (unsigned int i = 0; i < release_heads.size(); i++)
2067 if (!acquire->has_synchronized_with(release_heads[i]))
2068 if (synchronize(release_heads[i], acquire))
2072 /* Re-check all pending release sequences */
2073 work_queue->push_back(CheckRelSeqWorkEntry(NULL));
2074 /* Re-check read-acquire for mo_graph edges */
2075 if (acquire->is_read())
2076 work_queue->push_back(MOEdgeWorkEntry(acquire));
2078 /* propagate synchronization to later actions */
2079 action_list_t::reverse_iterator rit = action_trace.rbegin();
2080 for (; (*rit) != acquire; rit++) {
2081 ModelAction *propagate = *rit;
2082 if (acquire->happens_before(propagate)) {
2083 synchronize(acquire, propagate);
2084 /* Re-check 'propagate' for mo_graph edges */
2085 work_queue->push_back(MOEdgeWorkEntry(propagate));
2090 it = pending_rel_seqs.erase(it);
2091 snapshot_free(pending);
2097 // If we resolved promises or data races, see if we have realized a data race.
2104 * Performs various bookkeeping operations for the current ModelAction. For
2105 * instance, adds action to the per-object, per-thread action vector and to the
2106 * action trace list of all thread actions.
2108 * @param act is the ModelAction to add.
/*
 * NOTE(review): numbering gaps (e.g. 2119 -> 2121, 2130 -> 2132, 2136 -> 2138)
 * indicate this listing dropped the 'int uninit_id = -1;' declaration, the
 * 'if (uninit)' guards around each uninit insertion, and the closing braces.
 * Visible lines are byte-identical; restore the gaps from upstream before
 * compiling.
 */
2110 void ModelExecution::add_action_to_lists(ModelAction *act)
2112 int tid = id_to_int(act->get_tid());
2113 ModelAction *uninit = NULL;
2115 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
/* First access to an atomic variable: synthesize an uninitialized write
 * so later reads have something to read from */
2116 if (list->empty() && act->is_atomic_var()) {
2117 uninit = get_uninitialized_action(act);
2118 uninit_id = id_to_int(uninit->get_tid());
2119 list->push_front(uninit);
2121 list->push_back(act);
2123 action_trace.push_back(act);
2125 action_trace.push_front(uninit);
2127 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
2128 if (tid >= (int)vec->size())
2129 vec->resize(priv->next_thread_id);
2130 (*vec)[tid].push_back(act);
2132 (*vec)[uninit_id].push_front(uninit);
2134 if ((int)thrd_last_action.size() <= tid)
2135 thrd_last_action.resize(get_num_threads());
2136 thrd_last_action[tid] = act;
2138 thrd_last_action[uninit_id] = uninit;
2140 if (act->is_fence() && act->is_release()) {
2141 if ((int)thrd_last_fence_release.size() <= tid)
2142 thrd_last_fence_release.resize(get_num_threads());
2143 thrd_last_fence_release[tid] = act;
/* Wait operations are also indexed under their mutex location so lock
 * bookkeeping can find them */
2146 if (act->is_wait()) {
2147 void *mutex_loc = (void *) act->get_value();
2148 get_safe_ptr_action(obj_map, mutex_loc)->push_back(act);
2150 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, mutex_loc);
2151 if (tid >= (int)vec->size())
2152 vec->resize(priv->next_thread_id);
2153 (*vec)[tid].push_back(act);
2158 * @brief Get the last action performed by a particular Thread
2159 * @param tid The thread ID of the Thread in question
2160 * @return The last action in the thread
2162 ModelAction * ModelExecution::get_last_action(thread_id_t tid) const
2164 int threadid = id_to_int(tid);
2165 if (threadid < (int)thrd_last_action.size())
2166 return thrd_last_action[id_to_int(tid)];
2172 * @brief Get the last fence release performed by a particular Thread
2173 * @param tid The thread ID of the Thread in question
2174 * @return The last fence release in the thread, if one exists; NULL otherwise
2176 ModelAction * ModelExecution::get_last_fence_release(thread_id_t tid) const
2178 int threadid = id_to_int(tid);
2179 if (threadid < (int)thrd_last_fence_release.size())
2180 return thrd_last_fence_release[id_to_int(tid)];
2186 * Gets the last memory_order_seq_cst write (in the total global sequence)
2187 * performed on a particular object (i.e., memory location), not including the
2189 * @param curr The current ModelAction; also denotes the object location to
2191 * @return The last seq_cst write
2193 ModelAction * ModelExecution::get_last_seq_cst_write(ModelAction *curr) const
2195 void *location = curr->get_location();
2196 action_list_t *list = get_safe_ptr_action(obj_map, location);
2197 /* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
2198 action_list_t::reverse_iterator rit;
/* NOTE(review): this first scan has no rend() guard -- it assumes curr
 * has already been appended to the per-location list. If a caller ever
 * passes an action not in the list, the iterator walks past rend()
 * (undefined behavior). Confirm all call sites append curr first. */
2199 for (rit = list->rbegin(); (*rit) != curr; rit++)
2201 rit++; /* Skip past curr */
/* Continue backwards from just before curr, looking for the most
 * recent seq_cst write to the same location. */
2202 for ( ; rit != list->rend(); rit++)
2203 if ((*rit)->is_write() && (*rit)->is_seqcst())
2209 * Gets the last memory_order_seq_cst fence (in the total global sequence)
2210 * performed in a particular thread, prior to a particular fence.
2211 * @param tid The ID of the thread to check
2212 * @param before_fence The fence from which to begin the search; if NULL, then
2213 * search for the most recent fence in the thread.
2214 * @return The last prior seq_cst fence in the thread, if exists; otherwise, NULL
2216 ModelAction * ModelExecution::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
2218 /* All fences should have NULL location */
2219 action_list_t *list = get_safe_ptr_action(obj_map, NULL);
2220 action_list_t::reverse_iterator rit = list->rbegin();
/* If a starting fence was given, first rewind the iterator to it; the
 * ASSERT below documents that before_fence must be in the list. */
2223 for (; rit != list->rend(); rit++)
2224 if (*rit == before_fence)
2227 ASSERT(*rit == before_fence);
/* Now scan backwards for the nearest earlier seq_cst fence in 'tid'. */
2231 for (; rit != list->rend(); rit++)
2232 if ((*rit)->is_fence() && (tid == (*rit)->get_tid()) && (*rit)->is_seqcst())
2238 * Gets the last unlock operation performed on a particular mutex (i.e., memory
2239 * location). This function identifies the mutex according to the current
2240 * action, which is presumed to perform on the same mutex.
2241 * @param curr The current ModelAction; also denotes the object location to
2243 * @return The last unlock operation
2245 ModelAction * ModelExecution::get_last_unlock(ModelAction *curr) const
2247 void *location = curr->get_location();
2248 action_list_t *list = get_safe_ptr_action(obj_map, location);
2249 /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
2250 action_list_t::reverse_iterator rit;
/* A wait also counts: waiting releases the mutex before blocking. */
2251 for (rit = list->rbegin(); rit != list->rend(); rit++)
2252 if ((*rit)->is_unlock() || (*rit)->is_wait())
/**
 * @brief Get the most recent action in a thread, falling back to the
 * action that created the thread when it has performed none yet.
 * @param tid The thread whose parent action we want
 * @return The last action, or the thread's creation action
 */
2257 ModelAction * ModelExecution::get_parent_action(thread_id_t tid) const
2259 ModelAction *parent = get_last_action(tid);
/* Fallback (guard condition elided in this view): use the
 * THREAD_CREATE event recorded on the Thread object. */
2261 parent = get_thread(tid)->get_creation();
2266 * Returns the clock vector for a given thread.
2267 * @param tid The thread whose clock vector we want
2268 * @return Desired clock vector
2270 ClockVector * ModelExecution::get_cv(thread_id_t tid) const
/* The thread's current clocks live on its most recent (parent) action. */
2272 return get_parent_action(tid)->get_cv();
2276 * @brief Find the promise (if any) to resolve for the current action and
2277 * remove it from the pending promise vector
2278 * @param curr The current ModelAction. Should be a write.
2279 * @return The Promise to resolve, if any; otherwise NULL
2281 Promise * ModelExecution::pop_promise_to_resolve(const ModelAction *curr)
/* The Node records (by index) which pending promise this action was
 * chosen to satisfy; pop the first such promise from the vector. */
2283 for (unsigned int i = 0; i < promises.size(); i++)
2284 if (curr->get_node()->get_promise(i)) {
2285 Promise *ret = promises[i];
2286 promises.erase(promises.begin() + i);
2293 * Resolve a Promise with a current write.
2294 * @param write The ModelAction that is fulfilling Promises
2295 * @param promise The Promise to resolve
2296 * @return True if the Promise was successfully resolved; false otherwise
2298 bool ModelExecution::resolve_promise(ModelAction *write, Promise *promise)
2300 ModelVector<ModelAction *> actions_to_check;
/* Point every read that speculated on this promised value at the
 * concrete write that now fulfills it. */
2302 for (unsigned int i = 0; i < promise->get_num_readers(); i++) {
2303 ModelAction *read = promise->get_reader(i);
2304 read_from(read, write);
2305 actions_to_check.push_back(read);
2307 /* Make sure the promise's value matches the write's value */
2308 ASSERT(promise->is_compatible(write) && promise->same_value(write));
/* Merging the promise node into the mo_graph can reveal a cycle, in
 * which case this execution has failed the promise. */
2309 if (!mo_graph->resolvePromise(promise, write))
2310 priv->failed_promise = true;
2313 * @todo It is possible to end up in an inconsistent state, where a
2314 * "resolved" promise may still be referenced if
2315 * CycleGraph::resolvePromise() failed, so don't delete 'promise'.
2317 * Note that the inconsistency only matters when dumping mo_graph to
2323 //Check whether reading these writes has made threads unable to
2325 for (unsigned int i = 0; i < actions_to_check.size(); i++) {
2326 ModelAction *read = actions_to_check[i];
2327 mo_check_promises(read, true);
2334 * Compute the set of promises that could potentially be satisfied by this
2335 * action. Note that the set computation actually appears in the Node, not in
2337 * @param curr The ModelAction that may satisfy promises
2339 void ModelExecution::compute_promises(ModelAction *curr)
2341 for (unsigned int i = 0; i < promises.size(); i++) {
2342 Promise *promise = promises[i];
/* The candidate write must match the promise's location/thread
 * constraints and its promised value. */
2343 if (!promise->is_compatible(curr) || !promise->same_value(curr))
2346 bool satisfy = true;
/* curr cannot satisfy a promise whose readers already ordered
 * themselves before (or could synchronize with) curr. */
2347 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2348 const ModelAction *act = promise->get_reader(j);
2349 if (act->happens_before(curr) ||
2350 act->could_synchronize_with(curr)) {
/* Record the satisfiable promise index on curr's Node. */
2356 curr->get_node()->set_promise(i);
2360 /** Checks promises in response to change in ClockVector Threads. */
2361 void ModelExecution::check_promises(thread_id_t tid, ClockVector *old_cv, ClockVector *merge_cv)
2363 for (unsigned int i = 0; i < promises.size(); i++) {
2364 Promise *promise = promises[i];
2365 if (!promise->thread_is_available(tid))
2367 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2368 const ModelAction *act = promise->get_reader(j);
/* The merge newly synchronized this thread past the promised read:
 * tid can no longer be the thread that supplies the write. */
2369 if ((!old_cv || !old_cv->synchronized_since(act)) &&
2370 merge_cv->synchronized_since(act)) {
2371 if (promise->eliminate_thread(tid)) {
2372 /* Promise has failed */
2373 priv->failed_promise = true;
/* Mark the execution infeasible if disabling a thread has left any
 * pending promise with no thread able to satisfy it. */
2381 void ModelExecution::check_promises_thread_disabled()
2383 for (unsigned int i = 0; i < promises.size(); i++) {
2384 Promise *promise = promises[i];
2385 if (promise->has_failed()) {
2386 priv->failed_promise = true;
2393 * @brief Checks promises in response to addition to modification order for
2396 * We test whether threads are still available for satisfying promises after an
2397 * addition to our modification order constraints. Those that are unavailable
2398 * are "eliminated". Once all threads are eliminated from satisfying a promise,
2399 * that promise has failed.
2401 * @param act The ModelAction which updated the modification order
2402 * @param is_read_check Should be true if act is a read and we must check for
2403 * updates to the store from which it read (there is a distinction here for
2404 * RMW's, which are both a load and a store)
2406 void ModelExecution::mo_check_promises(const ModelAction *act, bool is_read_check)
/* For a read-check, the relevant store is the one act read from;
 * otherwise act itself is the store. */
2408 const ModelAction *write = is_read_check ? act->get_reads_from() : act;
2410 for (unsigned int i = 0; i < promises.size(); i++) {
2411 Promise *promise = promises[i];
2413 // Is this promise on the same location?
2414 if (!promise->same_location(write))
/* A promised read that happens before act is constrained by this
 * write; ask the mo_graph whether that dooms the promise. */
2417 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2418 const ModelAction *pread = promise->get_reader(j);
2419 if (!pread->happens_before(act))
2421 if (mo_graph->checkPromise(write, promise)) {
2422 priv->failed_promise = true;
2428 // Don't do any lookups twice for the same thread
2429 if (!promise->thread_is_available(act->get_tid()))
/* If the promise is mo-reachable from this write, re-check it. */
2432 if (mo_graph->checkReachable(promise, write)) {
2433 if (mo_graph->checkPromise(write, promise)) {
2434 priv->failed_promise = true;
2442 * Compute the set of writes that may break the current pending release
2443 * sequence. This information is extracted from previous release sequence
2446 * @param curr The current ModelAction. Must be a release sequence fixup
2449 void ModelExecution::compute_relseq_breakwrites(ModelAction *curr)
2451 if (pending_rel_seqs.empty())
/* Record each candidate breaking write on curr's Node so backtracking
 * can explore every way the sequence might be broken. */
2454 struct release_seq *pending = pending_rel_seqs.back();
2455 for (unsigned int i = 0; i < pending->writes.size(); i++) {
2456 const ModelAction *write = pending->writes[i];
2457 curr->get_node()->add_relseq_break(write);
2460 /* NULL means don't break the sequence; just synchronize */
2461 curr->get_node()->add_relseq_break(NULL);
2465 * Build up an initial set of all past writes that this 'read' action may read
2466 * from, as well as any previously-observed future values that must still be valid.
2468 * @param curr is the current ModelAction that we are exploring; it must be a
2471 void ModelExecution::build_may_read_from(ModelAction *curr)
2473 SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
2475 ASSERT(curr->is_read());
2477 ModelAction *last_sc_write = NULL;
2479 if (curr->is_seqcst())
2480 last_sc_write = get_last_seq_cst_write(curr);
2482 /* Iterate over all threads */
2483 for (i = 0; i < thrd_lists->size(); i++) {
2484 /* Iterate over actions in thread, starting from most recent */
2485 action_list_t *list = &(*thrd_lists)[i];
2486 action_list_t::reverse_iterator rit;
2487 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2488 ModelAction *act = *rit;
2490 /* Only consider 'write' actions */
2491 if (!act->is_write() || act == curr)
2494 /* Don't consider more than one seq_cst write if we are a seq_cst read. */
2495 bool allow_read = true;
/* An SC read may not read an SC write (or a write hb-before the last
 * SC write) other than the last SC write itself. */
2497 if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
2499 else if (curr->get_sleep_flag() && !curr->is_seqcst() && !sleep_can_read_from(curr, act))
2503 /* Only add feasible reads */
/* Tentatively apply the mo constraints of reading from act; keep act
 * only if that doesn't make the execution infeasible, then roll back. */
2504 mo_graph->startChanges();
2505 r_modification_order(curr, act);
2506 if (!is_infeasible())
2507 curr->get_node()->add_read_from_past(act);
2508 mo_graph->rollbackChanges();
2511 /* Include at most one act per-thread that "happens before" curr */
2512 if (act->happens_before(curr))
2517 /* Inherit existing, promised future values */
2518 for (i = 0; i < promises.size(); i++) {
2519 const Promise *promise = promises[i];
2520 const ModelAction *promise_read = promise->get_reader(0);
2521 if (promise_read->same_var(curr)) {
2522 /* Only add feasible future-values */
2523 mo_graph->startChanges();
2524 r_modification_order(curr, promise);
2525 if (!is_infeasible())
2526 curr->get_node()->add_read_from_promise(promise_read);
2527 mo_graph->rollbackChanges();
2531 /* We may find no valid may-read-from only if the execution is doomed */
2532 if (!curr->get_node()->read_from_size()) {
2533 priv->no_valid_reads = true;
2537 if (DBG_ENABLED()) {
2538 model_print("Reached read action:\n");
2540 model_print("Printing read_from_past\n");
2541 curr->get_node()->print_read_from_past();
2542 model_print("End printing read_from_past\n");
/* Decide whether a sleeping thread's read 'curr' may read from 'write'.
 * Walks back through the rf chain of RMWs: presumably allows the read
 * if some release write in the chain was made while curr's thread was
 * on the sleep set -- TODO confirm against the elided return paths. */
2548 bool ModelExecution::sleep_can_read_from(ModelAction *curr, const ModelAction *write)
2548 for ( ; write != NULL; write = write->get_reads_from()) {
2549 /* UNINIT actions don't have a Node, and they never sleep */
2550 if (write->is_uninitialized())
2552 Node *prevnode = write->get_node()->get_parent();
2554 bool thread_sleep = prevnode->enabled_status(curr->get_tid()) == THREAD_SLEEP_SET;
2555 if (write->is_release() && thread_sleep)
2557 if (!write->is_rmw())
2564 * @brief Get an action representing an uninitialized atomic
2566 * This function may create a new one or try to retrieve one from the NodeStack
2568 * @param curr The current action, which prompts the creation of an UNINIT action
2569 * @return A pointer to the UNINIT ModelAction
2571 ModelAction * ModelExecution::get_uninitialized_action(const ModelAction *curr) const
2573 Node *node = curr->get_node();
2574 ModelAction *act = node->get_uninit_action();
/* Cache miss: build the UNINIT action lazily and attach it to the
 * Node so subsequent lookups reuse the same object. */
2576 act = new ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, curr->get_location(), params->uninitvalue, model_thread);
2577 node->set_uninit_action(act);
2579 act->create_cv(NULL);
/* Print an action list as a trace summary, accumulating a simple hash
 * of the actions so identical executions print identical HASH lines. */
2583 static void print_list(const action_list_t *list)
2585 action_list_t::const_iterator it;
2587 model_print("---------------------------------------------------------------------\n");
2589 unsigned int hash = 0;
2591 for (it = list->begin(); it != list->end(); it++) {
2592 const ModelAction *act = *it;
/* Only actions with a real sequence number are printed (others are
 * placeholders), but all actions feed the hash below. */
2593 if (act->get_seq_number() > 0)
2595 hash = hash^(hash<<3)^((*it)->hash());
2597 model_print("HASH %u\n", hash);
2598 model_print("---------------------------------------------------------------------\n");
2601 #if SUPPORT_MOD_ORDER_DUMP
/* Dump the modification-order graph plus rf/sb edges to <filename>.dot.
 * NOTE(review): fopen's result is used unchecked, and sprintf assumes
 * the (elided) buffer is large enough for filename + ".dot" -- confirm. */
2602 void ModelExecution::dumpGraph(char *filename) const
2605 sprintf(buffer, "%s.dot", filename);
2606 FILE *file = fopen(buffer, "w");
2607 fprintf(file, "digraph %s {\n", filename);
2608 mo_graph->dumpNodes(file);
/* Per-thread cursor of the previous action, used to draw sb edges. */
2609 ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());
2611 for (action_list_t::iterator it = action_trace.begin(); it != action_trace.end(); it++) {
2612 ModelAction *act = *it;
2613 if (act->is_read()) {
2614 mo_graph->dot_print_node(file, act);
2615 if (act->get_reads_from())
2616 mo_graph->dot_print_edge(file,
2617 act->get_reads_from(),
2619 "label=\"rf\", color=red, weight=2");
2621 mo_graph->dot_print_edge(file,
2622 act->get_reads_from_promise(),
2624 "label=\"rf\", color=red");
/* NOTE(review): thread_array is indexed both with the raw tid (here
 * and at the assignment below) and with id_to_int(...) in the edge
 * call -- harmless only if the conversion is the identity; unify. */
2626 if (thread_array[act->get_tid()]) {
2627 mo_graph->dot_print_edge(file,
2628 thread_array[id_to_int(act->get_tid())],
2630 "label=\"sb\", color=blue, weight=400");
2633 thread_array[act->get_tid()] = act;
2635 fprintf(file, "}\n");
2636 model_free(thread_array);
2641 /** @brief Prints an execution trace summary. */
2642 void ModelExecution::print_summary() const
2644 #if SUPPORT_MOD_ORDER_DUMP
/* Optionally dump the mo_graph for this execution to numbered files. */
2645 char buffername[100];
2646 sprintf(buffername, "exec%04u", get_execution_number());
2647 mo_graph->dumpGraphToFile(buffername);
2648 sprintf(buffername, "graph%04u", get_execution_number());
2649 dumpGraph(buffername);
2652 model_print("Execution %d:", get_execution_number());
2653 if (isfeasibleprefix()) {
2654 if (scheduler->all_threads_sleeping())
2655 model_print(" SLEEP-SET REDUNDANT");
2658 print_infeasibility(" INFEASIBLE");
2659 print_list(&action_trace);
2661 if (!promises.empty()) {
2662 model_print("Pending promises:\n");
2663 for (unsigned int i = 0; i < promises.size(); i++) {
2664 model_print(" [P%u] ", i);
2665 promises[i]->print();
2672 * Add a Thread to the system for the first time. Should only be called once
2674 * @param t The Thread to add
2676 void ModelExecution::add_thread(Thread *t)
2678 thread_map.put(id_to_int(t->get_id()), t);
/* The special model-checker thread is tracked in thread_map but is
 * never handed to the scheduler. */
2679 if (!t->is_model_thread())
2680 scheduler->add_thread(t);
2684 * @brief Get a Thread reference by its ID
2685 * @param tid The Thread's ID
2686 * @return A Thread reference
2688 Thread * ModelExecution::get_thread(thread_id_t tid) const
2690 return thread_map.get(id_to_int(tid));
2694 * @brief Get a reference to the Thread in which a ModelAction was executed
2695 * @param act The ModelAction
2696 * @return A Thread reference
2698 Thread * ModelExecution::get_thread(const ModelAction *act) const
2700 return get_thread(act->get_tid());
2704 * @brief Get a Promise's "promise number"
2706 * A "promise number" is an index number that is unique to a promise, valid
2707 * only for a specific snapshot of an execution trace. Promises may come and go
2708 * as they are generated and resolved, so an index only retains meaning for the
2711 * @param promise The Promise to check
2712 * @return The promise index, if the promise still is valid; otherwise -1
2714 int ModelExecution::get_promise_number(const Promise *promise) const
/* Linear scan is fine: the pending-promise vector stays small. */
2716 for (unsigned int i = 0; i < promises.size(); i++)
2717 if (promises[i] == promise)
2724 * @brief Check if a Thread is currently enabled
2725 * @param t The Thread to check
2726 * @return True if the Thread is currently enabled
2728 bool ModelExecution::is_enabled(Thread *t) const
2730 return scheduler->is_enabled(t);
2734 * @brief Check if a Thread is currently enabled
2735 * @param tid The ID of the Thread to check
2736 * @return True if the Thread is currently enabled
2738 bool ModelExecution::is_enabled(thread_id_t tid) const
2740 return scheduler->is_enabled(tid);
2744 * @brief Select the next thread to execute based on the current action
2746 * RMW actions occur in two parts, and we cannot split them. And THREAD_CREATE
2747 * actions should be followed by the execution of their child thread. In either
2748 * case, the current action should determine the next thread schedule.
2750 * @param curr The current action
2751 * @return The next thread to run, if the current action will determine this
2752 * selection; otherwise NULL
2754 Thread * ModelExecution::action_select_next_thread(const ModelAction *curr) const
2756 /* Do not split atomic RMW */
2757 if (curr->is_rmwr())
2758 return get_thread(curr);
2759 /* Follow CREATE with the created thread */
2760 if (curr->get_type() == THREAD_CREATE)
2761 return curr->get_thread_operand();
2765 /** @return True if the execution has taken too many steps */
2766 bool ModelExecution::too_many_steps() const
/* bound == 0 means "unbounded"; otherwise compare the sequence-number
 * count against the user-configured step bound. */
2768 return params->bound != 0 && priv->used_sequence_numbers > params->bound;
2772 * Takes the next step in the execution, if possible.
2773 * @param curr The current step to take
2774 * @return Returns the next Thread to run, if any; NULL if this execution
2777 Thread * ModelExecution::take_step(ModelAction *curr)
2779 Thread *curr_thrd = get_thread(curr);
2780 ASSERT(curr_thrd->get_state() == THREAD_READY);
2782 ASSERT(check_action_enabled(curr)); /* May have side effects? */
/* check_current_action may replace curr (e.g. with a recycled action
 * from the NodeStack), so use its return value from here on. */
2783 curr = check_current_action(curr);
/* A thread that blocked or finished during this step must leave the
 * scheduler's runnable set. */
2786 if (curr_thrd->is_blocked() || curr_thrd->is_complete())
2787 scheduler->remove_thread(curr_thrd);
2789 return action_select_next_thread(curr);
2793 * Launch end-of-execution release sequence fixups only when the execution
2794 * is otherwise feasible AND all of the following hold:
2796 * (1) there are pending release sequences,
2797 * (2) there are pending assertions that a change in clock vectors could
2798 * invalidate (i.e., data races), and
2799 * (3) there are no pending promises.
2801 void ModelExecution::fixup_release_sequences()
2803 while (!pending_rel_seqs.empty() &&
2804 is_feasible_prefix_ignore_relseq() &&
2805 !unrealizedraces.empty()) {
2806 model_print("*** WARNING: release sequence fixup action "
2807 "(%zu pending release seuqence(s)) ***\n",
2808 pending_rel_seqs.size());
2809 ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
2810 std::memory_order_seq_cst, NULL, VALUE_NONE,