12 #include "clockvector.h"
13 #include "cyclegraph.h"
15 #include "threads-model.h"
16 #include "bugmessage.h"
19 #define INITIAL_THREAD_ID 0
22 * Structure for holding small ModelChecker members that should be snapshotted
// NOTE(review): this SOURCE is an elided line-numbered dump; interior lines
// (including the struct's closing "};") are missing. Code kept byte-identical.
24 struct model_snapshot_members {
25 model_snapshot_members() :
26 /* First thread created will have id INITIAL_THREAD_ID */
27 next_thread_id(INITIAL_THREAD_ID),
28 used_sequence_numbers(0),
30 bad_synchronization(false),
// Destructor releases each heap-allocated bug_message (delete stmt elided).
34 ~model_snapshot_members() {
35 for (unsigned int i = 0;i < bugs.size();i++)
// Next id handed out by get_next_id(); doubles as the live thread count.
40 unsigned int next_thread_id;
// Last sequence number assigned to a ModelAction (see get_next_seq_num()).
41 modelclock_t used_sequence_numbers;
// Bug reports accumulated by assert_bug(); owned (deleted in the dtor loop).
42 SnapVector<bug_message *> bugs;
43 /** @brief Incorrectly-ordered synchronization was made */
44 bool bad_synchronization;
50 /** @brief Constructor */
51 ModelExecution::ModelExecution(ModelChecker *m, Scheduler *scheduler, NodeStack *node_stack) :
// NOTE(review): several initializer-list entries are elided from this view.
56 thread_map(2), /* We'll always need at least 2 threads */
60 condvar_waiters_map(),
64 thrd_last_fence_release(),
65 node_stack(node_stack),
// priv holds the snapshotted bookkeeping state (see model_snapshot_members).
66 priv(new struct model_snapshot_members ()),
67 mo_graph(new CycleGraph()),
70 /* Initialize a model-checker thread, for special ModelActions */
71 model_thread = new Thread(get_next_id());
72 add_thread(model_thread);
// Give the scheduler and node stack a back-pointer to this execution.
73 scheduler->register_engine(this);
74 node_stack->register_engine(this);
77 /** @brief Destructor */
78 ModelExecution::~ModelExecution()
// Free every Thread created during this execution (model thread included);
// further cleanup (e.g. priv, mo_graph) is elided from this view.
80 for (unsigned int i = 0;i < get_num_threads();i++)
81 delete get_thread(int_to_id(i));
// Delegate to the owning ModelChecker for the current execution number.
87 int ModelExecution::get_execution_number() const
89 return model->get_execution_number();
// Lazily create the per-location action list on first lookup; the hash->put
// and return statements are elided from this view.
92 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr)
94 action_list_t *tmp = hash->get(ptr);
96 tmp = new action_list_t();
// Vector-of-lists analogue of get_safe_ptr_action(): lazily create the
// per-location, per-thread action-list vector (put/return elided here).
102 static SnapVector<action_list_t> * get_safe_ptr_vect_action(HashTable<void *, SnapVector<action_list_t> *, uintptr_t, 4> * hash, void * ptr)
104 SnapVector<action_list_t> *tmp = hash->get(ptr);
106 tmp = new SnapVector<action_list_t>();
// Return the action list of thread `tid` on object `obj`, or presumably NULL
// when absent (the NULL/early-return paths are elided from this view).
112 action_list_t * ModelExecution::get_actions_on_obj(void * obj, thread_id_t tid) const
114 SnapVector<action_list_t> *wrv = obj_thrd_map.get(obj);
117 unsigned int thread=id_to_int(tid);
// Bounds-check before indexing the per-thread vector.
118 if (thread < wrv->size())
119 return &(*wrv)[thread];
124 /** @return a thread ID for a new Thread */
125 thread_id_t ModelExecution::get_next_id()
// Post-increment: returns the current id, then bumps the counter.
127 return priv->next_thread_id++;
130 /** @return the number of user threads created during this execution */
131 unsigned int ModelExecution::get_num_threads() const
// next_thread_id has been incremented once per created thread (get_next_id),
// so it equals the thread count.
133 return priv->next_thread_id;
136 /** @return a sequence number for a new ModelAction */
137 modelclock_t ModelExecution::get_next_seq_num()
// Pre-increment: sequence numbers start at 1 (0 is the initial value).
139 return ++priv->used_sequence_numbers;
143 * @brief Should the current action wake up a given thread?
145 * @param curr The current action
146 * @param thread The thread that we might wake up
147 * @return True, if we should wake up the sleeping thread; false otherwise
149 bool ModelExecution::should_wake_up(const ModelAction *curr, const Thread *thread) const
151 const ModelAction *asleep = thread->get_pending();
152 /* Don't allow partial RMW to wake anyone up */
// NOTE(review): the guard bodies / return statements between these checks are
// elided from this view; code kept byte-identical.
155 /* Synchronizing actions may have been backtracked */
156 if (asleep->could_synchronize_with(curr))
158 /* All acquire/release fences and fence-acquire/store-release */
159 if (asleep->is_fence() && asleep->is_acquire() && curr->is_release())
161 /* Fence-release + store can awake load-acquire on the same location */
162 if (asleep->is_read() && asleep->is_acquire() && curr->same_var(asleep) && curr->is_write()) {
163 ModelAction *fence_release = get_last_fence_release(curr->get_tid());
// NOTE(review): get_last_action() may return NULL (see its definition below);
// confirm the elided context guarantees a last action before this deref.
164 if (fence_release && *(get_last_action(thread->get_id())) < *fence_release)
// Scan every thread in the sleep set and wake those that should_wake_up()
// says can make progress given the current action.
170 void ModelExecution::wake_up_sleeping_actions(ModelAction *curr)
172 for (unsigned int i = 0;i < get_num_threads();i++) {
173 Thread *thr = get_thread(int_to_id(i));
174 if (scheduler->is_sleep_set(thr)) {
175 if (should_wake_up(curr, thr))
176 /* Remove this thread from sleep set */
177 scheduler->remove_sleep(thr);
182 /** @brief Alert the model-checker that an incorrectly-ordered
183 * synchronization was made */
184 void ModelExecution::set_bad_synchronization()
// Flag consumed by is_infeasible() / print_infeasibility().
186 priv->bad_synchronization = true;
// Record a bug report for this execution; the return value and the body of
// the feasible-prefix branch are elided from this view.
189 bool ModelExecution::assert_bug(const char *msg)
191 priv->bugs.push_back(new bug_message(msg));
193 if (isfeasibleprefix()) {
200 /** @return True, if any bugs have been reported for this execution */
201 bool ModelExecution::have_bug_reports() const
203 return priv->bugs.size() != 0;
// Accessor for the collected bug reports; body elided from this view
// (presumably returns &priv->bugs -- confirm in full source).
206 SnapVector<bug_message *> * ModelExecution::get_bugs() const
212 * Check whether the current trace has triggered an assertion which should halt
215 * @return True, if the execution should be aborted; false otherwise
217 bool ModelExecution::has_asserted() const
219 return priv->asserted;
223 * Trigger a trace assertion which should cause this execution to be halted.
224 * This can be due to a detected bug or due to an infeasibility that should
227 void ModelExecution::set_assert()
229 priv->asserted = true;
233 * Check if we are in a deadlock. Should only be called at the end of an
234 * execution, although it should not give false positives in the middle of an
235 * execution (there should be some ENABLED thread).
237 * @return True if program is in a deadlock; false otherwise
239 bool ModelExecution::is_deadlocked() const
241 bool blocking_threads = false;
242 for (unsigned int i = 0;i < get_num_threads();i++) {
243 thread_id_t tid = int_to_id(i);
// NOTE(review): the enabled-thread early-out between these lines is elided
// from this view; code kept byte-identical.
246 Thread *t = get_thread(tid);
// A live (non-model) thread with a pending action is blocked on something.
247 if (!t->is_model_thread() && t->get_pending())
248 blocking_threads = true;
250 return blocking_threads;
254 * Check if this is a complete execution. That is, have all thread completed
255 * execution (rather than exiting because sleep sets have forced a redundant
258 * @return True if the execution is complete.
260 bool ModelExecution::is_complete_execution() const
// Any still-enabled thread means the execution is not complete (the
// false/true return statements are elided from this view).
262 for (unsigned int i = 0;i < get_num_threads();i++)
263 if (is_enabled(int_to_id(i)))
270 * Processes a read model action.
271 * @param curr is the read model action to process.
272 * @param rf_set is the set of model actions we can possibly read from
273 * @return True if processing this read updates the mo_graph.
// NOTE(review): doc says "@return True ..." but the signature is void --
// the comment is stale; update it in the full source.
275 void ModelExecution::process_read(ModelAction *curr, SnapVector<ModelAction *> * rf_set)
277 SnapVector<const ModelAction *> * priorset = new SnapVector<const ModelAction *>();
// Let the fuzzer pick which write in rf_set this read observes.
280 int index = fuzzer->selectWrite(curr, rf_set);
281 const ModelAction *rf = (*rf_set)[index];
// Add mo edges from every prior write collected by r_modification_order.
286 if (r_modification_order(curr, rf, priorset)) {
287 for(unsigned int i=0;i<priorset->size();i++) {
288 mo_graph->addEdge((*priorset)[i], rf);
291 get_thread(curr)->set_return_value(curr->get_return_value());
// Swap-remove the chosen write from rf_set (retry loop context is elided).
296 (*rf_set)[index] = rf_set->back();
302 * Processes a lock, trylock, or unlock model action. @param curr is
303 * the read model action to process.
305 * The try lock operation checks whether the lock is taken. If not,
306 * it falls to the normal lock operation case. If so, it returns
309 * The lock operation has already been checked that it is enabled, so
310 * it just grabs the lock and synchronizes with the previous unlock.
312 * The unlock operation has to re-enable all of the threads that are
313 * waiting on the lock.
315 * @return True if synchronization was updated; false otherwise
317 bool ModelExecution::process_mutex(ModelAction *curr)
319 cdsc::mutex *mutex = curr->get_mutex();
320 struct cdsc::mutex_state *state = NULL;
// NOTE(review): NULL-check around this (if any) is elided from this view.
323 state = mutex->get_state();
325 switch (curr->get_type()) {
326 case ATOMIC_TRYLOCK: {
// Trylock succeeds only when the mutex is currently unlocked.
327 bool success = !state->locked;
328 curr->set_try_lock(success);
330 get_thread(curr)->set_return_value(0);
333 get_thread(curr)->set_return_value(1);
335 //otherwise fall into the lock case
// Detect use of the mutex before its initialization via clock comparison.
337 if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
338 assert_bug("Lock access before initialization");
// Record the owning Thread as the lock holder.
339 state->locked = get_thread(curr);
340 ModelAction *unlock = get_last_unlock(curr);
341 //synchronize with the previous unlock statement
342 if (unlock != NULL) {
343 synchronize(unlock, curr);
349 case ATOMIC_UNLOCK: {
350 /* wake up the other threads */
351 for (unsigned int i = 0;i < get_num_threads();i++) {
352 Thread *t = get_thread(int_to_id(i));
353 Thread *curr_thrd = get_thread(curr);
354 if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
358 /* unlock the lock - after checking who was waiting on it */
359 state->locked = NULL;
// ATOMIC_WAIT shares the unlock path up to here (wait-specific code elided).
361 if (!curr->is_wait())
362 break;/* The rest is only for ATOMIC_WAIT */
366 case ATOMIC_NOTIFY_ALL: {
367 action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
368 //activate all the waiting threads
369 for (action_list_t::iterator rit = waiters->begin();rit != waiters->end();rit++) {
370 scheduler->wake(get_thread(*rit));
375 case ATOMIC_NOTIFY_ONE: {
376 action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
// Fuzzer chooses which single waiter to wake.
377 Thread * thread = fuzzer->selectNotify(waiters);
378 scheduler->wake(thread);
389 * Process a write ModelAction
390 * @param curr The ModelAction to process
391 * @return True if the mo_graph was updated or promises were resolved
// NOTE(review): doc says "@return True ..." but the signature is void --
// stale comment; update in the full source.
393 void ModelExecution::process_write(ModelAction *curr)
// Establish modification-order edges for this write, then report success
// (VALUE_NONE) to the issuing thread.
396 w_modification_order(curr);
399 get_thread(curr)->set_return_value(VALUE_NONE);
403 * Process a fence ModelAction
404 * @param curr The ModelAction to process
405 * @return True if synchronization was updated
407 bool ModelExecution::process_fence(ModelAction *curr)
410 * fence-relaxed: no-op
411 * fence-release: only log the occurence (not in this function), for
412 * use in later synchronization
413 * fence-acquire (this function): search for hypothetical release
415 * fence-seq-cst: MO constraints formed in {r,w}_modification_order
417 bool updated = false;
418 if (curr->is_acquire()) {
419 action_list_t *list = &action_trace;
420 action_list_t::reverse_iterator rit;
421 /* Find X : is_read(X) && X --sb-> curr */
// Walk the whole trace backwards, restricting to curr's own thread below.
422 for (rit = list->rbegin();rit != list->rend();rit++) {
423 ModelAction *act = *rit;
// Skip actions from other threads (continue stmt elided from this view).
426 if (act->get_tid() != curr->get_tid())
428 /* Stop at the beginning of the thread */
429 if (act->is_thread_start())
431 /* Stop once we reach a prior fence-acquire */
432 if (act->is_fence() && act->is_acquire())
436 /* read-acquire will find its own release sequences */
437 if (act->is_acquire())
440 /* Establish hypothetical release sequences */
441 rel_heads_list_t release_heads;
442 get_release_seq_heads(curr, act, &release_heads);
443 for (unsigned int i = 0;i < release_heads.size();i++)
444 synchronize(release_heads[i], curr);
// Mark updated when at least one release head synchronized (assignment and
// final return are elided from this view).
445 if (release_heads.size() != 0)
453 * @brief Process the current action for thread-related activity
455 * Performs current-action processing for a THREAD_* ModelAction. Proccesses
456 * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
457 * synchronization, etc. This function is a no-op for non-THREAD actions
458 * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
460 * @param curr The current action
461 * @return True if synchronization was updated or a thread completed
463 bool ModelExecution::process_thread_action(ModelAction *curr)
465 bool updated = false;
467 switch (curr->get_type()) {
468 case THREAD_CREATE: {
// C11 thrd_create path: thread params are smuggled in the action's value.
469 thrd_t *thrd = (thrd_t *)curr->get_location();
470 struct thread_params *params = (struct thread_params *)curr->get_value();
471 Thread *th = new Thread(get_next_id(), thrd, params->func, params->arg, get_thread(curr));
472 curr->set_thread_operand(th);
474 th->set_creation(curr);
477 case PTHREAD_CREATE: {
// pthread path: hand out a fresh pthread id and map it to the new Thread.
478 (*(uint32_t *)curr->get_location()) = pthread_counter++;
480 struct pthread_params *params = (struct pthread_params *)curr->get_value();
481 Thread *th = new Thread(get_next_id(), NULL, params->func, params->arg, get_thread(curr));
482 curr->set_thread_operand(th);
484 th->set_creation(curr);
486 if ( pthread_map.size() < pthread_counter )
487 pthread_map.resize( pthread_counter );
488 pthread_map[ pthread_counter-1 ] = th;
// Join cases: synchronize with the last action of the joined thread.
// (The case labels before these two bodies are elided from this view.)
493 Thread *blocking = curr->get_thread_operand();
494 ModelAction *act = get_last_action(blocking->get_id());
495 synchronize(act, curr);
496 updated = true; /* trigger rel-seq checks */
500 Thread *blocking = curr->get_thread_operand();
501 ModelAction *act = get_last_action(blocking->get_id());
502 synchronize(act, curr);
503 updated = true; /* trigger rel-seq checks */
504 break; // WL: to be add (modified)
507 case THREAD_FINISH: {
508 Thread *th = get_thread(curr);
509 /* Wake up any joining threads */
510 for (unsigned int i = 0;i < get_num_threads();i++) {
511 Thread *waiting = get_thread(int_to_id(i));
512 if (waiting->waiting_on() == th &&
513 waiting->get_pending()->is_thread_join())
514 scheduler->wake(waiting);
517 updated = true; /* trigger rel-seq checks */
531 * Initialize the current action by performing one or more of the following
532 * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
533 * in the NodeStack, manipulating backtracking sets, allocating and
534 * initializing clock vectors, and computing the promises to fulfill.
536 * @param curr The current action, as passed from the user context; may be
537 * freed/invalidated after the execution of this function, with a different
538 * action "returned" its place (pass-by-reference)
539 * @return True if curr is a newly-explored action; false otherwise
541 bool ModelExecution::initialize_curr_action(ModelAction **curr)
543 ModelAction *newcurr;
// Second half of an RMW: fold it into the earlier RMWR via process_rmw.
545 if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
546 newcurr = process_rmw(*curr);
// Fresh action: stamp a sequence number, then look it up in the NodeStack.
553 (*curr)->set_seq_number(get_next_seq_num());
555 newcurr = node_stack->explore_action(*curr);
557 /* First restore type and order in case of RMW operation */
558 if ((*curr)->is_rmwr())
559 newcurr->copy_typeandorder(*curr);
561 ASSERT((*curr)->get_location() == newcurr->get_location());
562 newcurr->copy_from_new(*curr);
564 /* Discard duplicate ModelAction; use action from NodeStack */
567 /* Always compute new clock vector */
568 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
571 return false; /* Action was explored previously */
575 /* Always compute new clock vector */
576 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
578 /* Assign most recent release fence */
579 newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
581 return true; /* This was a new ModelAction */
586 * @brief Establish reads-from relation between two actions
588 * Perform basic operations involved with establishing a concrete rf relation,
589 * including setting the ModelAction data and checking for release sequences.
591 * @param act The action that is reading (must be a read)
592 * @param rf The action from which we are reading (must be a write)
594 * @return True if this read established synchronization
597 bool ModelExecution::read_from(ModelAction *act, const ModelAction *rf)
600 ASSERT(rf->is_write());
602 act->set_read_from(rf);
// Acquire reads must also synchronize with the heads of any release
// sequences that rf belongs to.
603 if (act->is_acquire()) {
604 rel_heads_list_t release_heads;
605 get_release_seq_heads(act, act, &release_heads);
606 int num_heads = release_heads.size();
607 for (unsigned int i = 0;i < release_heads.size();i++)
// Failed synchronize() handling is elided from this view.
608 if (!synchronize(release_heads[i], act))
610 return num_heads > 0;
616 * @brief Synchronizes two actions
618 * When A synchronizes with B (or A --sw-> B), B inherits A's clock vector.
619 * This function performs the synchronization as well as providing other hooks
620 * for other checks along with synchronization.
622 * @param first The left-hand side of the synchronizes-with relation
623 * @param second The right-hand side of the synchronizes-with relation
624 * @return True if the synchronization was successful (i.e., was consistent
625 * with the execution order); false otherwise
627 bool ModelExecution::synchronize(const ModelAction *first, ModelAction *second)
// A synchronization edge pointing backwards in execution order is invalid:
// flag the whole execution as infeasible.
629 if (*second < *first) {
630 set_bad_synchronization();
633 return second->synchronize_with(first);
637 * @brief Check whether a model action is enabled.
639 * Checks whether an operation would be successful (i.e., is a lock already
640 * locked, or is the joined thread already complete).
642 * For yield-blocking, yields are never enabled.
644 * @param curr is the ModelAction to check whether it is enabled.
645 * @return a bool that indicates whether the action is enabled.
647 bool ModelExecution::check_action_enabled(ModelAction *curr) {
648 if (curr->is_lock()) {
649 cdsc::mutex *lock = curr->get_mutex();
// Lock-held test and its return are elided from this view.
650 struct cdsc::mutex_state *state = lock->get_state();
653 } else if (curr->is_thread_join()) {
654 Thread *blocking = curr->get_thread_operand();
// Joining an unfinished thread blocks (body elided from this view).
655 if (!blocking->is_complete()) {
664 * This is the heart of the model checker routine. It performs model-checking
665 * actions corresponding to a given "current action." Among other processes, it
666 * calculates reads-from relationships, updates synchronization clock vectors,
667 * forms a memory_order constraints graph, and handles replay/backtrack
668 * execution when running permutations of previously-observed executions.
670 * @param curr The current action to process
671 * @return The ModelAction that is actually executed; may be different than
674 ModelAction * ModelExecution::check_current_action(ModelAction *curr)
// The RMW-write half shares the action object with its earlier read half,
// so it must not be added to the lists twice.
677 bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
678 bool newly_explored = initialize_curr_action(&curr);
682 wake_up_sleeping_actions(curr);
684 /* Add the action to lists before any other model-checking tasks */
685 if (!second_part_of_rmw)
686 add_action_to_lists(curr);
688 SnapVector<ModelAction *> * rf_set = NULL;
689 /* Build may_read_from set for newly-created actions */
690 if (newly_explored && curr->is_read())
691 rf_set = build_may_read_from(curr);
693 process_thread_action(curr);
695 if (curr->is_read() && !second_part_of_rmw) {
696 process_read(curr, rf_set);
// Non-read path: rf_set must never have been built.
699 ASSERT(rf_set == NULL);
// Dispatch to the per-category processors (call bodies elided from view).
702 if (curr->is_write())
705 if (curr->is_fence())
708 if (curr->is_mutex_op())
715 * This is the strongest feasibility check available.
716 * @return whether the current trace (partial or complete) must be a prefix of
719 bool ModelExecution::isfeasibleprefix() const
721 return !is_infeasible();
725 * Print disagnostic information about an infeasible execution
726 * @param prefix A string to prefix the output with; if NULL, then a default
727 * message prefix will be provided
729 void ModelExecution::print_infeasibility(const char *prefix) const
// NOTE(review): declarations of buf/ptr are elided from this view; sprintf
// into a fixed buffer -- verify bounds in the full source.
733 if (priv->bad_synchronization)
734 ptr += sprintf(ptr, "[bad sw ordering]");
736 model_print("%s: %s", prefix ? prefix : "Infeasible", buf);
740 * Check if the current partial trace is infeasible. Does not check any
741 * end-of-execution flags, which might rule out the execution. Thus, this is
742 * useful only for ruling an execution as infeasible.
743 * @return whether the current partial trace is infeasible.
745 bool ModelExecution::is_infeasible() const
// Only source of infeasibility tracked here is bad synchronization.
747 return priv->bad_synchronization;
750 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
751 ModelAction * ModelExecution::process_rmw(ModelAction *act) {
// Merge this second half into the thread's pending RMWR, then record the
// atomicity edge in the mo graph (return stmt elided from this view).
752 ModelAction *lastread = get_last_action(act->get_tid());
753 lastread->process_rmw(act);
755 mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
761 * @brief Updates the mo_graph with the constraints imposed from the current
764 * Basic idea is the following: Go through each other thread and find
765 * the last action that happened before our read. Two cases:
767 * -# The action is a write: that write must either occur before
768 * the write we read from or be the write we read from.
769 * -# The action is a read: the write that that action read from
770 * must occur before the write we read from or be the same write.
772 * @param curr The current action. Must be a read.
773 * @param rf The ModelAction or Promise that curr reads from. Must be a write.
774 * @return True if modification order edges were added; false otherwise
777 bool ModelExecution::r_modification_order(ModelAction *curr, const ModelAction *rf, SnapVector<const ModelAction *> * priorset)
779 SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
781 ASSERT(curr->is_read());
783 /* Last SC fence in the current thread */
784 ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
785 ModelAction *last_sc_write = NULL;
786 if (curr->is_seqcst())
787 last_sc_write = get_last_seq_cst_write(curr);
789 /* Iterate over all threads */
// NOTE(review): declaration of loop variable i is elided from this view.
790 for (i = 0;i < thrd_lists->size();i++) {
791 /* Last SC fence in thread i */
792 ModelAction *last_sc_fence_thread_local = NULL;
793 if (int_to_id((int)i) != curr->get_tid())
794 last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(i), NULL);
796 /* Last SC fence in thread i, before last SC fence in current thread */
797 ModelAction *last_sc_fence_thread_before = NULL;
798 if (last_sc_fence_local)
799 last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
801 /* Iterate over actions in thread, starting from most recent */
802 action_list_t *list = &(*thrd_lists)[i];
803 action_list_t::reverse_iterator rit;
804 for (rit = list->rbegin();rit != list->rend();rit++) {
805 ModelAction *act = *rit;
810 /* Don't want to add reflexive edges on 'rf' */
811 if (act->equals(rf)) {
812 if (act->happens_before(curr))
818 if (act->is_write()) {
819 /* C++, Section 29.3 statement 5 */
820 if (curr->is_seqcst() && last_sc_fence_thread_local &&
821 *act < *last_sc_fence_thread_local) {
// A cycle (rf reachable from act) would make the edge inconsistent;
// the error/return handling here is elided from this view.
822 if (mo_graph->checkReachable(rf, act))
824 priorset->push_back(act);
827 /* C++, Section 29.3 statement 4 */
828 else if (act->is_seqcst() && last_sc_fence_local &&
829 *act < *last_sc_fence_local) {
830 if (mo_graph->checkReachable(rf, act))
832 priorset->push_back(act);
835 /* C++, Section 29.3 statement 6 */
836 else if (last_sc_fence_thread_before &&
837 *act < *last_sc_fence_thread_before) {
838 if (mo_graph->checkReachable(rf, act))
840 priorset->push_back(act);
846 * Include at most one act per-thread that "happens
849 if (act->happens_before(curr)) {
850 if (act->is_write()) {
851 if (mo_graph->checkReachable(rf, act))
853 priorset->push_back(act);
// act is a read: constrain against the write it read from instead.
855 const ModelAction *prevrf = act->get_reads_from();
856 if (!prevrf->equals(rf)) {
857 if (mo_graph->checkReachable(rf, prevrf))
859 priorset->push_back(prevrf);
870 * Updates the mo_graph with the constraints imposed from the current write.
872 * Basic idea is the following: Go through each other thread and find
873 * the lastest action that happened before our write. Two cases:
875 * (1) The action is a write => that write must occur before
878 * (2) The action is a read => the write that that action read from
879 * must occur before the current write.
881 * This method also handles two other issues:
883 * (I) Sequential Consistency: Making sure that if the current write is
884 * seq_cst, that it occurs after the previous seq_cst write.
886 * (II) Sending the write back to non-synchronizing reads.
888 * @param curr The current action. Must be a write.
889 * @param send_fv A vector for stashing reads to which we may pass our future
890 * value. If NULL, then don't record any future values.
891 * @return True if modification order edges were added; false otherwise
// NOTE(review): doc mentions @param send_fv and a bool @return, but the
// signature below is void and takes only curr -- stale doc, update upstream.
893 void ModelExecution::w_modification_order(ModelAction *curr)
895 SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
897 ASSERT(curr->is_write());
899 if (curr->is_seqcst()) {
900 /* We have to at least see the last sequentially consistent write,
901 so we are initialized. */
902 ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
903 if (last_seq_cst != NULL) {
904 mo_graph->addEdge(last_seq_cst, curr);
908 /* Last SC fence in the current thread */
909 ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
911 /* Iterate over all threads */
// NOTE(review): declaration of loop variable i is elided from this view.
912 for (i = 0;i < thrd_lists->size();i++) {
913 /* Last SC fence in thread i, before last SC fence in current thread */
914 ModelAction *last_sc_fence_thread_before = NULL;
915 if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
916 last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
918 /* Iterate over actions in thread, starting from most recent */
919 action_list_t *list = &(*thrd_lists)[i];
920 action_list_t::reverse_iterator rit;
921 bool force_edge = false;
922 for (rit = list->rbegin();rit != list->rend();rit++) {
923 ModelAction *act = *rit;
926 * 1) If RMW and it actually read from something, then we
927 * already have all relevant edges, so just skip to next
930 * 2) If RMW and it didn't read from anything, we should
931 * whatever edge we can get to speed up convergence.
933 * 3) If normal write, we need to look at earlier actions, so
934 * continue processing list.
937 if (curr->is_rmw()) {
// The force_edge assignment / continue between these lines is elided.
938 if (curr->get_reads_from() != NULL)
946 /* C++, Section 29.3 statement 7 */
947 if (last_sc_fence_thread_before && act->is_write() &&
948 *act < *last_sc_fence_thread_before) {
949 mo_graph->addEdge(act, curr, force_edge);
954 * Include at most one act per-thread that "happens
957 if (act->happens_before(curr)) {
959 * Note: if act is RMW, just add edge:
961 * The following edge should be handled elsewhere:
962 * readfrom(act) --mo--> act
// The is_write() condition guarding this addEdge is elided from view.
965 mo_graph->addEdge(act, curr, force_edge);
966 else if (act->is_read()) {
967 //if previous read accessed a null, just keep going
968 mo_graph->addEdge(act->get_reads_from(), curr, force_edge);
971 } else if (act->is_read() && !act->could_synchronize_with(curr) &&
972 !act->same_thread(curr)) {
973 /* We have an action that:
974 (1) did not happen before us
975 (2) is a read and we are a write
976 (3) cannot synchronize with us
977 (4) is in a different thread
979 that read could potentially read from our write. Note that
980 these checks are overly conservative at this point, we'll
981 do more checks before actually removing the
992 * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
993 * some constraints. This method checks one the following constraint (others
994 * require compiler support):
996 * If X --hb-> Y --mo-> Z, then X should not read from Z.
997 * If X --hb-> Y, A --rf-> Y, and A --mo-> Z, then X should not read from Z.
999 bool ModelExecution::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
1001 SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(reader->get_location());
1003 /* Iterate over all threads */
// NOTE(review): declaration of loop variable i is elided from this view.
1004 for (i = 0;i < thrd_lists->size();i++) {
// Track the earliest write (or read-from source) hb-after the reader.
1005 const ModelAction *write_after_read = NULL;
1007 /* Iterate over actions in thread, starting from most recent */
1008 action_list_t *list = &(*thrd_lists)[i];
1009 action_list_t::reverse_iterator rit;
1010 for (rit = list->rbegin();rit != list->rend();rit++) {
1011 ModelAction *act = *rit;
1013 /* Don't disallow due to act == reader */
1014 if (!reader->happens_before(act) || reader == act)
1016 else if (act->is_write())
1017 write_after_read = act;
1018 else if (act->is_read() && act->get_reads_from() != NULL)
1019 write_after_read = act->get_reads_from();
// If that later write is mo-ordered before the candidate writer, the read
// would violate the constraint (the false/true returns are elided here).
1022 if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
1029 * Finds the head(s) of the release sequence(s) containing a given ModelAction.
1030 * The ModelAction under consideration is expected to be taking part in
1031 * release/acquire synchronization as an object of the "reads from" relation.
1032 * Note that this can only provide release sequence support for RMW chains
1033 * which do not read from the future, as those actions cannot be traced until
1034 * their "promise" is fulfilled. Similarly, we may not even establish the
1035 * presence of a release sequence with certainty, as some modification order
1036 * constraints may be decided further in the future. Thus, this function
1037 * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
1038 * and a boolean representing certainty.
1040 * @param rf The action that might be part of a release sequence. Must be a
1042 * @param release_heads A pass-by-reference style return parameter. After
1043 * execution of this function, release_heads will contain the heads of all the
1044 * relevant release sequences, if any exists with certainty
1045 * @return true, if the ModelExecution is certain that release_heads is complete;
1048 bool ModelExecution::release_seq_heads(const ModelAction *rf, rel_heads_list_t *release_heads) const
// Walk backwards along the RMW reads-from chain collecting release heads.
1051 for ( ;rf != NULL;rf = rf->get_reads_from()) {
1052 ASSERT(rf->is_write());
1054 if (rf->is_release())
1055 release_heads->push_back(rf);
1056 else if (rf->get_last_fence_release())
1057 release_heads->push_back(rf->get_last_fence_release());
// Chain-termination condition for this break is elided from this view.
1059 break;/* End of RMW chain */
1061 /** @todo Need to be smarter here... In the linux lock
1062 * example, this will run to the beginning of the program for
1064 /** @todo The way to be smarter here is to keep going until 1
1065 * thread has a release preceded by an acquire and you've seen
1068 /* acq_rel RMW is a sufficient stopping condition */
1069 if (rf->is_acquire() && rf->is_release())
1070 return true;/* complete */
1072 ASSERT(rf); // Needs to be real write
1074 if (rf->is_release())
1075 return true;/* complete */
1077 /* else relaxed write
1078 * - check for fence-release in the same thread (29.8, stmt. 3)
1079 * - check modification order for contiguous subsequence
1080 * -> rf must be same thread as release */
1082 const ModelAction *fence_release = rf->get_last_fence_release();
1083 /* Synchronize with a fence-release unconditionally; we don't need to
1084 * find any more "contiguous subsequence..." for it */
// The NULL-check guarding this push_back is elided from this view.
1086 release_heads->push_back(fence_release);
1088 return true; /* complete */
1092 * An interface for getting the release sequence head(s) with which a
1093 * given ModelAction must synchronize. This function only returns a non-empty
1094 * result when it can locate a release sequence head with certainty. Otherwise,
1095 * it may mark the internal state of the ModelExecution so that it will handle
1096 * the release sequence at a later time, causing @a acquire to update its
1097 * synchronization at some later point in execution.
1099 * @param acquire The 'acquire' action that may synchronize with a release
1101 * @param read The read action that may read from a release sequence; this may
1102 * be the same as acquire, or else an earlier action in the same thread (i.e.,
1103 * when 'acquire' is a fence-acquire)
1104 * @param release_heads A pass-by-reference return parameter. Will be filled
1105 * with the head(s) of the release sequence(s), if they exists with certainty.
1106 * @see ModelExecution::release_seq_heads
1108 void ModelExecution::get_release_seq_heads(ModelAction *acquire,
1109 ModelAction *read, rel_heads_list_t *release_heads)
// Thin wrapper: resolve the read's source write and delegate.
1111 const ModelAction *rf = read->get_reads_from();
1113 release_seq_heads(rf, release_heads);
1117 * Performs various bookkeeping operations for the current ModelAction. For
1118 * instance, adds action to the per-object, per-thread action vector and to the
1119 * action trace list of all thread actions.
1121 * @param act is the ModelAction to add.
1123 void ModelExecution::add_action_to_lists(ModelAction *act)
1125 int tid = id_to_int(act->get_tid());
1126 ModelAction *uninit = NULL;
// First touch of an atomic location: synthesize an "uninitialized" write and
// front-insert it so later reads can observe the uninitialized value.
// NOTE(review): declaration of uninit_id is elided from this view.
1128 action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
1129 if (list->empty() && act->is_atomic_var()) {
1130 uninit = get_uninitialized_action(act);
1131 uninit_id = id_to_int(uninit->get_tid());
1132 list->push_front(uninit);
1134 list->push_back(act);
1136 action_trace.push_back(act);
// Guard (if uninit) around this push_front is elided from this view.
1138 action_trace.push_front(uninit);
1140 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
1141 if (tid >= (int)vec->size())
1142 vec->resize(priv->next_thread_id);
1143 (*vec)[tid].push_back(act);
1145 (*vec)[uninit_id].push_front(uninit);
// Track the most recent action per thread (resize lazily).
1147 if ((int)thrd_last_action.size() <= tid)
1148 thrd_last_action.resize(get_num_threads());
1149 thrd_last_action[tid] = act;
1151 thrd_last_action[uninit_id] = uninit;
// Track the most recent fence-release per thread.
1153 if (act->is_fence() && act->is_release()) {
1154 if ((int)thrd_last_fence_release.size() <= tid)
1155 thrd_last_fence_release.resize(get_num_threads());
1156 thrd_last_fence_release[tid] = act;
// A wait also records itself against its mutex location (stored in value).
1159 if (act->is_wait()) {
1160 void *mutex_loc = (void *) act->get_value();
1161 get_safe_ptr_action(&obj_map, mutex_loc)->push_back(act);
1163 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, mutex_loc);
1164 if (tid >= (int)vec->size())
1165 vec->resize(priv->next_thread_id);
1166 (*vec)[tid].push_back(act);
1171 * @brief Get the last action performed by a particular Thread
1172 * @param tid The thread ID of the Thread in question
1173 * @return The last action in the thread
1175 ModelAction * ModelExecution::get_last_action(thread_id_t tid) const
1177 int threadid = id_to_int(tid);
1178 if (threadid < (int)thrd_last_action.size())
1179 return thrd_last_action[id_to_int(tid)];
1185 * @brief Get the last fence release performed by a particular Thread
1186 * @param tid The thread ID of the Thread in question
1187 * @return The last fence release in the thread, if one exists; NULL otherwise
1189 ModelAction * ModelExecution::get_last_fence_release(thread_id_t tid) const
1191 int threadid = id_to_int(tid);
1192 if (threadid < (int)thrd_last_fence_release.size())
1193 return thrd_last_fence_release[id_to_int(tid)];
1199 * Gets the last memory_order_seq_cst write (in the total global sequence)
1200 * performed on a particular object (i.e., memory location), not including the
1202 * @param curr The current ModelAction; also denotes the object location to
1204 * @return The last seq_cst write
1206 ModelAction * ModelExecution::get_last_seq_cst_write(ModelAction *curr) const
1208 void *location = curr->get_location();
1209 action_list_t *list = obj_map.get(location);
1210 /* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
1211 action_list_t::reverse_iterator rit;
1212 for (rit = list->rbegin();(*rit) != curr;rit++)
1214 rit++; /* Skip past curr */
1215 for ( ;rit != list->rend();rit++)
1216 if ((*rit)->is_write() && (*rit)->is_seqcst())
1222 * Gets the last memory_order_seq_cst fence (in the total global sequence)
1223 * performed in a particular thread, prior to a particular fence.
1224 * @param tid The ID of the thread to check
1225 * @param before_fence The fence from which to begin the search; if NULL, then
1226 * search for the most recent fence in the thread.
1227 * @return The last prior seq_cst fence in the thread, if exists; otherwise, NULL
1229 ModelAction * ModelExecution::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
1231 /* All fences should have location FENCE_LOCATION */
1232 action_list_t *list = obj_map.get(FENCE_LOCATION);
1237 action_list_t::reverse_iterator rit = list->rbegin();
1240 for (;rit != list->rend();rit++)
1241 if (*rit == before_fence)
1244 ASSERT(*rit == before_fence);
1248 for (;rit != list->rend();rit++)
1249 if ((*rit)->is_fence() && (tid == (*rit)->get_tid()) && (*rit)->is_seqcst())
1255 * Gets the last unlock operation performed on a particular mutex (i.e., memory
1256 * location). This function identifies the mutex according to the current
1257 * action, which is presumed to perform on the same mutex.
1258 * @param curr The current ModelAction; also denotes the object location to
1260 * @return The last unlock operation
1262 ModelAction * ModelExecution::get_last_unlock(ModelAction *curr) const
1264 void *location = curr->get_location();
1266 action_list_t *list = obj_map.get(location);
1267 /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
1268 action_list_t::reverse_iterator rit;
1269 for (rit = list->rbegin();rit != list->rend();rit++)
1270 if ((*rit)->is_unlock() || (*rit)->is_wait())
1275 ModelAction * ModelExecution::get_parent_action(thread_id_t tid) const
1277 ModelAction *parent = get_last_action(tid);
1279 parent = get_thread(tid)->get_creation();
1284 * Returns the clock vector for a given thread.
1285 * @param tid The thread whose clock vector we want
1286 * @return Desired clock vector
1288 ClockVector * ModelExecution::get_cv(thread_id_t tid) const
1290 return get_parent_action(tid)->get_cv();
/**
 * @brief Compare two values for equality over only their low-order bytes
 *
 * @param val1 The first value
 * @param val2 The second value
 * @param size The width of the comparison, in bytes (1, 2, 4, or 8)
 * @return True if the values agree in their low-order 'size' bytes
 */
bool valequals(uint64_t val1, uint64_t val2, int size) {
	/* Dispatch on operand width; without this switch only the 1-byte
	 * comparison would ever execute */
	switch (size) {
	case 1:
		return ((uint8_t)val1) == ((uint8_t)val2);
	case 2:
		return ((uint16_t)val1) == ((uint16_t)val2);
	case 4:
		return ((uint32_t)val1) == ((uint32_t)val2);
	case 8:
		return val1 == val2;
	default:
		/* Unsupported width: treat as unequal */
		return false;
	}
}
/**
 * Build up an initial set of all past writes that this 'read' action may read
 * from, as well as any previously-observed future values that must still be valid.
 *
 * @param curr is the current ModelAction that we are exploring; it must be a
 * read operation.
 */
SnapVector<ModelAction *> * ModelExecution::build_may_read_from(ModelAction *curr)
	SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
	ASSERT(curr->is_read());
	ModelAction *last_sc_write = NULL;
	/* A seq_cst read is constrained by the most recent seq_cst write */
	if (curr->is_seqcst())
		last_sc_write = get_last_seq_cst_write(curr);
	/* Candidate reads-from set, returned to the caller (caller owns it) */
	SnapVector<ModelAction *> * rf_set = new SnapVector<ModelAction *>();
	/* Iterate over all threads */
	/* NOTE(review): loop index 'i' has no visible declaration in this
	 * excerpt — presumably 'unsigned int i;' declared above; confirm. */
	for (i = 0;i < thrd_lists->size();i++) {
		/* Iterate over actions in thread, starting from most recent */
		action_list_t *list = &(*thrd_lists)[i];
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin();rit != list->rend();rit++) {
			ModelAction *act = *rit;
			/* Only consider 'write' actions */
			if (!act->is_write() || act == curr)
			/* Don't consider more than one seq_cst write if we are a seq_cst read. */
			bool allow_read = true;
			if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
			/* Need to check whether we will have two RMW reading from the same value */
			if (curr->is_rmwr()) {
				/* It is okay if we have a failing CAS */
				if (!curr->is_rmwrcas() ||
						valequals(curr->get_value(), act->get_value(), curr->getSize())) {
					//Need to make sure we aren't the second RMW
					CycleNode * node = mo_graph->getNode_noCreate(act);
					if (node != NULL && node->getRMW() != NULL) {
						//we are the second RMW
			/* Only add feasible reads */
			rf_set->push_back(act);
			/* Include at most one act per-thread that "happens before" curr */
			if (act->happens_before(curr))
	/* Debug dump of the computed reads-from candidates */
	if (DBG_ENABLED()) {
		model_print("Reached read action:\n");
		model_print("End printing read_from_past\n");
/**
 * @brief Get an action representing an uninitialized atomic
 *
 * This function may create a new one or try to retrieve one from the NodeStack
 *
 * @param curr The current action, which prompts the creation of an UNINIT action
 * @return A pointer to the UNINIT ModelAction
 */
ModelAction * ModelExecution::get_uninitialized_action(const ModelAction *curr) const
	Node *node = curr->get_node();
	/* Try to reuse an UNINIT action cached on the NodeStack node */
	ModelAction *act = node->get_uninit_action();
	/* NOTE(review): a guard such as 'if (!act)' appears to be missing from
	 * this excerpt before the allocation below — confirm against the full
	 * source, otherwise every call would allocate a fresh action. */
	/* UNINIT actions are attributed to the model-checker thread and carry
	 * the user-configurable "uninitialized" value */
	act = new ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, curr->get_location(), params->uninitvalue, model_thread);
	node->set_uninit_action(act);
	act->create_cv(NULL);
1400 static void print_list(const action_list_t *list)
1402 action_list_t::const_iterator it;
1404 model_print("------------------------------------------------------------------------------------\n");
1405 model_print("# t Action type MO Location Value Rf CV\n");
1406 model_print("------------------------------------------------------------------------------------\n");
1408 unsigned int hash = 0;
1410 for (it = list->begin();it != list->end();it++) {
1411 const ModelAction *act = *it;
1412 if (act->get_seq_number() > 0)
1414 hash = hash^(hash<<3)^((*it)->hash());
1416 model_print("HASH %u\n", hash);
1417 model_print("------------------------------------------------------------------------------------\n");
#if SUPPORT_MOD_ORDER_DUMP
/**
 * @brief Dump the modification-order graph and trace edges to a Graphviz file
 *
 * Writes "<filename>.dot" containing the mo_graph nodes plus reads-from (rf)
 * and sequenced-before (sb) edges for every action in the trace.
 * @param filename Base name of the output file (".dot" is appended)
 */
void ModelExecution::dumpGraph(char *filename) const
	/* NOTE(review): the declaration of 'buffer' is not visible in this
	 * excerpt — presumably a local char array; confirm its size covers
	 * filename plus the ".dot" suffix. */
	sprintf(buffer, "%s.dot", filename);
	FILE *file = fopen(buffer, "w");
	fprintf(file, "digraph %s {\n", filename);
	mo_graph->dumpNodes(file);
	/* Most recent action seen per thread, used to draw sb edges */
	ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());
	for (action_list_t::const_iterator it = action_trace.begin();it != action_trace.end();it++) {
		ModelAction *act = *it;
		if (act->is_read()) {
			mo_graph->dot_print_node(file, act);
			/* rf edge: from the write this action reads from */
			mo_graph->dot_print_edge(file,
					act->get_reads_from(),
					"label=\"rf\", color=red, weight=2");
		if (thread_array[act->get_tid()]) {
			/* sb edge: from the previous action in the same thread */
			mo_graph->dot_print_edge(file,
					thread_array[id_to_int(act->get_tid())],
					"label=\"sb\", color=blue, weight=400");
		thread_array[act->get_tid()] = act;
	fprintf(file, "}\n");
	model_free(thread_array);
/** @brief Prints an execution trace summary. */
void ModelExecution::print_summary() const
#if SUPPORT_MOD_ORDER_DUMP
	/* Also dump the modification-order graph for this execution */
	char buffername[100];
	sprintf(buffername, "exec%04u", get_execution_number());
	mo_graph->dumpGraphToFile(buffername);
	sprintf(buffername, "graph%04u", get_execution_number());
	dumpGraph(buffername);
	/* Header line: execution number plus feasibility / bug annotations */
	model_print("Execution trace %d:", get_execution_number());
	if (isfeasibleprefix()) {
		/* Every thread asleep: the sleep-set made this execution redundant */
		if (scheduler->all_threads_sleeping())
			model_print(" SLEEP-SET REDUNDANT");
		if (have_bug_reports())
			model_print(" DETECTED BUG(S)");
		print_infeasibility(" INFEASIBLE");
	/* Full action-by-action trace table */
	print_list(&action_trace);
1481 * Add a Thread to the system for the first time. Should only be called once
1483 * @param t The Thread to add
1485 void ModelExecution::add_thread(Thread *t)
1487 unsigned int i = id_to_int(t->get_id());
1488 if (i >= thread_map.size())
1489 thread_map.resize(i + 1);
1491 if (!t->is_model_thread())
1492 scheduler->add_thread(t);
1496 * @brief Get a Thread reference by its ID
1497 * @param tid The Thread's ID
1498 * @return A Thread reference
1500 Thread * ModelExecution::get_thread(thread_id_t tid) const
1502 unsigned int i = id_to_int(tid);
1503 if (i < thread_map.size())
1504 return thread_map[i];
1509 * @brief Get a reference to the Thread in which a ModelAction was executed
1510 * @param act The ModelAction
1511 * @return A Thread reference
1513 Thread * ModelExecution::get_thread(const ModelAction *act) const
1515 return get_thread(act->get_tid());
/**
 * @brief Get a Thread reference by its pthread ID
 * @param index The pthread's ID
 * @return A Thread reference
 */
Thread * ModelExecution::get_pthread(pthread_t pid) {
	/* NOTE(review): 'x' is used without a visible declaration in this
	 * excerpt — presumably a union/struct that reinterprets 'pid' as a
	 * small integer id; confirm against the full source. */
	uint32_t thread_id = x.v;
	/* Valid ids are those already registered in pthread_map */
	if (thread_id < pthread_counter + 1) return pthread_map[thread_id];
1535 * @brief Check if a Thread is currently enabled
1536 * @param t The Thread to check
1537 * @return True if the Thread is currently enabled
1539 bool ModelExecution::is_enabled(Thread *t) const
1541 return scheduler->is_enabled(t);
1545 * @brief Check if a Thread is currently enabled
1546 * @param tid The ID of the Thread to check
1547 * @return True if the Thread is currently enabled
1549 bool ModelExecution::is_enabled(thread_id_t tid) const
1551 return scheduler->is_enabled(tid);
1555 * @brief Select the next thread to execute based on the curren action
1557 * RMW actions occur in two parts, and we cannot split them. And THREAD_CREATE
1558 * actions should be followed by the execution of their child thread. In either
1559 * case, the current action should determine the next thread schedule.
1561 * @param curr The current action
1562 * @return The next thread to run, if the current action will determine this
1563 * selection; otherwise NULL
1565 Thread * ModelExecution::action_select_next_thread(const ModelAction *curr) const
1567 /* Do not split atomic RMW */
1568 if (curr->is_rmwr())
1569 return get_thread(curr);
1570 if (curr->is_write()) {
1571 std::memory_order order = curr->get_mo();
1573 case std::memory_order_relaxed:
1574 return get_thread(curr);
1575 case std::memory_order_release:
1576 return get_thread(curr);
1582 /* Follow CREATE with the created thread */
1583 /* which is not needed, because model.cc takes care of this */
1584 if (curr->get_type() == THREAD_CREATE)
1585 return curr->get_thread_operand();
1586 if (curr->get_type() == PTHREAD_CREATE) {
1587 return curr->get_thread_operand();
/**
 * Takes the next step in the execution, if possible.
 * @param curr The current step to take
 * @return Returns the next Thread to run, if any; NULL if this execution
 * does not constrain the next schedule
 */
Thread * ModelExecution::take_step(ModelAction *curr)
	Thread *curr_thrd = get_thread(curr);
	ASSERT(curr_thrd->get_state() == THREAD_READY);
	ASSERT(check_action_enabled(curr));	/* May have side effects? */
	/* check_current_action() may substitute a different ModelAction */
	curr = check_current_action(curr);
	/* A thread that blocked or completed during this step must be removed
	 * from the scheduler */
	if (curr_thrd->is_blocked() || curr_thrd->is_complete())
		scheduler->remove_thread(curr_thrd);
	return action_select_next_thread(curr);
1613 Fuzzer * ModelExecution::getFuzzer() {