11 #include "clockvector.h"
12 #include "cyclegraph.h"
14 #include "threads-model.h"
15 #include "bugmessage.h"
18 #include "newfuzzer.h"
20 #define INITIAL_THREAD_ID 0
23 * Structure for holding small ModelChecker members that should be snapshotted
25 struct model_snapshot_members {
26 model_snapshot_members() :
27 /* First thread created will have id INITIAL_THREAD_ID */
28 next_thread_id(INITIAL_THREAD_ID),
/* Sequence numbers start at 0; get_next_seq_num() pre-increments */
29 used_sequence_numbers(0),
31 bad_synchronization(false),
/* Destructor frees every bug_message owned by the bugs vector */
35 ~model_snapshot_members() {
36 for (unsigned int i = 0;i < bugs.size();i++)
/* Doubles as the count of thread ids handed out so far (see get_num_threads()) */
41 unsigned int next_thread_id;
42 modelclock_t used_sequence_numbers;
/* Bug reports recorded via assert_bug(); entries are heap-allocated and owned here */
43 SnapVector<bug_message *> bugs;
44 /** @brief Incorrectly-ordered synchronization was made */
45 bool bad_synchronization;
51 /** @brief Constructor */
52 ModelExecution::ModelExecution(ModelChecker *m, Scheduler *scheduler) :
57 thread_map(2), /* We'll always need at least 2 threads */
61 condvar_waiters_map(),
65 thrd_last_fence_release(),
/* Snapshotted per-execution bookkeeping (thread ids, seq numbers, bugs, flags) */
66 priv(new struct model_snapshot_members ()),
/* Modification-order constraint graph, built up as actions are processed */
67 mo_graph(new CycleGraph()),
68 fuzzer(new NewFuzzer()),
70 thrd_func_act_lists(),
73 /* Initialize a model-checker thread, for special ModelActions */
74 model_thread = new Thread(get_next_id());
75 add_thread(model_thread);
/* Let the scheduler and fuzzer call back into this execution */
76 scheduler->register_engine(this);
77 fuzzer->register_engine(m->get_history(), this);
80 /** @brief Destructor */
81 ModelExecution::~ModelExecution()
/* Tear down every Thread created during this execution (including the model thread) */
83 for (unsigned int i = 0;i < get_num_threads();i++)
84 delete get_thread(int_to_id(i));
90 int ModelExecution::get_execution_number() const
92 return model->get_execution_number();
/* Fetch the per-object action list for `ptr`, lazily creating (and
 * registering) an empty list on first access. */
95 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr)
97 action_list_t *tmp = hash->get(ptr);
99 tmp = new action_list_t();
/* Same lazy-create pattern as get_safe_ptr_action(), but for per-object,
 * per-thread vectors of action lists. */
105 static SnapVector<action_list_t> * get_safe_ptr_vect_action(HashTable<const void *, SnapVector<action_list_t> *, uintptr_t, 4> * hash, void * ptr)
107 SnapVector<action_list_t> *tmp = hash->get(ptr);
109 tmp = new SnapVector<action_list_t>();
115 /** @return a thread ID for a new Thread */
116 thread_id_t ModelExecution::get_next_id()
118 return priv->next_thread_id++;
121 /** @return the number of user threads created during this execution */
122 unsigned int ModelExecution::get_num_threads() const
124 return priv->next_thread_id;
127 /** @return a sequence number for a new ModelAction */
128 modelclock_t ModelExecution::get_next_seq_num()
130 return ++priv->used_sequence_numbers;
134 * @brief Should the current action wake up a given thread?
136 * @param curr The current action
137 * @param thread The thread that we might wake up
138 * @return True, if we should wake up the sleeping thread; false otherwise
140 bool ModelExecution::should_wake_up(const ModelAction *curr, const Thread *thread) const
/* The action the sleeping thread is blocked on */
142 const ModelAction *asleep = thread->get_pending();
143 /* Don't allow partial RMW to wake anyone up */
146 /* Synchronizing actions may have been backtracked */
147 if (asleep->could_synchronize_with(curr))
149 /* All acquire/release fences and fence-acquire/store-release */
150 if (asleep->is_fence() && asleep->is_acquire() && curr->is_release())
152 /* Fence-release + store can awake load-acquire on the same location */
153 if (asleep->is_read() && asleep->is_acquire() && curr->same_var(asleep) && curr->is_write()) {
154 ModelAction *fence_release = get_last_fence_release(curr->get_tid());
/* Wake only if the sleeper's last action precedes the writer's release fence */
155 if (fence_release && *(get_last_action(thread->get_id())) < *fence_release)
/* Thread-sleep operations wake at the fuzzer's discretion */
158 if (asleep->is_sleep()) {
159 if (fuzzer->shouldWake(asleep))
/* Scan every thread in the scheduler's sleep set and wake those that the
 * current action might unblock (see should_wake_up()). */
166 void ModelExecution::wake_up_sleeping_actions(ModelAction *curr)
168 for (unsigned int i = 0;i < get_num_threads();i++) {
169 Thread *thr = get_thread(int_to_id(i));
170 if (scheduler->is_sleep_set(thr)) {
171 if (should_wake_up(curr, thr))
172 /* Remove this thread from sleep set */
173 scheduler->remove_sleep(thr);
178 /** @brief Alert the model-checker that an incorrectly-ordered
179 * synchronization was made */
180 void ModelExecution::set_bad_synchronization()
182 priv->bad_synchronization = true;
/**
 * @brief Record a bug report for this execution
 * @param msg Human-readable description of the bug
 */
185 bool ModelExecution::assert_bug(const char *msg)
187 priv->bugs.push_back(new bug_message(msg));
/* presumably halts the execution (set_assert) when the buggy trace is a
 * feasible prefix — TODO confirm against full source */
189 if (isfeasibleprefix()) {
196 /** @return True, if any bugs have been reported for this execution */
197 bool ModelExecution::have_bug_reports() const
199 return priv->bugs.size() != 0;
/** @return the vector of bug reports recorded via assert_bug() */
202 SnapVector<bug_message *> * ModelExecution::get_bugs() const
208 * Check whether the current trace has triggered an assertion which should halt
211 * @return True, if the execution should be aborted; false otherwise
213 bool ModelExecution::has_asserted() const
215 return priv->asserted;
219 * Trigger a trace assertion which should cause this execution to be halted.
220 * This can be due to a detected bug or due to an infeasibility that should
223 void ModelExecution::set_assert()
225 priv->asserted = true;
229 * Check if we are in a deadlock. Should only be called at the end of an
230 * execution, although it should not give false positives in the middle of an
231 * execution (there should be some ENABLED thread).
233 * @return True if program is in a deadlock; false otherwise
235 bool ModelExecution::is_deadlocked() const
237 bool blocking_threads = false;
238 for (unsigned int i = 0;i < get_num_threads();i++) {
239 thread_id_t tid = int_to_id(i);
/* A user thread with a pending action is blocked waiting on something */
242 Thread *t = get_thread(tid);
243 if (!t->is_model_thread() && t->get_pending())
244 blocking_threads = true;
/* Deadlocked iff at least one user thread is blocked (and, per the elided
 * check above, none is enabled) */
246 return blocking_threads;
250 * Check if this is a complete execution. That is, have all thread completed
251 * execution (rather than exiting because sleep sets have forced a redundant
254 * @return True if the execution is complete.
256 bool ModelExecution::is_complete_execution() const
/* Any still-enabled thread means the execution has not yet finished */
258 for (unsigned int i = 0;i < get_num_threads();i++)
259 if (is_enabled(int_to_id(i)))
/**
 * @brief Wrap a plain (non-atomic) store to `location` in a relaxed
 * NONATOMIC_WRITE ModelAction, back-dated to the recorded store clock, and
 * splice it into the execution's action lists and the mo-graph.
 */
264 ModelAction * ModelExecution::convertNonAtomicStore(void * location) {
265 uint64_t value = *((const uint64_t *) location);
266 modelclock_t storeclock;
267 thread_id_t storethread;
/* Recover which thread wrote the value, and when */
268 getStoreThreadAndClock(location, &storethread, &storeclock);
/* presumably marks the location as atomic-tracked so it is converted only
 * once — TODO confirm setAtomicStoreFlag semantics */
269 setAtomicStoreFlag(location);
270 ModelAction * act = new ModelAction(NONATOMIC_WRITE, memory_order_relaxed, location, value, get_thread(storethread));
/* Back-date the synthetic write to when the store actually happened */
271 act->set_seq_number(storeclock);
272 add_normal_write_to_lists(act);
273 add_write_to_lists(act);
274 w_modification_order(act);
275 model->get_history()->process_action(act, act->get_tid());
280 * Processes a read model action.
281 * @param curr is the read model action to process.
282 * @param rf_set is the set of model actions we can possibly read from
283 * @return True if processing this read updates the mo_graph.
285 void ModelExecution::process_read(ModelAction *curr, SnapVector<ModelAction *> * rf_set)
287 SnapVector<const ModelAction *> * priorset = new SnapVector<const ModelAction *>();
288 bool hasnonatomicstore = hasNonAtomicStore(curr->get_location());
/* A pending plain store is also a candidate source for this read */
289 if (hasnonatomicstore) {
290 ModelAction * nonatomicstore = convertNonAtomicStore(curr->get_location());
291 rf_set->push_back(nonatomicstore);
/* Let the fuzzer choose which write this read observes */
295 int index = fuzzer->selectWrite(curr, rf_set);
296 ModelAction *rf = (*rf_set)[index];
299 bool canprune = false;
/* On success, every write in priorset must be mo-ordered before rf */
300 if (r_modification_order(curr, rf, priorset, &canprune)) {
301 for(unsigned int i=0;i<priorset->size();i++) {
302 mo_graph->addEdge((*priorset)[i], rf);
305 get_thread(curr)->set_return_value(curr->get_return_value());
/* r_modification_order determined this read is redundant for its thread's
 * per-object list, so drop it from obj_thrd_map */
307 if (canprune && curr->get_type() == ATOMIC_READ) {
308 int tid = id_to_int(curr->get_tid());
309 (*obj_thrd_map.get(curr->get_location()))[tid].pop_back();
/* Remove the rejected candidate by swapping in the set's last element */
314 (*rf_set)[index] = rf_set->back();
320 * Processes a lock, trylock, or unlock model action. @param curr is
321 * the read model action to process.
323 * The try lock operation checks whether the lock is taken. If not,
324 * it falls to the normal lock operation case. If so, it returns
327 * The lock operation has already been checked that it is enabled, so
328 * it just grabs the lock and synchronizes with the previous unlock.
330 * The unlock operation has to re-enable all of the threads that are
331 * waiting on the lock.
333 * @return True if synchronization was updated; false otherwise
335 bool ModelExecution::process_mutex(ModelAction *curr)
337 cdsc::mutex *mutex = curr->get_mutex();
338 struct cdsc::mutex_state *state = NULL;
341 state = mutex->get_state();
343 switch (curr->get_type()) {
344 case ATOMIC_TRYLOCK: {
/* Trylock succeeds iff the mutex is currently free */
345 bool success = !state->locked;
346 curr->set_try_lock(success);
348 get_thread(curr)->set_return_value(0);
351 get_thread(curr)->set_return_value(1);
353 //otherwise fall into the lock case
355 //TODO: FIND SOME BETTER WAY TO CHECK LOCK INITIALIZED OR NOT
356 //if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
357 //	assert_bug("Lock access before initialization");
/* Lock (or successful trylock): record this thread as the owner */
358 state->locked = get_thread(curr);
359 ModelAction *unlock = get_last_unlock(curr);
360 //synchronize with the previous unlock statement
361 if (unlock != NULL) {
362 synchronize(unlock, curr);
368 case ATOMIC_UNLOCK: {
369 //TODO: FIX WAIT SITUATION...WAITS CAN SPURIOUSLY FAIL...TIMED WAITS SHOULD PROBABLY JUST BE THE SAME AS NORMAL WAITS...THINK ABOUT PROBABILITIES THOUGH....AS IN TIMED WAIT MUST FAIL TO GUARANTEE PROGRESS...NORMAL WAIT MAY FAIL...SO NEED NORMAL WAIT TO WORK CORRECTLY IN THE CASE IT SPURIOUSLY FAILS AND IN THE CASE IT DOESN'T... TIMED WAITS MUST EVENMTUALLY RELEASE...
371 /* wake up the other threads */
372 for (unsigned int i = 0;i < get_num_threads();i++) {
373 Thread *t = get_thread(int_to_id(i));
374 Thread *curr_thrd = get_thread(curr);
375 if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
379 /* unlock the lock - after checking who was waiting on it */
380 state->locked = NULL;
382 if (!curr->is_wait())
383 break;/* The rest is only for ATOMIC_WAIT */
387 case ATOMIC_NOTIFY_ALL: {
388 action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
389 //activate all the waiting threads
390 for (sllnode<ModelAction *> * rit = waiters->begin();rit != NULL;rit=rit->getNext()) {
391 scheduler->wake(get_thread(rit->getVal()));
396 case ATOMIC_NOTIFY_ONE: {
397 action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
/* Let the fuzzer pick which single waiter gets woken */
398 if (waiters->size() != 0) {
399 Thread * thread = fuzzer->selectNotify(waiters);
400 scheduler->wake(thread);
412 * Process a write ModelAction
413 * @param curr The ModelAction to process
414 * @return True if the mo_graph was updated or promises were resolved
416 void ModelExecution::process_write(ModelAction *curr)
418 w_modification_order(curr);
419 get_thread(curr)->set_return_value(VALUE_NONE);
423 * Process a fence ModelAction
424 * @param curr The ModelAction to process
425 * @return True if synchronization was updated
427 bool ModelExecution::process_fence(ModelAction *curr)
430 * fence-relaxed: no-op
431 * fence-release: only log the occurence (not in this function), for
432 * use in later synchronization
433 * fence-acquire (this function): search for hypothetical release
435 * fence-seq-cst: MO constraints formed in {r,w}_modification_order
437 bool updated = false;
438 if (curr->is_acquire()) {
439 action_list_t *list = &action_trace;
440 sllnode<ModelAction *> * rit;
441 /* Find X : is_read(X) && X --sb-> curr */
/* Walk the whole trace backwards, considering only this thread's actions */
442 for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
443 ModelAction *act = rit->getVal();
446 if (act->get_tid() != curr->get_tid())
448 /* Stop at the beginning of the thread */
449 if (act->is_thread_start())
451 /* Stop once we reach a prior fence-acquire */
452 if (act->is_fence() && act->is_acquire())
456 /* read-acquire will find its own release sequences */
457 if (act->is_acquire())
460 /* Establish hypothetical release sequences */
461 ClockVector *cv = get_hb_from_write(act->get_reads_from());
/* Merging the write's hb clock into curr counts as new synchronization */
462 if (cv != NULL && curr->get_cv()->merge(cv))
470 * @brief Process the current action for thread-related activity
472 * Performs current-action processing for a THREAD_* ModelAction. Proccesses
473 * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
474 * synchronization, etc. This function is a no-op for non-THREAD actions
475 * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
477 * @param curr The current action
478 * @return True if synchronization was updated or a thread completed
480 void ModelExecution::process_thread_action(ModelAction *curr)
482 switch (curr->get_type()) {
483 case THREAD_CREATE: {
484 thrd_t *thrd = (thrd_t *)curr->get_location();
485 struct thread_params *params = (struct thread_params *)curr->get_value();
486 Thread *th = new Thread(get_next_id(), thrd, params->func, params->arg, get_thread(curr));
487 curr->set_thread_operand(th);
489 th->set_creation(curr);
492 case PTHREAD_CREATE: {
/* Hand out the next pthread index to the caller-visible pthread_t slot */
493 (*(uint32_t *)curr->get_location()) = pthread_counter++;
495 struct pthread_params *params = (struct pthread_params *)curr->get_value();
496 Thread *th = new Thread(get_next_id(), NULL, params->func, params->arg, get_thread(curr));
497 curr->set_thread_operand(th);
499 th->set_creation(curr);
/* Keep the pthread index -> Thread map large enough for the new entry */
501 if ( pthread_map.size() < pthread_counter )
502 pthread_map.resize( pthread_counter );
503 pthread_map[ pthread_counter-1 ] = th;
/* Join cases: inherit happens-before from the joined thread's last action */
508 Thread *blocking = curr->get_thread_operand();
509 ModelAction *act = get_last_action(blocking->get_id());
510 synchronize(act, curr);
514 Thread *blocking = curr->get_thread_operand();
515 ModelAction *act = get_last_action(blocking->get_id());
516 synchronize(act, curr);
517 break;	// WL: to be add (modified)
520 case THREADONLY_FINISH:
521 case THREAD_FINISH: {
522 Thread *th = get_thread(curr);
/* The initial thread finishing has special handling (body elided here) */
523 if (curr->get_type() == THREAD_FINISH &&
524 th == model->getInitThread()) {
530 /* Wake up any joining threads */
531 for (unsigned int i = 0;i < get_num_threads();i++) {
532 Thread *waiting = get_thread(int_to_id(i));
533 if (waiting->waiting_on() == th &&
534 waiting->get_pending()->is_thread_join())
535 scheduler->wake(waiting);
549 * Initialize the current action by performing one or more of the following
550 * actions, as appropriate: merging RMWR and RMWC/RMW actions,
551 * manipulating backtracking sets, allocating and
552 * initializing clock vectors, and computing the promises to fulfill.
554 * @param curr The current action, as passed from the user context; may be
555 * freed/invalidated after the execution of this function, with a different
556 * action "returned" its place (pass-by-reference)
557 * @return True if curr is a newly-explored action; false otherwise
559 bool ModelExecution::initialize_curr_action(ModelAction **curr)
/* RMW / RMW-completion actions get merged into the prior RMWR (process_rmw) */
561 if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
562 ModelAction *newcurr = process_rmw(*curr);
568 ModelAction *newcurr = *curr;
570 newcurr->set_seq_number(get_next_seq_num());
571 /* Always compute new clock vector */
572 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
574 /* Assign most recent release fence */
575 newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
577 return true;	/* This was a new ModelAction */
582 * @brief Establish reads-from relation between two actions
584 * Perform basic operations involved with establishing a concrete rf relation,
585 * including setting the ModelAction data and checking for release sequences.
587 * @param act The action that is reading (must be a read)
588 * @param rf The action from which we are reading (must be a write)
590 * @return True if this read established synchronization
593 void ModelExecution::read_from(ModelAction *act, ModelAction *rf)
596 ASSERT(rf->is_write());
598 act->set_read_from(rf);
/* An acquire load synchronizes: merge in the write's release-sequence clock */
599 if (act->is_acquire()) {
600 ClockVector *cv = get_hb_from_write(rf);
603 act->get_cv()->merge(cv);
608 * @brief Synchronizes two actions
610 * When A synchronizes with B (or A --sw-> B), B inherits A's clock vector.
611 * This function performs the synchronization as well as providing other hooks
612 * for other checks along with synchronization.
614 * @param first The left-hand side of the synchronizes-with relation
615 * @param second The right-hand side of the synchronizes-with relation
616 * @return True if the synchronization was successful (i.e., was consistent
617 * with the execution order); false otherwise
619 bool ModelExecution::synchronize(const ModelAction *first, ModelAction *second)
/* sw edges must agree with execution order; a reversal is bad synchronization */
621 if (*second < *first) {
622 set_bad_synchronization();
625 return second->synchronize_with(first);
629 * @brief Check whether a model action is enabled.
631 * Checks whether an operation would be successful (i.e., is a lock already
632 * locked, or is the joined thread already complete).
634 * For yield-blocking, yields are never enabled.
636 * @param curr is the ModelAction to check whether it is enabled.
637 * @return a bool that indicates whether the action is enabled.
639 bool ModelExecution::check_action_enabled(ModelAction *curr) {
/* A lock is enabled only if the mutex is currently free (check elided) */
640 if (curr->is_lock()) {
641 cdsc::mutex *lock = curr->get_mutex();
642 struct cdsc::mutex_state *state = lock->get_state();
/* A join is blocked until the joined thread has completed */
645 } else if (curr->is_thread_join()) {
646 Thread *blocking = curr->get_thread_operand();
647 if (!blocking->is_complete()) {
/* The fuzzer may decide a sleep should not actually block */
650 } else if (curr->is_sleep()) {
651 if (!fuzzer->shouldSleep(curr))
659 * This is the heart of the model checker routine. It performs model-checking
660 * actions corresponding to a given "current action." Among other processes, it
661 * calculates reads-from relationships, updates synchronization clock vectors,
662 * forms a memory_order constraints graph, and handles replay/backtrack
663 * execution when running permutations of previously-observed executions.
665 * @param curr The current action to process
666 * @return The ModelAction that is actually executed; may be different than
669 ModelAction * ModelExecution::check_current_action(ModelAction *curr)
/* An RMW/RMWC is the completion of an earlier RMWR; it is merged, not re-added */
672 bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
673 bool newly_explored = initialize_curr_action(&curr);
677 wake_up_sleeping_actions(curr);
679 /* Add the action to lists before any other model-checking tasks */
680 if (!second_part_of_rmw && curr->get_type() != NOOP)
681 add_action_to_lists(curr);
683 if (curr->is_write())
684 add_write_to_lists(curr);
686 SnapVector<ModelAction *> * rf_set = NULL;
687 /* Build may_read_from set for newly-created actions */
688 if (newly_explored && curr->is_read())
689 rf_set = build_may_read_from(curr);
691 process_thread_action(curr);
693 if (curr->is_read() && !second_part_of_rmw) {
694 process_read(curr, rf_set);
/* Non-reads (and RMW completions) must not have built an rf_set */
697 ASSERT(rf_set == NULL);
700 if (curr->is_write())
703 if (curr->is_fence())
706 if (curr->is_mutex_op())
713 * This is the strongest feasibility check available.
714 * @return whether the current trace (partial or complete) must be a prefix of
717 bool ModelExecution::isfeasibleprefix() const
719 return !is_infeasible();
723 * Print disagnostic information about an infeasible execution
724 * @param prefix A string to prefix the output with; if NULL, then a default
725 * message prefix will be provided
727 void ModelExecution::print_infeasibility(const char *prefix) const
/* ptr appends each applicable reason into a message buffer (declaration
 * elided); only bad synchronization is currently reported */
731 if (priv->bad_synchronization)
732 ptr += sprintf(ptr, "[bad sw ordering]");
734 model_print("%s: %s", prefix ? prefix : "Infeasible", buf);
738 * Check if the current partial trace is infeasible. Does not check any
739 * end-of-execution flags, which might rule out the execution. Thus, this is
740 * useful only for ruling an execution as infeasible.
741 * @return whether the current partial trace is infeasible.
743 bool ModelExecution::is_infeasible() const
745 return priv->bad_synchronization;
748 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
749 ModelAction * ModelExecution::process_rmw(ModelAction *act) {
/* Merge this RMW/RMWC into the same thread's prior RMWR, then record the
 * write-after-read mo edge for the atomic update */
750 ModelAction *lastread = get_last_action(act->get_tid());
751 lastread->process_rmw(act);
753 mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
759 * @brief Updates the mo_graph with the constraints imposed from the current
762 * Basic idea is the following: Go through each other thread and find
763 * the last action that happened before our read. Two cases:
765 * -# The action is a write: that write must either occur before
766 * the write we read from or be the write we read from.
767 * -# The action is a read: the write that that action read from
768 * must occur before the write we read from or be the same write.
770 * @param curr The current action. Must be a read.
771 * @param rf The ModelAction or Promise that curr reads from. Must be a write.
772 * @return True if modification order edges were added; false otherwise
775 bool ModelExecution::r_modification_order(ModelAction *curr, const ModelAction *rf, SnapVector<const ModelAction *> * priorset, bool * canprune)
777 SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
779 ASSERT(curr->is_read());
781 /* Last SC fence in the current thread */
782 ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
784 int tid = curr->get_tid();
785 ModelAction *prev_same_thread = NULL;
786 /* Iterate over all threads */
/* Start with the reading thread, then wrap around the remaining thread lists */
787 for (i = 0;i < thrd_lists->size();i++, tid = (((unsigned int)(tid+1)) == thrd_lists->size()) ? 0 : tid + 1) {
788 /* Last SC fence in thread tid */
789 ModelAction *last_sc_fence_thread_local = NULL;
791 last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(tid), NULL);
793 /* Last SC fence in thread tid, before last SC fence in current thread */
794 ModelAction *last_sc_fence_thread_before = NULL;
795 if (last_sc_fence_local)
796 last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(tid), last_sc_fence_local);
798 //Only need to iterate if either hb has changed for thread in question or SC fence after last operation...
799 if (prev_same_thread != NULL &&
800 (prev_same_thread->get_cv()->getClock(tid) == curr->get_cv()->getClock(tid)) &&
801 (last_sc_fence_thread_local == NULL || *last_sc_fence_thread_local < *prev_same_thread)) {
805 /* Iterate over actions in thread, starting from most recent */
806 action_list_t *list = &(*thrd_lists)[tid];
807 sllnode<ModelAction *> * rit;
808 for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
809 ModelAction *act = rit->getVal();
814 /* Don't want to add reflexive edges on 'rf' */
815 if (act->equals(rf)) {
816 if (act->happens_before(curr))
822 if (act->is_write()) {
823 /* C++, Section 29.3 statement 5 */
/* checkReachable(rf, act) would mean act is mo-after rf: a cycle, so bail */
824 if (curr->is_seqcst() && last_sc_fence_thread_local &&
825 *act < *last_sc_fence_thread_local) {
826 if (mo_graph->checkReachable(rf, act))
828 priorset->push_back(act);
831 /* C++, Section 29.3 statement 4 */
832 else if (act->is_seqcst() && last_sc_fence_local &&
833 *act < *last_sc_fence_local) {
834 if (mo_graph->checkReachable(rf, act))
836 priorset->push_back(act);
839 /* C++, Section 29.3 statement 6 */
840 else if (last_sc_fence_thread_before &&
841 *act < *last_sc_fence_thread_before) {
842 if (mo_graph->checkReachable(rf, act))
844 priorset->push_back(act);
850 * Include at most one act per-thread that "happens
853 if (act->happens_before(curr)) {
/* Remember the hb-latest action in this thread for the next-iteration skip */
855 if (last_sc_fence_local == NULL ||
856 (*last_sc_fence_local < *act)) {
857 prev_same_thread = act;
860 if (act->is_write()) {
861 if (mo_graph->checkReachable(rf, act))
863 priorset->push_back(act);
/* A prior read constrains rf via the write it observed */
865 const ModelAction *prevrf = act->get_reads_from();
866 if (!prevrf->equals(rf)) {
867 if (mo_graph->checkReachable(rf, prevrf))
869 priorset->push_back(prevrf);
871 if (act->get_tid() == curr->get_tid()) {
872 //Can prune curr from obj list
885 * Updates the mo_graph with the constraints imposed from the current write.
887 * Basic idea is the following: Go through each other thread and find
888 * the lastest action that happened before our write. Two cases:
890 * (1) The action is a write => that write must occur before
893 * (2) The action is a read => the write that that action read from
894 * must occur before the current write.
896 * This method also handles two other issues:
898 * (I) Sequential Consistency: Making sure that if the current write is
899 * seq_cst, that it occurs after the previous seq_cst write.
901 * (II) Sending the write back to non-synchronizing reads.
903 * @param curr The current action. Must be a write.
904 * @param send_fv A vector for stashing reads to which we may pass our future
905 * value. If NULL, then don't record any future values.
906 * @return True if modification order edges were added; false otherwise
908 void ModelExecution::w_modification_order(ModelAction *curr)
910 SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
912 ASSERT(curr->is_write());
/* Sources of mo edges into curr, committed in bulk at the end */
914 SnapList<ModelAction *> edgeset;
916 if (curr->is_seqcst()) {
917 /* We have to at least see the last sequentially consistent write,
918 so we are initialized. */
919 ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
920 if (last_seq_cst != NULL) {
921 edgeset.push_back(last_seq_cst);
923 //update map for next query
924 obj_last_sc_map.put(curr->get_location(), curr);
927 /* Last SC fence in the current thread */
928 ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
930 /* Iterate over all threads */
931 for (i = 0;i < thrd_lists->size();i++) {
932 /* Last SC fence in thread i, before last SC fence in current thread */
933 ModelAction *last_sc_fence_thread_before = NULL;
934 if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
935 last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
937 /* Iterate over actions in thread, starting from most recent */
938 action_list_t *list = &(*thrd_lists)[i];
939 sllnode<ModelAction*>* rit;
940 for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
941 ModelAction *act = rit->getVal();
944 * 1) If RMW and it actually read from something, then we
945 * already have all relevant edges, so just skip to next
948 * 2) If RMW and it didn't read from anything, we should
949 * whatever edge we can get to speed up convergence.
951 * 3) If normal write, we need to look at earlier actions, so
952 * continue processing list.
954 if (curr->is_rmw()) {
955 if (curr->get_reads_from() != NULL)
963 /* C++, Section 29.3 statement 7 */
964 if (last_sc_fence_thread_before && act->is_write() &&
965 *act < *last_sc_fence_thread_before) {
966 edgeset.push_back(act);
971 * Include at most one act per-thread that "happens
974 if (act->happens_before(curr)) {
976 * Note: if act is RMW, just add edge:
978 * The following edge should be handled elsewhere:
979 * readfrom(act) --mo--> act
982 edgeset.push_back(act);
983 else if (act->is_read()) {
984 //if previous read accessed a null, just keep going
985 edgeset.push_back(act->get_reads_from());
/* Commit all collected sources as mo edges pointing to curr in one call */
991 mo_graph->addEdges(&edgeset, curr);
996 * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
997 * some constraints. This method checks one the following constraint (others
998 * require compiler support):
1000 * If X --hb-> Y --mo-> Z, then X should not read from Z.
1001 * If X --hb-> Y, A --rf-> Y, and A --mo-> Z, then X should not read from Z.
1003 bool ModelExecution::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
1005 SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(reader->get_location());
1007 /* Iterate over all threads */
1008 for (i = 0;i < thrd_lists->size();i++) {
/* Earliest write (direct or observed by a read) that is hb-after the reader */
1009 const ModelAction *write_after_read = NULL;
1011 /* Iterate over actions in thread, starting from most recent */
1012 action_list_t *list = &(*thrd_lists)[i];
1013 sllnode<ModelAction *>* rit;
1014 for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
1015 ModelAction *act = rit->getVal();
1017 /* Don't disallow due to act == reader */
1018 if (!reader->happens_before(act) || reader == act)
1020 else if (act->is_write())
1021 write_after_read = act;
1022 else if (act->is_read() && act->get_reads_from() != NULL)
1023 write_after_read = act->get_reads_from();
/* Reading from writer is disallowed if some hb-later write is mo-before it */
1026 if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
1033 * Computes the clock vector that happens before propagates from this write.
1035 * @param rf The action that might be part of a release sequence. Must be a
1037 * @return ClockVector of happens before relation.
1040 ClockVector * ModelExecution::get_hb_from_write(ModelAction *rf) const {
/* Walk backwards through the chain of RMWs, collecting writes whose
 * release-sequence clock vector (rfcv) has not been computed yet */
1041 SnapVector<ModelAction *> * processset = NULL;
1042 for ( ;rf != NULL;rf = rf->get_reads_from()) {
1043 ASSERT(rf->is_write());
/* Stop at a non-RMW, an acq+rel RMW, or a write with a cached rfcv */
1044 if (!rf->is_rmw() || (rf->is_acquire() && rf->is_release()) || rf->get_rfcv() != NULL)
1046 if (processset == NULL)
1047 processset = new SnapVector<ModelAction *>();
1048 processset->push_back(rf);
/* Now process the collected writes from oldest to newest, filling in rfcv */
1051 int i = (processset == NULL) ? 0 : processset->size();
1053 ClockVector * vec = NULL;
1055 if (rf->get_rfcv() != NULL) {
1056 vec = rf->get_rfcv();
1057 } else if (rf->is_acquire() && rf->is_release()) {
1059 } else if (rf->is_release() && !rf->is_rmw()) {
1061 } else if (rf->is_release()) {
1062 //have rmw that is release and doesn't have a rfcv
1063 (vec = new ClockVector(vec, NULL))->merge(rf->get_cv());
1066 //operation that isn't release
1067 if (rf->get_last_fence_release()) {
1069 vec = rf->get_last_fence_release()->get_cv();
1071 (vec=new ClockVector(vec, NULL))->merge(rf->get_last_fence_release()->get_cv());
1077 rf = (*processset)[i];
1081 if (processset != NULL)
1087 * Performs various bookkeeping operations for the current ModelAction. For
1088 * instance, adds action to the per-object, per-thread action vector and to the
1089 * action trace list of all thread actions.
1091 * @param act is the ModelAction to add.
1093 void ModelExecution::add_action_to_lists(ModelAction *act)
1095 int tid = id_to_int(act->get_tid());
1096 ModelAction *uninit = NULL;
1098 action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
/* First access to an atomic object: seed all lists with a synthetic
 * uninitialized-value write so reads always have a candidate source */
1099 if (list->empty() && act->is_atomic_var()) {
1100 uninit = get_uninitialized_action(act);
1101 uninit_id = id_to_int(uninit->get_tid());
1102 list->push_front(uninit);
1103 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_wr_thrd_map, act->get_location());
/* Grow the per-thread write vector and placement-construct new slots */
1104 if (uninit_id >= (int)vec->size()) {
1105 int oldsize = (int) vec->size();
1106 vec->resize(uninit_id + 1);
1107 for(int i=oldsize;i<uninit_id+1;i++) {
1108 new(&(*vec)[i]) action_list_t();
1111 (*vec)[uninit_id].push_front(uninit);
1113 list->push_back(act);
1115 // Update action trace, a total order of all actions
1116 action_trace.push_back(act);
1118 action_trace.push_front(uninit);
1120 // Update obj_thrd_map, a per location, per thread, order of actions
1121 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
1122 if (tid >= (int)vec->size()) {
1123 uint oldsize =vec->size();
1124 vec->resize(priv->next_thread_id);
1125 for(uint i=oldsize;i<priv->next_thread_id;i++)
1126 new (&(*vec)[i]) action_list_t();
1128 (*vec)[tid].push_back(act);
1130 (*vec)[uninit_id].push_front(uninit);
1132 // Update thrd_last_action, the last action taken by each thrad
1133 if ((int)thrd_last_action.size() <= tid)
1134 thrd_last_action.resize(get_num_threads());
1135 thrd_last_action[tid] = act;
1137 thrd_last_action[uninit_id] = uninit;
1139 // Update thrd_last_fence_release, the last release fence taken by each thread
1140 if (act->is_fence() && act->is_release()) {
1141 if ((int)thrd_last_fence_release.size() <= tid)
1142 thrd_last_fence_release.resize(get_num_threads());
1143 thrd_last_fence_release[tid] = act;
/* Waits also get recorded against the mutex location they operate on */
1146 if (act->is_wait()) {
1147 void *mutex_loc = (void *) act->get_value();
1148 get_safe_ptr_action(&obj_map, mutex_loc)->push_back(act);
1150 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, mutex_loc);
1151 if (tid >= (int)vec->size()) {
1152 uint oldsize = vec->size();
1153 vec->resize(priv->next_thread_id);
1154 for(uint i=oldsize;i<priv->next_thread_id;i++)
1155 new (&(*vec)[i]) action_list_t();
1157 (*vec)[tid].push_back(act);
/* Insert `act` into `list` ordered by sequence number. Lists are normally
 * appended in order; lazily-created actions (e.g. converted non-atomic
 * stores) may need to land in the middle, found by scanning backwards for a
 * node with the same sequence number. */
1161 void insertIntoActionList(action_list_t *list, ModelAction *act) {
1162 sllnode<ModelAction*> * rit = list->end();
1163 modelclock_t next_seq = act->get_seq_number();
/* Fast path: empty list, or the tail already shares act's sequence number */
1164 if (rit == NULL || (rit->getVal()->get_seq_number() == next_seq))
1165 list->push_back(act);
1167 for(;rit != NULL;rit=rit->getPrev()) {
1168 if (rit->getVal()->get_seq_number() == next_seq) {
1169 list->insertAfter(rit, act);
/**
 * @brief Insert an action into a sequence-number-ordered list and
 * initialize the action's clock vector from its insertion neighbor.
 *
 * Same placement logic as insertIntoActionList(), but additionally
 * calls create_cv() so the new action's clock vector derives from the
 * action it is placed after (or from NULL when there is no such
 * action).
 */
1176 void insertIntoActionListAndSetCV(action_list_t *list, ModelAction *act) {
1177 sllnode<ModelAction*> * rit = list->end();
1178 modelclock_t next_seq = act->get_seq_number();
/* NOTE(review): the guard for this branch is elided in this listing --
 * presumably the empty-list case (rit == NULL); confirm against the
 * full source. */
1180 act->create_cv(NULL);
1181 } else if (rit->getVal()->get_seq_number() == next_seq) {
/* Tail has the same sequence number: derive CV from it and append */
1182 act->create_cv(rit->getVal());
1183 list->push_back(act);
/* Scan backward for the node sharing this sequence number */
1185 for(;rit != NULL;rit=rit->getPrev()) {
1186 if (rit->getVal()->get_seq_number() == next_seq) {
1187 act->create_cv(rit->getVal());
1188 list->insertAfter(rit, act);
1196 * Performs various bookkeeping operations for a normal write. The
1197 * complication is that we are typically inserting a normal write
1198 * lazily, so we need to insert it into the middle of lists.
1200 * @param act is the ModelAction to add.
1203 void ModelExecution::add_normal_write_to_lists(ModelAction *act)
1205 int tid = id_to_int(act->get_tid());
/* Insert into the global trace (ordered by seq number) and set the CV */
1206 insertIntoActionListAndSetCV(&action_trace, act);
/* Insert into the per-location list for this write's address */
1208 action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
1209 insertIntoActionList(list, act);
1211 // Update obj_thrd_map, a per location, per thread, order of actions
1212 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
1213 if (tid >= (int)vec->size()) {
/* Grow the per-thread vector to cover all current thread ids,
 * explicitly constructing each newly exposed slot in place */
1214 uint oldsize =vec->size();
1215 vec->resize(priv->next_thread_id);
1216 for(uint i=oldsize;i<priv->next_thread_id;i++)
1217 new (&(*vec)[i]) action_list_t();
1219 insertIntoActionList(&(*vec)[tid],act);
1221 // Update thrd_last_action, the last action taken by each thread
1222 if (thrd_last_action[tid]->get_seq_number() == act->get_seq_number())
1223 thrd_last_action[tid] = act;
1227 void ModelExecution::add_write_to_lists(ModelAction *write) {
1228 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_wr_thrd_map, write->get_location());
1229 int tid = id_to_int(write->get_tid());
1230 if (tid >= (int)vec->size()) {
1231 uint oldsize =vec->size();
1232 vec->resize(priv->next_thread_id);
1233 for(uint i=oldsize;i<priv->next_thread_id;i++)
1234 new (&(*vec)[i]) action_list_t();
1236 (*vec)[tid].push_back(write);
1240 * @brief Get the last action performed by a particular Thread
1241 * @param tid The thread ID of the Thread in question
1242 * @return The last action in the thread
1244 ModelAction * ModelExecution::get_last_action(thread_id_t tid) const
1246 int threadid = id_to_int(tid);
/* Bounds check: a thread outside the map has no recorded action yet
 * (the fall-through/NULL return is elided in this listing) */
1247 if (threadid < (int)thrd_last_action.size())
1248 return thrd_last_action[id_to_int(tid)];
1254 * @brief Get the last fence release performed by a particular Thread
1255 * @param tid The thread ID of the Thread in question
1256 * @return The last fence release in the thread, if one exists; NULL otherwise
1258 ModelAction * ModelExecution::get_last_fence_release(thread_id_t tid) const
1260 int threadid = id_to_int(tid);
/* Bounds check: a thread outside the map has no recorded release fence
 * (the fall-through/NULL return is elided in this listing) */
1261 if (threadid < (int)thrd_last_fence_release.size())
1262 return thrd_last_fence_release[id_to_int(tid)];
1268 * Gets the last memory_order_seq_cst write (in the total global sequence)
1269 * performed on a particular object (i.e., memory location), not including the
1271 * @param curr The current ModelAction; also denotes the object location to
1273 * @return The last seq_cst write
1275 ModelAction * ModelExecution::get_last_seq_cst_write(ModelAction *curr) const
1277 void *location = curr->get_location();
/* obj_last_sc_map caches the most recent SC write per location */
1278 return obj_last_sc_map.get(location);
1282 * Gets the last memory_order_seq_cst fence (in the total global sequence)
1283 * performed in a particular thread, prior to a particular fence.
1284 * @param tid The ID of the thread to check
1285 * @param before_fence The fence from which to begin the search; if NULL, then
1286 * search for the most recent fence in the thread.
1287 * @return The last prior seq_cst fence in the thread, if exists; otherwise, NULL
1289 ModelAction * ModelExecution::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
1291 /* All fences should have location FENCE_LOCATION */
1292 action_list_t *list = obj_map.get(FENCE_LOCATION);
1297 sllnode<ModelAction*>* rit = list->end();
/* First, rewind to before_fence so the search only covers earlier
 * actions (this loop is skipped when before_fence is NULL -- the
 * guard lines are elided in this listing) */
1300 for (;rit != NULL;rit=rit->getPrev())
1301 if (rit->getVal() == before_fence)
1304 ASSERT(rit->getVal() == before_fence);
/* Then continue backward looking for a seq_cst fence in thread tid */
1308 for (;rit != NULL;rit=rit->getPrev()) {
1309 ModelAction *act = rit->getVal();
1310 if (act->is_fence() && (tid == act->get_tid()) && act->is_seqcst())
1317 * Gets the last unlock operation performed on a particular mutex (i.e., memory
1318 * location). This function identifies the mutex according to the current
1319 * action, which is presumed to perform on the same mutex.
1320 * @param curr The current ModelAction; also denotes the object location to
1322 * @return The last unlock operation
1324 ModelAction * ModelExecution::get_last_unlock(ModelAction *curr) const
1326 void *location = curr->get_location();
1328 action_list_t *list = obj_map.get(location);
1329 /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
1330 sllnode<ModelAction*>* rit;
/* Walk backward; waits count as unlocks here since the condition
 * accepts either kind of action */
1331 for (rit = list->end();rit != NULL;rit=rit->getPrev())
1332 if (rit->getVal()->is_unlock() || rit->getVal()->is_wait())
1333 return rit->getVal();
/**
 * @brief Get the "parent" action of a thread: its last action, falling
 * back to the action that created the thread.
 * @param tid The thread whose parent action we want
 * @return The thread's last action if it has one; otherwise the
 * thread's creation action (the guard between the two cases is elided
 * in this listing)
 */
1337 ModelAction * ModelExecution::get_parent_action(thread_id_t tid) const
1339 ModelAction *parent = get_last_action(tid);
/* No prior action recorded: use the thread's creation action */
1341 parent = get_thread(tid)->get_creation();
1346 * Returns the clock vector for a given thread.
1347 * @param tid The thread whose clock vector we want
1348 * @return Desired clock vector
1350 ClockVector * ModelExecution::get_cv(thread_id_t tid) const
1352 ModelAction *firstaction=get_parent_action(tid);
/* A thread with no parent action has no clock vector yet */
1353 return firstaction != NULL ? firstaction->get_cv() : NULL;
/**
 * @brief Compare two values for equality at a given operand width.
 *
 * Truncates both values to the low-order @a size bytes before
 * comparing, so e.g. a 1-byte atomic only compares its low byte.
 * (The switch-on-size header and default case are elided in this
 * listing -- presumably size selects among the 1/2/4-byte branches
 * below; confirm against the full source.)
 *
 * @param val1 First value
 * @param val2 Second value
 * @param size Operand width in bytes
 * @return True if the truncated values are equal
 */
1356 bool valequals(uint64_t val1, uint64_t val2, int size) {
1359 return ((uint8_t)val1) == ((uint8_t)val2);
1361 return ((uint16_t)val1) == ((uint16_t)val2);
1363 return ((uint32_t)val1) == ((uint32_t)val2);
1373 * Build up an initial set of all past writes that this 'read' action may read
1374 * from, as well as any previously-observed future values that must still be valid.
1376 * @param curr is the current ModelAction that we are exploring; it must be a
1379 SnapVector<ModelAction *> * ModelExecution::build_may_read_from(ModelAction *curr)
/* Per-thread lists of prior writes to this location */
1381 SnapVector<action_list_t> *thrd_lists = obj_wr_thrd_map.get(curr->get_location());
1383 ASSERT(curr->is_read());
1385 ModelAction *last_sc_write = NULL;
/* For an SC read, remember the last SC write so SC-ordering
 * restrictions can be applied below */
1387 if (curr->is_seqcst())
1388 last_sc_write = get_last_seq_cst_write(curr);
1390 SnapVector<ModelAction *> * rf_set = new SnapVector<ModelAction *>();
1392 /* Iterate over all threads */
1393 for (i = 0;i < thrd_lists->size();i++) {
1394 /* Iterate over actions in thread, starting from most recent */
1395 action_list_t *list = &(*thrd_lists)[i];
1396 sllnode<ModelAction *> * rit;
1397 for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
1398 ModelAction *act = rit->getVal();
1403 /* Don't consider more than one seq_cst write if we are a seq_cst read. */
1404 bool allow_read = true;
1406 if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
1409 /* Need to check whether we will have two RMW reading from the same value */
1410 if (curr->is_rmwr()) {
1411 /* It is okay if we have a failing CAS */
1412 if (!curr->is_rmwrcas() ||
1413 valequals(curr->get_value(), act->get_value(), curr->getSize())) {
1414 //Need to make sure we aren't the second RMW
1415 CycleNode * node = mo_graph->getNode_noCreate(act);
1416 if (node != NULL && node->getRMW() != NULL) {
1417 //we are the second RMW
1424 /* Only add feasible reads */
1425 rf_set->push_back(act);
1428 /* Include at most one act per-thread that "happens before" curr */
1429 if (act->happens_before(curr))
/* Debug dump of the computed may-read-from set */
1434 if (DBG_ENABLED()) {
1435 model_print("Reached read action:\n");
1437 model_print("End printing read_from_past\n");
1443 * @brief Get an action representing an uninitialized atomic
1445 * This function may create a new one.
1447 * @param curr The current action, which prompts the creation of an UNINIT action
1448 * @return A pointer to the UNINIT ModelAction
1450 ModelAction * ModelExecution::get_uninitialized_action(ModelAction *curr) const
1452 ModelAction *act = curr->get_uninit_action();
/* Lazily create and cache the UNINIT action on curr (the NULL-check
 * guard is elided in this listing); the UNINIT action belongs to the
 * special model thread and carries the configured uninit value */
1454 act = new ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, curr->get_location(), params->uninitvalue, model_thread);
1455 curr->set_uninit_action(act);
/* UNINIT happens before everything: empty clock vector */
1457 act->create_cv(NULL);
/**
 * @brief Print a formatted table of the actions in a list, followed by
 * a hash of the trace (useful for quickly comparing executions).
 * @param list The action list to print
 */
1461 static void print_list(action_list_t *list)
1463 sllnode<ModelAction*> *it;
1465 model_print("------------------------------------------------------------------------------------\n");
1466 model_print("# t Action type MO Location Value Rf CV\n");
1467 model_print("------------------------------------------------------------------------------------\n");
1469 unsigned int hash = 0;
1471 for (it = list->begin();it != NULL;it=it->getNext()) {
1472 const ModelAction *act = it->getVal();
/* Only actions that were actually executed (seq > 0) are shown */
1473 if (act->get_seq_number() > 0)
/* Fold each action's hash into a running trace hash */
1475 hash = hash^(hash<<3)^(it->getVal()->hash());
1477 model_print("HASH %u\n", hash);
1478 model_print("------------------------------------------------------------------------------------\n");
1481 #if SUPPORT_MOD_ORDER_DUMP
/**
 * @brief Dump the modification-order graph plus rf/sb edges to a
 * Graphviz .dot file named "<filename>.dot".
 * @param filename Base name for the output file (also used as the
 * digraph name)
 */
1482 void ModelExecution::dumpGraph(char *filename)
1485 sprintf(buffer, "%s.dot", filename);
1486 FILE *file = fopen(buffer, "w");
1487 fprintf(file, "digraph %s {\n", filename);
1488 mo_graph->dumpNodes(file);
/* thread_array[tid] tracks each thread's most recent action so that
 * sequenced-before (sb) edges can be drawn between consecutive actions */
1489 ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());
1491 for (sllnode<ModelAction*>* it = action_trace.begin();it != NULL;it=it->getNext()) {
1492 ModelAction *act = it->getVal();
1493 if (act->is_read()) {
1494 mo_graph->dot_print_node(file, act);
/* reads-from edge: from the write this read observed */
1495 mo_graph->dot_print_edge(file,
1496 act->get_reads_from(),
1498 "label=\"rf\", color=red, weight=2");
1500 if (thread_array[act->get_tid()]) {
/* sequenced-before edge from the thread's previous action */
1501 mo_graph->dot_print_edge(file,
1502 thread_array[id_to_int(act->get_tid())],
1504 "label=\"sb\", color=blue, weight=400");
1507 thread_array[act->get_tid()] = act;
1509 fprintf(file, "}\n");
1510 model_free(thread_array);
1515 /** @brief Prints an execution trace summary. */
1516 void ModelExecution::print_summary()
/* Optionally dump the mo-graph in two formats for offline inspection */
1518 #if SUPPORT_MOD_ORDER_DUMP
1519 char buffername[100];
1520 sprintf(buffername, "exec%04u", get_execution_number());
1521 mo_graph->dumpGraphToFile(buffername);
1522 sprintf(buffername, "graph%04u", get_execution_number());
1523 dumpGraph(buffername);
/* Header line notes feasibility, sleep-set redundancy, and bugs */
1526 model_print("Execution trace %d:", get_execution_number());
1527 if (isfeasibleprefix()) {
1528 if (scheduler->all_threads_sleeping())
1529 model_print(" SLEEP-SET REDUNDANT");
1530 if (have_bug_reports())
1531 model_print(" DETECTED BUG(S)");
1533 print_infeasibility(" INFEASIBLE");
1536 print_list(&action_trace);
1542 * Add a Thread to the system for the first time. Should only be called once
1544 * @param t The Thread to add
1546 void ModelExecution::add_thread(Thread *t)
1548 unsigned int i = id_to_int(t->get_id());
/* Grow the id-indexed thread map on demand */
1549 if (i >= thread_map.size())
1550 thread_map.resize(i + 1);
/* The special model thread is never scheduled */
1552 if (!t->is_model_thread())
1553 scheduler->add_thread(t);
1557 * @brief Get a Thread reference by its ID
1558 * @param tid The Thread's ID
1559 * @return A Thread reference
1561 Thread * ModelExecution::get_thread(thread_id_t tid) const
1563 unsigned int i = id_to_int(tid);
/* Bounds check; the out-of-range fall-through is elided in this listing */
1564 if (i < thread_map.size())
1565 return thread_map[i];
1570 * @brief Get a reference to the Thread in which a ModelAction was executed
1571 * @param act The ModelAction
1572 * @return A Thread reference
1574 Thread * ModelExecution::get_thread(const ModelAction *act) const
/* Delegate to the tid-based lookup */
1576 return get_thread(act->get_tid());
1580 * @brief Get a Thread reference by its pthread ID
1581 * @param index The pthread's ID
1582 * @return A Thread reference
1584 Thread * ModelExecution::get_pthread(pthread_t pid) {
/* NOTE(review): the declaration of 'x' is elided in this listing --
 * presumably a union that reinterprets the pthread_t bits as a
 * uint32_t id; confirm against the full source */
1590 uint32_t thread_id = x.v;
/* Bounds check against the known pthread count before indexing */
1591 if (thread_id < pthread_counter + 1) return pthread_map[thread_id];
1596 * @brief Check if a Thread is currently enabled
1597 * @param t The Thread to check
1598 * @return True if the Thread is currently enabled
1600 bool ModelExecution::is_enabled(Thread *t) const
/* Delegate to the scheduler, which owns enabled-state */
1602 return scheduler->is_enabled(t);
1606 * @brief Check if a Thread is currently enabled
1607 * @param tid The ID of the Thread to check
1608 * @return True if the Thread is currently enabled
1610 bool ModelExecution::is_enabled(thread_id_t tid) const
/* Delegate to the scheduler, which owns enabled-state */
1612 return scheduler->is_enabled(tid);
1616 * @brief Select the next thread to execute based on the current action
1618 * RMW actions occur in two parts, and we cannot split them. And THREAD_CREATE
1619 * actions should be followed by the execution of their child thread. In either
1620 * case, the current action should determine the next thread schedule.
1622 * @param curr The current action
1623 * @return The next thread to run, if the current action will determine this
1624 * selection; otherwise NULL
1626 Thread * ModelExecution::action_select_next_thread(const ModelAction *curr) const
1628 /* Do not split atomic RMW */
1629 if (curr->is_rmwr())
1630 return get_thread(curr);
1631 /* Follow CREATE with the created thread */
1632 /* which is not needed, because model.cc takes care of this */
1633 if (curr->get_type() == THREAD_CREATE)
1634 return curr->get_thread_operand();
/* pthread_create likewise schedules the new child next */
1635 if (curr->get_type() == PTHREAD_CREATE) {
1636 return curr->get_thread_operand();
1642 * Takes the next step in the execution, if possible.
1643 * @param curr The current step to take
1644 * @return Returns the next Thread to run, if any; NULL if this execution
1647 Thread * ModelExecution::take_step(ModelAction *curr)
1649 Thread *curr_thrd = get_thread(curr);
1650 ASSERT(curr_thrd->get_state() == THREAD_READY);
1652 ASSERT(check_action_enabled(curr)); /* May have side effects? */
/* check_current_action may replace curr (e.g. with a recycled action) */
1653 curr = check_current_action(curr);
1656 /* Process this action in ModelHistory for records */
1657 model->get_history()->process_action( curr, curr->get_tid() );
/* A thread that blocked or finished must leave the scheduler */
1659 if (curr_thrd->is_blocked() || curr_thrd->is_complete())
1660 scheduler->remove_thread(curr_thrd);
/* Let the action dictate the next thread (RMW / thread-create cases) */
1662 return action_select_next_thread(curr);
1665 Fuzzer * ModelExecution::getFuzzer() {