11 #include "clockvector.h"
12 #include "cyclegraph.h"
14 #include "threads-model.h"
15 #include "bugmessage.h"
19 #define INITIAL_THREAD_ID 0
22 * Structure for holding small ModelChecker members that should be snapshotted
// NOTE(review): interior lines of this struct are elided in this view (the
// embedded original line numbers are discontinuous); comments describe only
// what is visible here.
24 struct model_snapshot_members {
25 model_snapshot_members() :
26 /* First thread created will have id INITIAL_THREAD_ID */
27 next_thread_id(INITIAL_THREAD_ID),
28 used_sequence_numbers(0),
31 bad_synchronization(false),
// Destructor: releases the heap-allocated bug_message objects accumulated in
// 'bugs' and 'dataraces' (the delete statements inside the loops are elided
// from this view — TODO confirm against the full source).
35 ~model_snapshot_members() {
36 for (unsigned int i = 0;i < bugs.size();i++)
38 for (unsigned int i = 0;i < dataraces.size(); i++)
// next_thread_id doubles as the count of threads created so far; see
// ModelExecution::get_num_threads(), which returns it directly.
44 unsigned int next_thread_id;
45 modelclock_t used_sequence_numbers;
46 SnapVector<bug_message *> bugs;
// Data races are kept in their own list so execution can continue until the
// race count crosses a threshold (see have_fatal_bug_reports()).
47 SnapVector<bug_message *> dataraces;
48 /** @brief Incorrectly-ordered synchronization was made */
49 bool bad_synchronization;
55 /** @brief Constructor */
56 ModelExecution::ModelExecution(ModelChecker *m, Scheduler *scheduler) :
61 thread_map(2), /* We'll always need at least 2 threads */
65 condvar_waiters_map(),
69 thrd_last_fence_release(),
70 priv(new struct model_snapshot_members ()),
71 mo_graph(new CycleGraph()),
74 thrd_func_inst_lists()
76 /* Initialize a model-checker thread, for special ModelActions */
77 model_thread = new Thread(get_next_id());
78 add_thread(model_thread);
79 scheduler->register_engine(this);
82 /** @brief Destructor */
83 ModelExecution::~ModelExecution()
85 for (unsigned int i = 0;i < get_num_threads();i++)
86 delete get_thread(int_to_id(i));
92 int ModelExecution::get_execution_number() const
94 return model->get_execution_number();
97 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr)
99 action_list_t *tmp = hash->get(ptr);
101 tmp = new action_list_t();
107 static SnapVector<action_list_t> * get_safe_ptr_vect_action(HashTable<const void *, SnapVector<action_list_t> *, uintptr_t, 4> * hash, void * ptr)
109 SnapVector<action_list_t> *tmp = hash->get(ptr);
111 tmp = new SnapVector<action_list_t>();
117 /** @return a thread ID for a new Thread */
118 thread_id_t ModelExecution::get_next_id()
120 return priv->next_thread_id++;
123 /** @return the number of user threads created during this execution */
124 unsigned int ModelExecution::get_num_threads() const
126 return priv->next_thread_id;
129 /** @return a sequence number for a new ModelAction */
130 modelclock_t ModelExecution::get_next_seq_num()
132 return ++priv->used_sequence_numbers;
136 * @brief Should the current action wake up a given thread?
138 * @param curr The current action
139 * @param thread The thread that we might wake up
140 * @return True, if we should wake up the sleeping thread; false otherwise
142 bool ModelExecution::should_wake_up(const ModelAction *curr, const Thread *thread) const
144 const ModelAction *asleep = thread->get_pending();
145 /* Don't allow partial RMW to wake anyone up */
// NOTE(review): the return statement under each condition below is elided in
// this view; each visible 'if' presumably returns true/false — TODO confirm
// against the full source.
148 /* Synchronizing actions may have been backtracked */
149 if (asleep->could_synchronize_with(curr))
151 /* All acquire/release fences and fence-acquire/store-release */
152 if (asleep->is_fence() && asleep->is_acquire() && curr->is_release())
154 /* Fence-release + store can awake load-acquire on the same location */
155 if (asleep->is_read() && asleep->is_acquire() && curr->same_var(asleep) && curr->is_write()) {
156 ModelAction *fence_release = get_last_fence_release(curr->get_tid());
// Wake only when the sleeper's last action is ordered before the writer's
// last fence-release (ModelAction operator< presumably compares sequence
// numbers — TODO confirm).
157 if (fence_release && *(get_last_action(thread->get_id())) < *fence_release)
// Scan every thread; any thread currently in the scheduler's sleep set that
// should_wake_up() approves for 'curr' is removed from the sleep set.
163 void ModelExecution::wake_up_sleeping_actions(ModelAction *curr)
165 for (unsigned int i = 0;i < get_num_threads();i++) {
166 Thread *thr = get_thread(int_to_id(i));
167 if (scheduler->is_sleep_set(thr)) {
168 if (should_wake_up(curr, thr))
169 /* Remove this thread from sleep set */
170 scheduler->remove_sleep(thr);
175 /** @brief Alert the model-checker that an incorrectly-ordered
176 * synchronization was made */
177 void ModelExecution::set_bad_synchronization()
179 priv->bad_synchronization = true;
182 bool ModelExecution::assert_bug(const char *msg)
184 priv->bugs.push_back(new bug_message(msg));
186 if (isfeasibleprefix()) {
193 /* @brief Put data races in a different list from other bugs. The purpose
194 * is to continue the program untill the number of data races exceeds a
197 /* TODO: check whether set_assert should be called */
198 bool ModelExecution::assert_race(const char *msg)
200 priv->dataraces.push_back(new bug_message(msg));
202 if (isfeasibleprefix()) {
209 /** @return True, if any bugs have been reported for this execution */
210 bool ModelExecution::have_bug_reports() const
212 return priv->bugs.size() != 0;
215 /** @return True, if any fatal bugs have been reported for this execution.
216 * Any bug other than a data race is considered a fatal bug. Data races
217 * are not considered fatal unless the number of races is exceeds
218 * a threshold (temporarily set as 15).
220 bool ModelExecution::have_fatal_bug_reports() const
222 return priv->bugs.size() != 0 || priv->dataraces.size() >= 15;
225 SnapVector<bug_message *> * ModelExecution::get_bugs() const
231 * Check whether the current trace has triggered an assertion which should halt
234 * @return True, if the execution should be aborted; false otherwise
236 bool ModelExecution::has_asserted() const
238 return priv->asserted;
242 * Trigger a trace assertion which should cause this execution to be halted.
243 * This can be due to a detected bug or due to an infeasibility that should
246 void ModelExecution::set_assert()
248 priv->asserted = true;
252 * Check if we are in a deadlock. Should only be called at the end of an
253 * execution, although it should not give false positives in the middle of an
254 * execution (there should be some ENABLED thread).
256 * @return True if program is in a deadlock; false otherwise
258 bool ModelExecution::is_deadlocked() const
260 bool blocking_threads = false;
261 for (unsigned int i = 0;i < get_num_threads();i++) {
262 thread_id_t tid = int_to_id(i);
265 Thread *t = get_thread(tid);
266 if (!t->is_model_thread() && t->get_pending())
267 blocking_threads = true;
269 return blocking_threads;
273 * Check if this is a complete execution. That is, have all thread completed
274 * execution (rather than exiting because sleep sets have forced a redundant
277 * @return True if the execution is complete.
279 bool ModelExecution::is_complete_execution() const
281 for (unsigned int i = 0;i < get_num_threads();i++)
282 if (is_enabled(int_to_id(i)))
// Wrap a plain (non-atomic) store that already happened at 'location' in a
// synthetic NONATOMIC_WRITE ModelAction so it can participate in reads-from
// and modification-order reasoning alongside atomic writes.
287 ModelAction * ModelExecution::convertNonAtomicStore(void * location) {
288 uint64_t value = *((const uint64_t *) location);
289 modelclock_t storeclock;
290 thread_id_t storethread;
// Recover which thread performed the store and at what clock, then flag the
// location as atomic from here on.
291 getStoreThreadAndClock(location, &storethread, &storeclock);
292 setAtomicStoreFlag(location);
293 ModelAction * act = new ModelAction(NONATOMIC_WRITE, memory_order_relaxed, location, value, get_thread(storethread));
// Backdate the synthetic action to the original store's clock, then splice it
// into the lazily-ordered lists and the modification-order graph.
294 act->set_seq_number(storeclock);
295 add_normal_write_to_lists(act);
296 add_write_to_lists(act);
297 w_modification_order(act);
// NOTE(review): the trailing 'return act;' is elided from this view — TODO
// confirm against the full source.
303 * Processes a read model action.
304 * @param curr is the read model action to process.
305 * @param rf_set is the set of model actions we can possibly read from
306 * @return True if processing this read updates the mo_graph.
308 void ModelExecution::process_read(ModelAction *curr, SnapVector<ModelAction *> * rf_set)
310 SnapVector<const ModelAction *> * priorset = new SnapVector<const ModelAction *>();
// If the location was last written non-atomically, materialize that store as
// a ModelAction and add it to the candidate reads-from set.
311 bool hasnonatomicstore = hasNonAtomicStore(curr->get_location());
312 if (hasnonatomicstore) {
313 ModelAction * nonatomicstore = convertNonAtomicStore(curr->get_location());
314 rf_set->push_back(nonatomicstore);
// Let the fuzzer choose which candidate write this read observes.
// NOTE(review): a retry loop around the selection is elided in this view.
318 int index = fuzzer->selectWrite(curr, rf_set);
319 ModelAction *rf = (*rf_set)[index];
323 bool canprune = false;
// On success, 'priorset' holds writes that must be mo-before 'rf'; commit
// those edges to the modification-order graph.
324 if (r_modification_order(curr, rf, priorset, &canprune)) {
325 for(unsigned int i=0;i<priorset->size();i++) {
326 mo_graph->addEdge((*priorset)[i], rf);
329 get_thread(curr)->set_return_value(curr->get_return_value());
// Pruning: r_modification_order may determine this read no longer needs to
// stay in its thread's per-location list.
331 if (canprune && curr->get_type() == ATOMIC_READ) {
332 int tid = id_to_int(curr->get_tid());
333 (*obj_thrd_map.get(curr->get_location()))[tid].pop_back();
// Rejected candidate: swap-remove it from rf_set (presumably before
// retrying with another candidate — the surrounding loop is elided).
338 (*rf_set)[index] = rf_set->back();
344 * Processes a lock, trylock, or unlock model action. @param curr is
345 * the read model action to process.
347 * The try lock operation checks whether the lock is taken. If not,
348 * it falls to the normal lock operation case. If so, it returns
351 * The lock operation has already been checked that it is enabled, so
352 * it just grabs the lock and synchronizes with the previous unlock.
354 * The unlock operation has to re-enable all of the threads that are
355 * waiting on the lock.
357 * @return True if synchronization was updated; false otherwise
359 bool ModelExecution::process_mutex(ModelAction *curr)
361 cdsc::mutex *mutex = curr->get_mutex();
362 struct cdsc::mutex_state *state = NULL;
365 state = mutex->get_state();
367 switch (curr->get_type()) {
368 case ATOMIC_TRYLOCK: {
369 bool success = !state->locked;
370 curr->set_try_lock(success);
372 get_thread(curr)->set_return_value(0);
375 get_thread(curr)->set_return_value(1);
377 //otherwise fall into the lock case
379 //TODO: FIND SOME BETTER WAY TO CHECK LOCK INITIALIZED OR NOT
380 //if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
381 // assert_bug("Lock access before initialization");
382 state->locked = get_thread(curr);
383 ModelAction *unlock = get_last_unlock(curr);
384 //synchronize with the previous unlock statement
385 if (unlock != NULL) {
386 synchronize(unlock, curr);
392 case ATOMIC_UNLOCK: {
393 //TODO: FIX WAIT SITUATION...WAITS CAN SPURIOUSLY FAIL...TIMED WAITS SHOULD PROBABLY JUST BE THE SAME AS NORMAL WAITS...THINK ABOUT PROBABILITIES THOUGH....AS IN TIMED WAIT MUST FAIL TO GUARANTEE PROGRESS...NORMAL WAIT MAY FAIL...SO NEED NORMAL WAIT TO WORK CORRECTLY IN THE CASE IT SPURIOUSLY FAILS AND IN THE CASE IT DOESN'T... TIMED WAITS MUST EVENMTUALLY RELEASE...
395 /* wake up the other threads */
396 for (unsigned int i = 0;i < get_num_threads();i++) {
397 Thread *t = get_thread(int_to_id(i));
398 Thread *curr_thrd = get_thread(curr);
399 if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
403 /* unlock the lock - after checking who was waiting on it */
404 state->locked = NULL;
406 if (!curr->is_wait())
407 break;/* The rest is only for ATOMIC_WAIT */
411 case ATOMIC_NOTIFY_ALL: {
412 action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
413 //activate all the waiting threads
414 for (action_list_t::iterator rit = waiters->begin();rit != waiters->end();rit++) {
415 scheduler->wake(get_thread(*rit));
420 case ATOMIC_NOTIFY_ONE: {
421 action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
422 if (waiters->size() != 0) {
423 Thread * thread = fuzzer->selectNotify(waiters);
424 scheduler->wake(thread);
436 * Process a write ModelAction
437 * @param curr The ModelAction to process
438 * @return True if the mo_graph was updated or promises were resolved
440 void ModelExecution::process_write(ModelAction *curr)
442 w_modification_order(curr);
443 get_thread(curr)->set_return_value(VALUE_NONE);
447 * Process a fence ModelAction
448 * @param curr The ModelAction to process
449 * @return True if synchronization was updated
451 bool ModelExecution::process_fence(ModelAction *curr)
454 * fence-relaxed: no-op
455 * fence-release: only log the occurence (not in this function), for
456 * use in later synchronization
457 * fence-acquire (this function): search for hypothetical release
459 * fence-seq-cst: MO constraints formed in {r,w}_modification_order
461 bool updated = false;
462 if (curr->is_acquire()) {
463 action_list_t *list = &action_trace;
464 action_list_t::reverse_iterator rit;
465 /* Find X : is_read(X) && X --sb-> curr */
// Walk the global action trace backwards; actions from other threads are
// skipped so only actions sequenced-before 'curr' are considered.
// NOTE(review): the continue/break statements under the guards below are
// elided in this view — TODO confirm against the full source.
466 for (rit = list->rbegin();rit != list->rend();rit++) {
467 ModelAction *act = *rit;
470 if (act->get_tid() != curr->get_tid())
472 /* Stop at the beginning of the thread */
473 if (act->is_thread_start())
475 /* Stop once we reach a prior fence-acquire */
476 if (act->is_fence() && act->is_acquire())
480 /* read-acquire will find its own release sequences */
481 if (act->is_acquire())
484 /* Establish hypothetical release sequences */
// Merge the happens-before clock propagated from the write this prior read
// observed into the fence's clock vector; merge() reporting a change
// presumably sets 'updated' (assignment elided from this view).
485 ClockVector *cv = get_hb_from_write(act);
486 if (curr->get_cv()->merge(cv))
494 * @brief Process the current action for thread-related activity
496 * Performs current-action processing for a THREAD_* ModelAction. Proccesses
497 * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
498 * synchronization, etc. This function is a no-op for non-THREAD actions
499 * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
501 * @param curr The current action
502 * @return True if synchronization was updated or a thread completed
504 bool ModelExecution::process_thread_action(ModelAction *curr)
506 bool updated = false;
508 switch (curr->get_type()) {
509 case THREAD_CREATE: {
510 thrd_t *thrd = (thrd_t *)curr->get_location();
511 struct thread_params *params = (struct thread_params *)curr->get_value();
512 Thread *th = new Thread(get_next_id(), thrd, params->func, params->arg, get_thread(curr));
513 curr->set_thread_operand(th);
515 th->set_creation(curr);
518 case PTHREAD_CREATE: {
519 (*(uint32_t *)curr->get_location()) = pthread_counter++;
521 struct pthread_params *params = (struct pthread_params *)curr->get_value();
522 Thread *th = new Thread(get_next_id(), NULL, params->func, params->arg, get_thread(curr));
523 curr->set_thread_operand(th);
525 th->set_creation(curr);
527 if ( pthread_map.size() < pthread_counter )
528 pthread_map.resize( pthread_counter );
529 pthread_map[ pthread_counter-1 ] = th;
534 Thread *blocking = curr->get_thread_operand();
535 ModelAction *act = get_last_action(blocking->get_id());
536 synchronize(act, curr);
537 updated = true; /* trigger rel-seq checks */
541 Thread *blocking = curr->get_thread_operand();
542 ModelAction *act = get_last_action(blocking->get_id());
543 synchronize(act, curr);
544 updated = true; /* trigger rel-seq checks */
545 break; // WL: to be add (modified)
548 case THREAD_FINISH: {
549 Thread *th = get_thread(curr);
550 /* Wake up any joining threads */
551 for (unsigned int i = 0;i < get_num_threads();i++) {
552 Thread *waiting = get_thread(int_to_id(i));
553 if (waiting->waiting_on() == th &&
554 waiting->get_pending()->is_thread_join())
555 scheduler->wake(waiting);
558 updated = true; /* trigger rel-seq checks */
572 * Initialize the current action by performing one or more of the following
573 * actions, as appropriate: merging RMWR and RMWC/RMW actions,
574 * manipulating backtracking sets, allocating and
575 * initializing clock vectors, and computing the promises to fulfill.
577 * @param curr The current action, as passed from the user context; may be
578 * freed/invalidated after the execution of this function, with a different
579 * action "returned" its place (pass-by-reference)
580 * @return True if curr is a newly-explored action; false otherwise
582 bool ModelExecution::initialize_curr_action(ModelAction **curr)
584 if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
585 ModelAction *newcurr = process_rmw(*curr);
591 ModelAction *newcurr = *curr;
593 newcurr->set_seq_number(get_next_seq_num());
594 /* Always compute new clock vector */
595 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
597 /* Assign most recent release fence */
598 newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
600 return true; /* This was a new ModelAction */
605 * @brief Establish reads-from relation between two actions
607 * Perform basic operations involved with establishing a concrete rf relation,
608 * including setting the ModelAction data and checking for release sequences.
610 * @param act The action that is reading (must be a read)
611 * @param rf The action from which we are reading (must be a write)
613 * @return True if this read established synchronization
616 void ModelExecution::read_from(ModelAction *act, ModelAction *rf)
619 ASSERT(rf->is_write());
621 act->set_read_from(rf);
622 if (act->is_acquire()) {
623 ClockVector *cv = get_hb_from_write(rf);
626 act->get_cv()->merge(cv);
631 * @brief Synchronizes two actions
633 * When A synchronizes with B (or A --sw-> B), B inherits A's clock vector.
634 * This function performs the synchronization as well as providing other hooks
635 * for other checks along with synchronization.
637 * @param first The left-hand side of the synchronizes-with relation
638 * @param second The right-hand side of the synchronizes-with relation
639 * @return True if the synchronization was successful (i.e., was consistent
640 * with the execution order); false otherwise
642 bool ModelExecution::synchronize(const ModelAction *first, ModelAction *second)
// A synchronizes-with edge that points backwards in execution order is
// inconsistent: record it via set_bad_synchronization() (the early-return
// between these lines is elided from this view — TODO confirm).
644 if (*second < *first) {
645 set_bad_synchronization();
648 return second->synchronize_with(first);
652 * @brief Check whether a model action is enabled.
654 * Checks whether an operation would be successful (i.e., is a lock already
655 * locked, or is the joined thread already complete).
657 * For yield-blocking, yields are never enabled.
659 * @param curr is the ModelAction to check whether it is enabled.
660 * @return a bool that indicates whether the action is enabled.
662 bool ModelExecution::check_action_enabled(ModelAction *curr) {
663 if (curr->is_lock()) {
664 cdsc::mutex *lock = curr->get_mutex();
665 struct cdsc::mutex_state *state = lock->get_state();
668 } else if (curr->is_thread_join()) {
669 Thread *blocking = curr->get_thread_operand();
670 if (!blocking->is_complete()) {
679 * This is the heart of the model checker routine. It performs model-checking
680 * actions corresponding to a given "current action." Among other processes, it
681 * calculates reads-from relationships, updates synchronization clock vectors,
682 * forms a memory_order constraints graph, and handles replay/backtrack
683 * execution when running permutations of previously-observed executions.
685 * @param curr The current action to process
686 * @return The ModelAction that is actually executed; may be different than
689 ModelAction * ModelExecution::check_current_action(ModelAction *curr)
692 bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
693 bool newly_explored = initialize_curr_action(&curr);
697 wake_up_sleeping_actions(curr);
699 /* Add the action to lists before any other model-checking tasks */
700 if (!second_part_of_rmw && curr->get_type() != NOOP)
701 add_action_to_lists(curr);
703 if (curr->is_write())
704 add_write_to_lists(curr);
706 SnapVector<ModelAction *> * rf_set = NULL;
707 /* Build may_read_from set for newly-created actions */
708 if (newly_explored && curr->is_read())
709 rf_set = build_may_read_from(curr);
711 process_thread_action(curr);
713 if (curr->is_read() && !second_part_of_rmw) {
714 process_read(curr, rf_set);
717 ASSERT(rf_set == NULL);
720 if (curr->is_write())
723 if (curr->is_fence())
726 if (curr->is_mutex_op())
733 * This is the strongest feasibility check available.
734 * @return whether the current trace (partial or complete) must be a prefix of
737 bool ModelExecution::isfeasibleprefix() const
739 return !is_infeasible();
743 * Print disagnostic information about an infeasible execution
744 * @param prefix A string to prefix the output with; if NULL, then a default
745 * message prefix will be provided
747 void ModelExecution::print_infeasibility(const char *prefix) const
751 if (priv->bad_synchronization)
752 ptr += sprintf(ptr, "[bad sw ordering]");
754 model_print("%s: %s", prefix ? prefix : "Infeasible", buf);
758 * Check if the current partial trace is infeasible. Does not check any
759 * end-of-execution flags, which might rule out the execution. Thus, this is
760 * useful only for ruling an execution as infeasible.
761 * @return whether the current partial trace is infeasible.
763 bool ModelExecution::is_infeasible() const
765 return priv->bad_synchronization;
768 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
769 ModelAction * ModelExecution::process_rmw(ModelAction *act) {
770 ModelAction *lastread = get_last_action(act->get_tid());
771 lastread->process_rmw(act);
773 mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
779 * @brief Updates the mo_graph with the constraints imposed from the current
782 * Basic idea is the following: Go through each other thread and find
783 * the last action that happened before our read. Two cases:
785 * -# The action is a write: that write must either occur before
786 * the write we read from or be the write we read from.
787 * -# The action is a read: the write that that action read from
788 * must occur before the write we read from or be the same write.
790 * @param curr The current action. Must be a read.
791 * @param rf The ModelAction or Promise that curr reads from. Must be a write.
792 * @return True if modification order edges were added; false otherwise
795 bool ModelExecution::r_modification_order(ModelAction *curr, const ModelAction *rf, SnapVector<const ModelAction *> * priorset, bool * canprune)
797 SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
799 ASSERT(curr->is_read());
801 /* Last SC fence in the current thread */
802 ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
804 int tid = curr->get_tid();
805 ModelAction *prev_same_thread = NULL;
806 /* Iterate over all threads */
807 for (i = 0;i < thrd_lists->size();i++, tid = (((unsigned int)(tid+1)) == thrd_lists->size()) ? 0 : tid + 1) {
808 /* Last SC fence in thread tid */
809 ModelAction *last_sc_fence_thread_local = NULL;
811 last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(tid), NULL);
813 /* Last SC fence in thread tid, before last SC fence in current thread */
814 ModelAction *last_sc_fence_thread_before = NULL;
815 if (last_sc_fence_local)
816 last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(tid), last_sc_fence_local);
818 //Only need to iterate if either hb has changed for thread in question or SC fence after last operation...
819 if (prev_same_thread != NULL &&
820 (prev_same_thread->get_cv()->getClock(tid) == curr->get_cv()->getClock(tid)) &&
821 (last_sc_fence_thread_local == NULL || *last_sc_fence_thread_local < *prev_same_thread)) {
825 /* Iterate over actions in thread, starting from most recent */
826 action_list_t *list = &(*thrd_lists)[tid];
827 action_list_t::reverse_iterator rit;
828 for (rit = list->rbegin();rit != list->rend();rit++) {
829 ModelAction *act = *rit;
834 /* Don't want to add reflexive edges on 'rf' */
835 if (act->equals(rf)) {
836 if (act->happens_before(curr))
842 if (act->is_write()) {
843 /* C++, Section 29.3 statement 5 */
844 if (curr->is_seqcst() && last_sc_fence_thread_local &&
845 *act < *last_sc_fence_thread_local) {
846 if (mo_graph->checkReachable(rf, act))
848 priorset->push_back(act);
851 /* C++, Section 29.3 statement 4 */
852 else if (act->is_seqcst() && last_sc_fence_local &&
853 *act < *last_sc_fence_local) {
854 if (mo_graph->checkReachable(rf, act))
856 priorset->push_back(act);
859 /* C++, Section 29.3 statement 6 */
860 else if (last_sc_fence_thread_before &&
861 *act < *last_sc_fence_thread_before) {
862 if (mo_graph->checkReachable(rf, act))
864 priorset->push_back(act);
870 * Include at most one act per-thread that "happens
873 if (act->happens_before(curr)) {
875 if (last_sc_fence_local == NULL ||
876 (*last_sc_fence_local < *prev_same_thread)) {
877 prev_same_thread = act;
880 if (act->is_write()) {
881 if (mo_graph->checkReachable(rf, act))
883 priorset->push_back(act);
885 const ModelAction *prevrf = act->get_reads_from();
886 if (!prevrf->equals(rf)) {
887 if (mo_graph->checkReachable(rf, prevrf))
889 priorset->push_back(prevrf);
891 if (act->get_tid() == curr->get_tid()) {
892 //Can prune curr from obj list
905 * Updates the mo_graph with the constraints imposed from the current write.
907 * Basic idea is the following: Go through each other thread and find
908 * the lastest action that happened before our write. Two cases:
910 * (1) The action is a write => that write must occur before
913 * (2) The action is a read => the write that that action read from
914 * must occur before the current write.
916 * This method also handles two other issues:
918 * (I) Sequential Consistency: Making sure that if the current write is
919 * seq_cst, that it occurs after the previous seq_cst write.
921 * (II) Sending the write back to non-synchronizing reads.
923 * @param curr The current action. Must be a write.
924 * @param send_fv A vector for stashing reads to which we may pass our future
925 * value. If NULL, then don't record any future values.
926 * @return True if modification order edges were added; false otherwise
928 void ModelExecution::w_modification_order(ModelAction *curr)
930 SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
932 ASSERT(curr->is_write());
934 if (curr->is_seqcst()) {
935 /* We have to at least see the last sequentially consistent write,
936 so we are initialized. */
937 ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
938 if (last_seq_cst != NULL) {
939 mo_graph->addEdge(last_seq_cst, curr);
943 /* Last SC fence in the current thread */
944 ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
946 /* Iterate over all threads */
947 for (i = 0;i < thrd_lists->size();i++) {
948 /* Last SC fence in thread i, before last SC fence in current thread */
949 ModelAction *last_sc_fence_thread_before = NULL;
950 if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
951 last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
953 /* Iterate over actions in thread, starting from most recent */
954 action_list_t *list = &(*thrd_lists)[i];
955 action_list_t::reverse_iterator rit;
956 bool force_edge = false;
957 for (rit = list->rbegin();rit != list->rend();rit++) {
958 ModelAction *act = *rit;
961 * 1) If RMW and it actually read from something, then we
962 * already have all relevant edges, so just skip to next
965 * 2) If RMW and it didn't read from anything, we should
966 * whatever edge we can get to speed up convergence.
968 * 3) If normal write, we need to look at earlier actions, so
969 * continue processing list.
972 if (curr->is_rmw()) {
973 if (curr->get_reads_from() != NULL)
981 /* C++, Section 29.3 statement 7 */
982 if (last_sc_fence_thread_before && act->is_write() &&
983 *act < *last_sc_fence_thread_before) {
984 mo_graph->addEdge(act, curr, force_edge);
989 * Include at most one act per-thread that "happens
992 if (act->happens_before(curr)) {
994 * Note: if act is RMW, just add edge:
996 * The following edge should be handled elsewhere:
997 * readfrom(act) --mo--> act
1000 mo_graph->addEdge(act, curr, force_edge);
1001 else if (act->is_read()) {
1002 //if previous read accessed a null, just keep going
1003 mo_graph->addEdge(act->get_reads_from(), curr, force_edge);
1012 * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
1013 * some constraints. This method checks one the following constraint (others
1014 * require compiler support):
1016 * If X --hb-> Y --mo-> Z, then X should not read from Z.
1017 * If X --hb-> Y, A --rf-> Y, and A --mo-> Z, then X should not read from Z.
1019 bool ModelExecution::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
1021 SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(reader->get_location());
1023 /* Iterate over all threads */
1024 for (i = 0;i < thrd_lists->size();i++) {
1025 const ModelAction *write_after_read = NULL;
1027 /* Iterate over actions in thread, starting from most recent */
1028 action_list_t *list = &(*thrd_lists)[i];
1029 action_list_t::reverse_iterator rit;
1030 for (rit = list->rbegin();rit != list->rend();rit++) {
1031 ModelAction *act = *rit;
1033 /* Don't disallow due to act == reader */
1034 if (!reader->happens_before(act) || reader == act)
1036 else if (act->is_write())
1037 write_after_read = act;
1038 else if (act->is_read() && act->get_reads_from() != NULL)
1039 write_after_read = act->get_reads_from();
1042 if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
1049 * Computes the clock vector that happens before propagates from this write.
1051 * @param rf The action that might be part of a release sequence. Must be a
1053 * @return ClockVector of happens before relation.
1056 ClockVector * ModelExecution::get_hb_from_write(ModelAction *rf) const {
1057 SnapVector<ModelAction *> * processset = NULL;
1058 for ( ;rf != NULL;rf = rf->get_reads_from()) {
1059 ASSERT(rf->is_write());
1060 if (!rf->is_rmw() || (rf->is_acquire() && rf->is_release()) || rf->get_rfcv() != NULL)
1062 if (processset == NULL)
1063 processset = new SnapVector<ModelAction *>();
1064 processset->push_back(rf);
1067 int i = (processset == NULL) ? 0 : processset->size();
1069 ClockVector * vec = NULL;
1071 if (rf->get_rfcv() != NULL) {
1072 vec = rf->get_rfcv();
1073 } else if (rf->is_acquire() && rf->is_release()) {
1075 } else if (rf->is_release() && !rf->is_rmw()) {
1077 } else if (rf->is_release()) {
1078 //have rmw that is release and doesn't have a rfcv
1079 (vec = new ClockVector(vec, NULL))->merge(rf->get_cv());
1082 //operation that isn't release
1083 if (rf->get_last_fence_release()) {
1085 vec = rf->get_last_fence_release()->get_cv();
1087 (vec=new ClockVector(vec, NULL))->merge(rf->get_last_fence_release()->get_cv());
1093 rf = (*processset)[i];
1097 if (processset != NULL)
1103 * Performs various bookkeeping operations for the current ModelAction. For
1104 * instance, adds action to the per-object, per-thread action vector and to the
1105 * action trace list of all thread actions.
1107 * @param act is the ModelAction to add.
1109 void ModelExecution::add_action_to_lists(ModelAction *act)
1111 int tid = id_to_int(act->get_tid());
1112 ModelAction *uninit = NULL;
1114 action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
1115 if (list->empty() && act->is_atomic_var()) {
1116 uninit = get_uninitialized_action(act);
1117 uninit_id = id_to_int(uninit->get_tid());
1118 list->push_front(uninit);
1119 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_wr_thrd_map, act->get_location());
1120 if (uninit_id >= (int)vec->size())
1121 vec->resize(uninit_id + 1);
1122 (*vec)[uninit_id].push_front(uninit);
1124 list->push_back(act);
1126 // Update action trace, a total order of all actions
1127 action_trace.push_back(act);
1129 action_trace.push_front(uninit);
1131 // Update obj_thrd_map, a per location, per thread, order of actions
1132 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
1133 if (tid >= (int)vec->size())
1134 vec->resize(priv->next_thread_id);
1135 (*vec)[tid].push_back(act);
1137 (*vec)[uninit_id].push_front(uninit);
1139 // Update thrd_last_action, the last action taken by each thrad
1140 if ((int)thrd_last_action.size() <= tid)
1141 thrd_last_action.resize(get_num_threads());
1142 thrd_last_action[tid] = act;
1144 thrd_last_action[uninit_id] = uninit;
1146 // Update thrd_last_fence_release, the last release fence taken by each thread
1147 if (act->is_fence() && act->is_release()) {
1148 if ((int)thrd_last_fence_release.size() <= tid)
1149 thrd_last_fence_release.resize(get_num_threads());
1150 thrd_last_fence_release[tid] = act;
1153 if (act->is_wait()) {
1154 void *mutex_loc = (void *) act->get_value();
1155 get_safe_ptr_action(&obj_map, mutex_loc)->push_back(act);
1157 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, mutex_loc);
1158 if (tid >= (int)vec->size())
1159 vec->resize(priv->next_thread_id);
1160 (*vec)[tid].push_back(act);
// Insert 'act' into 'list' positioned by its sequence number. Needed because
// normal writes are added lazily with backdated seq numbers (see
// add_normal_write_to_lists) and may belong in the middle of the list.
1164 void insertIntoActionList(action_list_t *list, ModelAction *act) {
1165 action_list_t::reverse_iterator rit = list->rbegin();
1166 modelclock_t next_seq = act->get_seq_number();
// Fast path: empty list, or the tail element already carries this seq number.
1167 if (rit == list->rend() || (*rit)->get_seq_number() == next_seq)
1168 list->push_back(act);
// Otherwise scan backwards for an element with the matching seq number and
// insert after it (rit.base() points one past *rit in forward order).
// NOTE(review): the else branch / loop-exit lines are elided in this view.
1170 for(;rit != list->rend();rit++) {
1171 if ((*rit)->get_seq_number() == next_seq) {
1172 action_list_t::iterator it = rit.base();
1173 list->insert(it, act);
1180 void insertIntoActionListAndSetCV(action_list_t *list, ModelAction *act) {
1181 action_list_t::reverse_iterator rit = list->rbegin();
1182 modelclock_t next_seq = act->get_seq_number();
1183 if (rit == list->rend()) {
1184 act->create_cv(NULL);
1185 } else if ((*rit)->get_seq_number() == next_seq) {
1186 act->create_cv((*rit));
1187 list->push_back(act);
1189 for(;rit != list->rend();rit++) {
1190 if ((*rit)->get_seq_number() == next_seq) {
1191 act->create_cv((*rit));
1192 action_list_t::iterator it = rit.base();
1193 list->insert(it, act);
/**
 * Performs various bookkeeping operations for a normal write. The
 * complication is that we are typically inserting a normal write
 * lazily, so we need to insert it into the middle of lists.
 *
 * @param act is the ModelAction to add.
 */
1208 void ModelExecution::add_normal_write_to_lists(ModelAction *act)
1210 int tid = id_to_int(act->get_tid());
1211 insertIntoActionListAndSetCV(&action_trace, act);
1213 action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
1214 insertIntoActionList(list, act);
1216 // Update obj_thrd_map, a per location, per thread, order of actions
1217 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
1218 if (tid >= (int)vec->size())
1219 vec->resize(priv->next_thread_id);
1220 insertIntoActionList(&(*vec)[tid],act);
1222 // Update thrd_last_action, the last action taken by each thrad
1223 if (thrd_last_action[tid]->get_seq_number() == act->get_seq_number())
1224 thrd_last_action[tid] = act;
1228 void ModelExecution::add_write_to_lists(ModelAction *write) {
1229 // Update seq_cst map
1230 if (write->is_seqcst())
1231 obj_last_sc_map.put(write->get_location(), write);
1233 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_wr_thrd_map, write->get_location());
1234 int tid = id_to_int(write->get_tid());
1235 if (tid >= (int)vec->size())
1236 vec->resize(priv->next_thread_id);
1237 (*vec)[tid].push_back(write);
/**
 * @brief Get the last action performed by a particular Thread
 * @param tid The thread ID of the Thread in question
 * @return The last action in the thread
 */
1245 ModelAction * ModelExecution::get_last_action(thread_id_t tid) const
1247 int threadid = id_to_int(tid);
1248 if (threadid < (int)thrd_last_action.size())
1249 return thrd_last_action[id_to_int(tid)];
/**
 * @brief Get the last fence release performed by a particular Thread
 * @param tid The thread ID of the Thread in question
 * @return The last fence release in the thread, if one exists; NULL otherwise
 */
1259 ModelAction * ModelExecution::get_last_fence_release(thread_id_t tid) const
1261 int threadid = id_to_int(tid);
1262 if (threadid < (int)thrd_last_fence_release.size())
1263 return thrd_last_fence_release[id_to_int(tid)];
/**
 * Gets the last memory_order_seq_cst write (in the total global sequence)
 * performed on a particular object (i.e., memory location), not including the
 * current action itself.
 * @param curr The current ModelAction; also denotes the object location to
 * check.
 * @return The last seq_cst write
 */
1276 ModelAction * ModelExecution::get_last_seq_cst_write(ModelAction *curr) const
1278 void *location = curr->get_location();
1279 return obj_last_sc_map.get(location);
/**
 * Gets the last memory_order_seq_cst fence (in the total global sequence)
 * performed in a particular thread, prior to a particular fence.
 * @param tid The ID of the thread to check
 * @param before_fence The fence from which to begin the search; if NULL, then
 * search for the most recent fence in the thread.
 * @return The last prior seq_cst fence in the thread, if it exists; otherwise, NULL
 */
// NOTE(review): this chunk appears to be missing several structural lines of
// this function (the opening brace, NULL-list / NULL-before_fence guards, and
// the bodies of both loops -- break / return statements).  Documented as-is;
// compare against the complete version before relying on it.
ModelAction * ModelExecution::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
	/* All fences should have location FENCE_LOCATION */
	action_list_t *list = obj_map.get(FENCE_LOCATION);	// may be NULL if no fence was ever recorded -- TODO confirm guard exists in full source

	/* Phase 1: rewind the reverse iterator to before_fence (when provided) */
	action_list_t::reverse_iterator rit = list->rbegin();

	for (;rit != list->rend();rit++)
		if (*rit == before_fence)
			// NOTE(review): a 'break;' appears to be missing here

	ASSERT(*rit == before_fence);

	/* Phase 2: continue scanning backwards for a seq_cst fence in thread tid */
	for (;rit != list->rend();rit++)
		if ((*rit)->is_fence() && (tid == (*rit)->get_tid()) && (*rit)->is_seqcst())
			// NOTE(review): a 'return *rit;' appears to be missing here
/**
 * Gets the last unlock operation performed on a particular mutex (i.e., memory
 * location). This function identifies the mutex according to the current
 * action, which is presumed to perform on the same mutex.
 * @param curr The current ModelAction; also denotes the object location to
 * check.
 * @return The last unlock operation
 */
1323 ModelAction * ModelExecution::get_last_unlock(ModelAction *curr) const
1325 void *location = curr->get_location();
1327 action_list_t *list = obj_map.get(location);
1328 /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
1329 action_list_t::reverse_iterator rit;
1330 for (rit = list->rbegin();rit != list->rend();rit++)
1331 if ((*rit)->is_unlock() || (*rit)->is_wait())
1336 ModelAction * ModelExecution::get_parent_action(thread_id_t tid) const
1338 ModelAction *parent = get_last_action(tid);
1340 parent = get_thread(tid)->get_creation();
/**
 * Returns the clock vector for a given thread.
 * @param tid The thread whose clock vector we want
 * @return Desired clock vector
 */
1349 ClockVector * ModelExecution::get_cv(thread_id_t tid) const
1351 ModelAction *firstaction=get_parent_action(tid);
1352 return firstaction != NULL ? firstaction->get_cv() : NULL;
/**
 * @brief Compare the low @a size bytes of two values for equality.
 *
 * Used to decide whether two accesses of a given width observed the same
 * value, ignoring any higher-order garbage bits.
 *
 * @param val1 First value
 * @param val2 Second value
 * @param size Access size in bytes (1, 2, 4, or 8)
 * @return True if the low @a size bytes are equal
 */
bool valequals(uint64_t val1, uint64_t val2, int size) {
	switch (size) {
	case 1:
		return ((uint8_t)val1) == ((uint8_t)val2);
	case 2:
		return ((uint16_t)val1) == ((uint16_t)val2);
	case 4:
		return ((uint32_t)val1) == ((uint32_t)val2);
	default:
		/* 8-byte (and any unexpected) width: full comparison */
		return val1 == val2;
	}
}
/**
 * Build up an initial set of all past writes that this 'read' action may read
 * from, as well as any previously-observed future values that must still be valid.
 *
 * @param curr is the current ModelAction that we are exploring; it must be a
 * read operation.
 */
// NOTE(review): this chunk appears to be missing several structural lines of
// this function (the opening brace, the declaration of loop index 'i', the
// statement consuming 'allow_read', break/continue statements, closing braces,
// and the final 'return rf_set;').  Documented as-is; compare against the
// complete version before editing.
SnapVector<ModelAction *> * ModelExecution::build_may_read_from(ModelAction *curr)
	/* Per-thread lists of writes at this read's location */
	SnapVector<action_list_t> *thrd_lists = obj_wr_thrd_map.get(curr->get_location());

	ASSERT(curr->is_read());

	/* Track the last seq_cst write, if this read is itself seq_cst */
	ModelAction *last_sc_write = NULL;

	if (curr->is_seqcst())
		last_sc_write = get_last_seq_cst_write(curr);

	/* The set of candidate writes this read may read from */
	SnapVector<ModelAction *> * rf_set = new SnapVector<ModelAction *>();

	/* Iterate over all threads */
	for (i = 0;i < thrd_lists->size();i++) {
		/* Iterate over actions in thread, starting from most recent */
		action_list_t *list = &(*thrd_lists)[i];
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin();rit != list->rend();rit++) {
			ModelAction *act = *rit;

			/* Don't consider more than one seq_cst write if we are a seq_cst read. */
			bool allow_read = true;

			if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
				// NOTE(review): expected 'allow_read = false;' here -- line appears missing

			/* Need to check whether we will have two RMW reading from the same value */
			if (curr->is_rmwr()) {
				/* It is okay if we have a failing CAS */
				if (!curr->is_rmwrcas() ||
						valequals(curr->get_value(), act->get_value(), curr->getSize())) {
					//Need to make sure we aren't the second RMW
					CycleNode * node = mo_graph->getNode_noCreate(act);
					if (node != NULL && node->getRMW() != NULL) {
						//we are the second RMW
						// NOTE(review): expected a 'continue;' (skip this write) here

			/* Only add feasible reads */
			rf_set->push_back(act);

			/* Include at most one act per-thread that "happens before" curr */
			if (act->happens_before(curr))
				// NOTE(review): expected a 'break;' here

	if (DBG_ENABLED()) {
		model_print("Reached read action:\n");
		model_print("End printing read_from_past\n");
/**
 * @brief Get an action representing an uninitialized atomic
 *
 * This function may create a new one.
 *
 * @param curr The current action, which prompts the creation of an UNINIT action
 * @return A pointer to the UNINIT ModelAction
 */
1449 ModelAction * ModelExecution::get_uninitialized_action(ModelAction *curr) const
1451 ModelAction *act = curr->get_uninit_action();
1453 act = new ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, curr->get_location(), params->uninitvalue, model_thread);
1454 curr->set_uninit_action(act);
1456 act->create_cv(NULL);
/* @brief Print a formatted table header, one row per action, and a hash of
 * the whole trace.
 * NOTE(review): the opening brace of this function body and the per-action
 * print call (inside the 'if') appear to be missing from this chunk. */
static void print_list(const action_list_t *list)
	action_list_t::const_iterator it;

	model_print("------------------------------------------------------------------------------------\n");
	model_print("# t Action type MO Location Value Rf CV\n");
	model_print("------------------------------------------------------------------------------------\n");

	unsigned int hash = 0;

	for (it = list->begin();it != list->end();it++) {
		const ModelAction *act = *it;
		if (act->get_seq_number() > 0)
			// NOTE(review): expected a print of the action here -- line appears missing
		hash = hash^(hash<<3)^((*it)->hash());	// fold every action into the trace hash
	model_print("HASH %u\n", hash);
	model_print("------------------------------------------------------------------------------------\n");
#if SUPPORT_MOD_ORDER_DUMP
/* @brief Dump the modification-order graph, reads-from edges (red) and
 * sequenced-before edges (blue) to "<filename>.dot" in Graphviz format.
 * NOTE(review): missing from this chunk: the opening brace, the declaration
 * of 'buffer', the 'act' argument lines of both dot_print_edge calls,
 * several closing braces, and the matching '#endif'. */
void ModelExecution::dumpGraph(char *filename) const
	sprintf(buffer, "%s.dot", filename);	// 'buffer' is not declared in the visible lines -- TODO confirm
	FILE *file = fopen(buffer, "w");
	fprintf(file, "digraph %s {\n", filename);
	mo_graph->dumpNodes(file);
	/* Tracks the most recent action per thread, for sb edges */
	ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());

	for (action_list_t::const_iterator it = action_trace.begin();it != action_trace.end();it++) {
		ModelAction *act = *it;
		if (act->is_read()) {
			mo_graph->dot_print_node(file, act);
			mo_graph->dot_print_edge(file,
															 act->get_reads_from(),
															 // NOTE(review): expected 'act,' argument here -- line appears missing
															 "label=\"rf\", color=red, weight=2");
		if (thread_array[act->get_tid()]) {
			mo_graph->dot_print_edge(file,
															 thread_array[id_to_int(act->get_tid())],
															 // NOTE(review): expected 'act,' argument here -- line appears missing
															 "label=\"sb\", color=blue, weight=400");
		thread_array[act->get_tid()] = act;	// remember the last action in this thread
	fprintf(file, "}\n");
	model_free(thread_array);
/** @brief Prints an execution trace summary. */
// NOTE(review): missing from this chunk: the opening brace, the '#endif'
// matching the SUPPORT_MOD_ORDER_DUMP block, the 'else' before
// print_infeasibility, and the function's closing lines.
void ModelExecution::print_summary() const
#if SUPPORT_MOD_ORDER_DUMP
	/* Dump the modification-order graph for this execution to disk */
	char buffername[100];
	sprintf(buffername, "exec%04u", get_execution_number());
	mo_graph->dumpGraphToFile(buffername);
	sprintf(buffername, "graph%04u", get_execution_number());
	dumpGraph(buffername);

	model_print("Execution trace %d:", get_execution_number());
	if (isfeasibleprefix()) {
		if (scheduler->all_threads_sleeping())
			model_print(" SLEEP-SET REDUNDANT");
		if (have_bug_reports())
			model_print(" DETECTED BUG(S)");
	print_infeasibility(" INFEASIBLE");	// NOTE(review): presumably in an 'else' branch -- line appears missing

	print_list(&action_trace);
/**
 * Add a Thread to the system for the first time. Should only be called once
 * per thread.
 * @param t The Thread to add
 */
1545 void ModelExecution::add_thread(Thread *t)
1547 unsigned int i = id_to_int(t->get_id());
1548 if (i >= thread_map.size())
1549 thread_map.resize(i + 1);
1551 if (!t->is_model_thread())
1552 scheduler->add_thread(t);
/**
 * @brief Get a Thread reference by its ID
 * @param tid The Thread's ID
 * @return A Thread reference
 */
1560 Thread * ModelExecution::get_thread(thread_id_t tid) const
1562 unsigned int i = id_to_int(tid);
1563 if (i < thread_map.size())
1564 return thread_map[i];
/**
 * @brief Get a reference to the Thread in which a ModelAction was executed
 * @param act The ModelAction
 * @return A Thread reference
 */
1573 Thread * ModelExecution::get_thread(const ModelAction *act) const
1575 return get_thread(act->get_tid());
/**
 * @brief Get a Thread reference by its pthread ID
 * @param pid The pthread's ID
 * @return A Thread reference
 */
Thread * ModelExecution::get_pthread(pthread_t pid) {
	// NOTE(review): the declaration/initialization of 'x' (apparently used to
	// reinterpret the pthread_t bits as an integer ID) appears to be missing
	// from this chunk, as does the out-of-range return path and closing brace.
	uint32_t thread_id = x.v;	// presumably derived from 'pid' -- TODO confirm
	if (thread_id < pthread_counter + 1) return pthread_map[thread_id];
/**
 * @brief Check if a Thread is currently enabled
 * @param t The Thread to check
 * @return True if the Thread is currently enabled
 */
1599 bool ModelExecution::is_enabled(Thread *t) const
1601 return scheduler->is_enabled(t);
/**
 * @brief Check if a Thread is currently enabled
 * @param tid The ID of the Thread to check
 * @return True if the Thread is currently enabled
 */
1609 bool ModelExecution::is_enabled(thread_id_t tid) const
1611 return scheduler->is_enabled(tid);
/**
 * @brief Select the next thread to execute based on the current action
 *
 * RMW actions occur in two parts, and we cannot split them. And THREAD_CREATE
 * actions should be followed by the execution of their child thread. In either
 * case, the current action should determine the next thread schedule.
 *
 * @param curr The current action
 * @return The next thread to run, if the current action will determine this
 * selection; otherwise NULL
 */
1625 Thread * ModelExecution::action_select_next_thread(const ModelAction *curr) const
1627 /* Do not split atomic RMW */
1628 if (curr->is_rmwr())
1629 return get_thread(curr);
1630 /* Follow CREATE with the created thread */
1631 /* which is not needed, because model.cc takes care of this */
1632 if (curr->get_type() == THREAD_CREATE)
1633 return curr->get_thread_operand();
1634 if (curr->get_type() == PTHREAD_CREATE) {
1635 return curr->get_thread_operand();
/**
 * Takes the next step in the execution, if possible.
 * @param curr The current step to take
 * @return Returns the next Thread to run, if any; NULL if this execution
 * should terminate
 */
1646 Thread * ModelExecution::take_step(ModelAction *curr)
1648 Thread *curr_thrd = get_thread(curr);
1649 ASSERT(curr_thrd->get_state() == THREAD_READY);
1651 ASSERT(check_action_enabled(curr)); /* May have side effects? */
1652 curr = check_current_action(curr);
1655 /* Process this action in ModelHistory for records*/
1656 model->get_history()->process_action( curr, curr_thrd->get_id() );
1658 if (curr_thrd->is_blocked() || curr_thrd->is_complete())
1659 scheduler->remove_thread(curr_thrd);
1661 return action_select_next_thread(curr);
1664 Fuzzer * ModelExecution::getFuzzer() {