11 #include "clockvector.h"
12 #include "cyclegraph.h"
14 #include "threads-model.h"
15 #include "bugmessage.h"
19 #define INITIAL_THREAD_ID 0
22 * Structure for holding small ModelChecker members that should be snapshotted
/**
 * @brief Small, snapshot-scoped ModelChecker state.
 * Holds the per-execution counters/flags that must be saved and restored
 * across executions (thread-id counter, sequence-number counter, recorded
 * bugs, bad-synchronization flag).
 * NOTE(review): several lines of this struct (e.g. the `asserted` flag used
 * by has_asserted()/set_assert(), the dtor's bug cleanup body, closing
 * braces) are elided in this excerpt — confirm against the full source.
 */
24 struct model_snapshot_members {
25 model_snapshot_members() :
26 /* First thread created will have id INITIAL_THREAD_ID */
27 next_thread_id(INITIAL_THREAD_ID),
28 used_sequence_numbers(0),
30 bad_synchronization(false),
/* Destructor frees the heap-allocated bug_message objects in `bugs` */
34 ~model_snapshot_members() {
35 for (unsigned int i = 0;i < bugs.size();i++)
/* Next thread id to hand out; doubles as the user-thread count */
40 unsigned int next_thread_id;
/* Last sequence number assigned to a ModelAction */
41 modelclock_t used_sequence_numbers;
/* Bugs reported during this execution (owned; freed in dtor) */
42 SnapVector<bug_message *> bugs;
43 /** @brief Incorrectly-ordered synchronization was made */
44 bool bad_synchronization;
50 /** @brief Constructor */
/*
 * Sets up per-execution state (thread map, per-location maps, snapshotted
 * `priv` members, the modification-order graph) and creates the special
 * model-checker thread that hosts internal ModelActions.
 * NOTE(review): parts of the initializer list are elided in this excerpt.
 */
51 ModelExecution::ModelExecution(ModelChecker *m, Scheduler *scheduler) :
56 thread_map(2),	/* We'll always need at least 2 threads */
60 condvar_waiters_map(),
64 thrd_last_fence_release(),
65 priv(new struct model_snapshot_members ()),
66 mo_graph(new CycleGraph()),
69 thrd_func_act_lists(),
72 /* Initialize a model-checker thread, for special ModelActions */
73 model_thread = new Thread(get_next_id());
74 add_thread(model_thread);
/* Let the scheduler call back into this execution */
75 scheduler->register_engine(this);
78 /** @brief Destructor */
/*
 * Frees every Thread owned by this execution.
 * NOTE(review): remaining cleanup lines (e.g. deleting `priv` / `mo_graph`)
 * are elided in this excerpt — confirm against the full source.
 */
79 ModelExecution::~ModelExecution()
81 for (unsigned int i = 0;i < get_num_threads();i++)
82 delete get_thread(int_to_id(i));
88 int ModelExecution::get_execution_number() const
90 return model->get_execution_number();
/*
 * Look up the action list for `ptr` in `hash`, creating one if absent.
 * NOTE(review): the null-check / hash->put / return lines are elided in this
 * excerpt (numbering gaps) — confirm against the full source.
 */
93 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr)
95 action_list_t *tmp = hash->get(ptr);
97 tmp = new action_list_t();
/*
 * Look up the per-thread action-list vector for `ptr` in `hash`, creating
 * one if absent.
 * NOTE(review): the null-check / hash->put / return lines are elided in this
 * excerpt — confirm against the full source.
 */
103 static SnapVector<action_list_t> * get_safe_ptr_vect_action(HashTable<const void *, SnapVector<action_list_t> *, uintptr_t, 4> * hash, void * ptr)
105 SnapVector<action_list_t> *tmp = hash->get(ptr);
107 tmp = new SnapVector<action_list_t>();
113 /** @return a thread ID for a new Thread */
114 thread_id_t ModelExecution::get_next_id()
116 return priv->next_thread_id++;
119 /** @return the number of user threads created during this execution */
120 unsigned int ModelExecution::get_num_threads() const
122 return priv->next_thread_id;
125 /** @return a sequence number for a new ModelAction */
126 modelclock_t ModelExecution::get_next_seq_num()
128 return ++priv->used_sequence_numbers;
132 * @brief Should the current action wake up a given thread?
134 * @param curr The current action
135 * @param thread The thread that we might wake up
136 * @return True, if we should wake up the sleeping thread; false otherwise
/*
 * NOTE(review): the `return` statements for each case below are elided in
 * this excerpt (numbering gaps) — only the conditions are visible.
 */
138 bool ModelExecution::should_wake_up(const ModelAction *curr, const Thread *thread) const
/* The sleeper's pending action determines what can wake it */
140 const ModelAction *asleep = thread->get_pending();
141 /* Don't allow partial RMW to wake anyone up */
144 /* Synchronizing actions may have been backtracked */
145 if (asleep->could_synchronize_with(curr))
147 /* All acquire/release fences and fence-acquire/store-release */
148 if (asleep->is_fence() && asleep->is_acquire() && curr->is_release())
150 /* Fence-release + store can awake load-acquire on the same location */
151 if (asleep->is_read() && asleep->is_acquire() && curr->same_var(asleep) && curr->is_write()) {
152 ModelAction *fence_release = get_last_fence_release(curr->get_tid());
/* Wake only if the sleeper's last action precedes the fence-release */
153 if (fence_release && *(get_last_action(thread->get_id())) < *fence_release)
/* Sleep operations defer the wake decision to the fuzzer */
156 if (asleep->is_sleep()) {
157 if (fuzzer->shouldWake(asleep))
164 void ModelExecution::wake_up_sleeping_actions(ModelAction *curr)
166 for (unsigned int i = 0;i < get_num_threads();i++) {
167 Thread *thr = get_thread(int_to_id(i));
168 if (scheduler->is_sleep_set(thr)) {
169 if (should_wake_up(curr, thr))
170 /* Remove this thread from sleep set */
171 scheduler->remove_sleep(thr);
176 /** @brief Alert the model-checker that an incorrectly-ordered
177 * synchronization was made */
178 void ModelExecution::set_bad_synchronization()
180 priv->bad_synchronization = true;
/*
 * Record a bug report for this execution.
 * @param msg Human-readable description of the bug (copied into bug_message)
 * NOTE(review): the body after the feasibility check (set_assert()?, return
 * value) is elided in this excerpt — confirm against the full source.
 */
183 bool ModelExecution::assert_bug(const char *msg)
185 priv->bugs.push_back(new bug_message(msg));
/* Only a bug on a feasible prefix should halt this execution */
187 if (isfeasibleprefix()) {
194 /** @return True, if any bugs have been reported for this execution */
195 bool ModelExecution::have_bug_reports() const
197 return priv->bugs.size() != 0;
200 /** @return True, if any fatal bugs have been reported for this execution.
201 * Any bug other than a data race is considered a fatal bug. Data races
202 * are not considered fatal unless the number of races is exceeds
203 * a threshold (temporarily set as 15).
205 bool ModelExecution::have_fatal_bug_reports() const
207 return priv->bugs.size() != 0;
/*
 * @return The list of bugs recorded for this execution.
 * NOTE(review): the function body is elided in this excerpt; presumably it
 * returns `&priv->bugs` — confirm against the full source.
 */
210 SnapVector<bug_message *> * ModelExecution::get_bugs() const
216 * Check whether the current trace has triggered an assertion which should halt
219 * @return True, if the execution should be aborted; false otherwise
221 bool ModelExecution::has_asserted() const
223 return priv->asserted;
227 * Trigger a trace assertion which should cause this execution to be halted.
228 * This can be due to a detected bug or due to an infeasibility that should
231 void ModelExecution::set_assert()
233 priv->asserted = true;
237 * Check if we are in a deadlock. Should only be called at the end of an
238 * execution, although it should not give false positives in the middle of an
239 * execution (there should be some ENABLED thread).
241 * @return True if program is in a deadlock; false otherwise
/*
 * NOTE(review): the enabled-thread early-return inside the loop is elided in
 * this excerpt (numbering gap between 247 and 250).
 */
243 bool ModelExecution::is_deadlocked() const
245 bool blocking_threads = false;
246 for (unsigned int i = 0;i < get_num_threads();i++) {
247 thread_id_t tid = int_to_id(i);
250 Thread *t = get_thread(tid);
/* A user thread with a pending action is blocked on something */
251 if (!t->is_model_thread() && t->get_pending())
252 blocking_threads = true;
254 return blocking_threads;
258 * Check if this is a complete execution. That is, have all thread completed
259 * execution (rather than exiting because sleep sets have forced a redundant
262 * @return True if the execution is complete.
/*
 * Complete iff no thread is still enabled.
 * NOTE(review): the `return false;` / `return true;` lines are elided in
 * this excerpt (numbering gaps).
 */
264 bool ModelExecution::is_complete_execution() const
266 for (unsigned int i = 0;i < get_num_threads();i++)
267 if (is_enabled(int_to_id(i)))
/*
 * Materialize a prior non-atomic store at `location` as a NONATOMIC_WRITE
 * ModelAction so atomic reads can read from it. The store's original thread
 * and clock are recovered from the shadow memory, the "atomic" flag is set
 * on the location, and the synthesized write is lazily inserted into the
 * action lists and the modification order.
 * NOTE(review): the trailing `return act;` is elided in this excerpt.
 */
272 ModelAction * ModelExecution::convertNonAtomicStore(void * location) {
273 uint64_t value = *((const uint64_t *) location);
274 modelclock_t storeclock;
275 thread_id_t storethread;
/* Recover who wrote this location last, and at what clock */
276 getStoreThreadAndClock(location, &storethread, &storeclock);
277 setAtomicStoreFlag(location);
278 ModelAction * act = new ModelAction(NONATOMIC_WRITE, memory_order_relaxed, location, value, get_thread(storethread));
/* Backdate the synthesized write to the original store's clock */
279 act->set_seq_number(storeclock);
280 add_normal_write_to_lists(act);
281 add_write_to_lists(act);
282 w_modification_order(act);
287 * Processes a read model action.
288 * @param curr is the read model action to process.
289 * @param rf_set is the set of model actions we can possibly read from
290 * @return True if processing this read updates the mo_graph.
/*
 * NOTE(review): several lines are elided in this excerpt (the loop/retry
 * structure around the fuzzer choice, read_from()/ASSERT calls, and the
 * handling after shrinking rf_set) — confirm against the full source.
 */
292 void ModelExecution::process_read(ModelAction *curr, SnapVector<ModelAction *> * rf_set)
294 SnapVector<const ModelAction *> * priorset = new SnapVector<const ModelAction *>();
295 bool hasnonatomicstore = hasNonAtomicStore(curr->get_location());
296 if (hasnonatomicstore) {
/* Make the plain store visible as a candidate write */
297 ModelAction * nonatomicstore = convertNonAtomicStore(curr->get_location());
298 rf_set->push_back(nonatomicstore);
/* Let the fuzzer pick which write this read observes */
302 int index = fuzzer->selectWrite(curr, rf_set);
303 ModelAction *rf = (*rf_set)[index];
307 bool canprune = false;
/* On success, commit the mo edges implied by reading from rf */
308 if (r_modification_order(curr, rf, priorset, &canprune)) {
309 for(unsigned int i=0;i<priorset->size();i++) {
310 mo_graph->addEdge((*priorset)[i], rf);
313 get_thread(curr)->set_return_value(curr->get_return_value());
/* Plain reads by the same thread can be dropped from the per-location list */
315 if (canprune && curr->get_type() == ATOMIC_READ) {
316 int tid = id_to_int(curr->get_tid());
317 (*obj_thrd_map.get(curr->get_location()))[tid].pop_back();
/* Remove the rejected candidate by swapping in the last element */
322 (*rf_set)[index] = rf_set->back();
328 * Processes a lock, trylock, or unlock model action. @param curr is
329 * the read model action to process.
331 * The try lock operation checks whether the lock is taken. If not,
332 * it falls to the normal lock operation case. If so, it returns
335 * The lock operation has already been checked that it is enabled, so
336 * it just grabs the lock and synchronizes with the previous unlock.
338 * The unlock operation has to re-enable all of the threads that are
339 * waiting on the lock.
341 * @return True if synchronization was updated; false otherwise
/*
 * NOTE(review): many lines are elided in this excerpt (trylock fallthrough,
 * wake calls, ATOMIC_WAIT handling, case/brace closers, return) — confirm
 * against the full source before editing.
 */
343 bool ModelExecution::process_mutex(ModelAction *curr)
345 cdsc::mutex *mutex = curr->get_mutex();
346 struct cdsc::mutex_state *state = NULL;
349 state = mutex->get_state();
351 switch (curr->get_type()) {
352 case ATOMIC_TRYLOCK: {
/* Trylock succeeds iff the mutex is currently free */
353 bool success = !state->locked;
354 curr->set_try_lock(success);
356 get_thread(curr)->set_return_value(0);
359 get_thread(curr)->set_return_value(1);
361 //otherwise fall into the lock case
363 //TODO: FIND SOME BETTER WAY TO CHECK LOCK INITIALIZED OR NOT
364 //if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
365 //	assert_bug("Lock access before initialization");
/* Record the new owner thread in the mutex state */
366 state->locked = get_thread(curr);
367 ModelAction *unlock = get_last_unlock(curr);
368 //synchronize with the previous unlock statement
369 if (unlock != NULL) {
370 synchronize(unlock, curr);
376 case ATOMIC_UNLOCK: {
377 //TODO: FIX WAIT SITUATION...WAITS CAN SPURIOUSLY FAIL...TIMED WAITS SHOULD PROBABLY JUST BE THE SAME AS NORMAL WAITS...THINK ABOUT PROBABILITIES THOUGH....AS IN TIMED WAIT MUST FAIL TO GUARANTEE PROGRESS...NORMAL WAIT MAY FAIL...SO NEED NORMAL WAIT TO WORK CORRECTLY IN THE CASE IT SPURIOUSLY FAILS AND IN THE CASE IT DOESN'T... TIMED WAITS MUST EVENMTUALLY RELEASE...
379 /* wake up the other threads */
380 for (unsigned int i = 0;i < get_num_threads();i++) {
381 Thread *t = get_thread(int_to_id(i));
382 Thread *curr_thrd = get_thread(curr);
383 if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
387 /* unlock the lock - after checking who was waiting on it */
388 state->locked = NULL;
390 if (!curr->is_wait())
391 break;/* The rest is only for ATOMIC_WAIT */
395 case ATOMIC_NOTIFY_ALL: {
396 action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
397 //activate all the waiting threads
398 for (sllnode<ModelAction *> * rit = waiters->begin();rit != NULL;rit=rit->getNext()) {
399 scheduler->wake(get_thread(rit->getVal()));
404 case ATOMIC_NOTIFY_ONE: {
405 action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
/* Let the fuzzer pick which single waiter to wake */
406 if (waiters->size() != 0) {
407 Thread * thread = fuzzer->selectNotify(waiters);
408 scheduler->wake(thread);
420 * Process a write ModelAction
421 * @param curr The ModelAction to process
422 * @return True if the mo_graph was updated or promises were resolved
424 void ModelExecution::process_write(ModelAction *curr)
426 w_modification_order(curr);
427 get_thread(curr)->set_return_value(VALUE_NONE);
431 * Process a fence ModelAction
432 * @param curr The ModelAction to process
433 * @return True if synchronization was updated
/*
 * NOTE(review): several interior lines (continue/break statements, the
 * non-read skip, the return) are elided in this excerpt.
 */
435 bool ModelExecution::process_fence(ModelAction *curr)
438 * fence-relaxed: no-op
439 * fence-release: only log the occurence (not in this function), for
440 * use in later synchronization
441 * fence-acquire (this function): search for hypothetical release
443 * fence-seq-cst: MO constraints formed in {r,w}_modification_order
445 bool updated = false;
446 if (curr->is_acquire()) {
447 action_list_t *list = &action_trace;
448 sllnode<ModelAction *> * rit;
449 /* Find X : is_read(X) && X --sb-> curr */
/* Walk the trace backwards from the most recent action */
450 for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
451 ModelAction *act = rit->getVal();
/* Only same-thread actions are sequenced-before curr */
454 if (act->get_tid() != curr->get_tid())
456 /* Stop at the beginning of the thread */
457 if (act->is_thread_start())
459 /* Stop once we reach a prior fence-acquire */
460 if (act->is_fence() && act->is_acquire())
464 /* read-acquire will find its own release sequences */
465 if (act->is_acquire())
468 /* Establish hypothetical release sequences */
469 ClockVector *cv = get_hb_from_write(act->get_reads_from());
/* Merging the write's hb clock into curr's cv = synchronization */
470 if (cv != NULL && curr->get_cv()->merge(cv))
478 * @brief Process the current action for thread-related activity
480 * Performs current-action processing for a THREAD_* ModelAction. Proccesses
481 * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
482 * synchronization, etc. This function is a no-op for non-THREAD actions
483 * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
485 * @param curr The current action
486 * @return True if synchronization was updated or a thread completed
/*
 * NOTE(review): several case labels (THREAD_JOIN / PTHREAD_JOIN headers),
 * add_thread calls, break statements and brace closers are elided in this
 * excerpt — confirm against the full source.
 */
488 void ModelExecution::process_thread_action(ModelAction *curr)
490 switch (curr->get_type()) {
491 case THREAD_CREATE: {
/* Location carries the thrd_t handle; value carries the start params */
492 thrd_t *thrd = (thrd_t *)curr->get_location();
493 struct thread_params *params = (struct thread_params *)curr->get_value();
494 Thread *th = new Thread(get_next_id(), thrd, params->func, params->arg, get_thread(curr));
495 curr->set_thread_operand(th);
497 th->set_creation(curr);
500 case PTHREAD_CREATE: {
/* Hand the caller a dense pthread id via the output location */
501 (*(uint32_t *)curr->get_location()) = pthread_counter++;
503 struct pthread_params *params = (struct pthread_params *)curr->get_value();
504 Thread *th = new Thread(get_next_id(), NULL, params->func, params->arg, get_thread(curr));
505 curr->set_thread_operand(th);
507 th->set_creation(curr);
/* Keep the pthread-id -> Thread map large enough, then record */
509 if ( pthread_map.size() < pthread_counter )
510 pthread_map.resize( pthread_counter );
511 pthread_map[ pthread_counter-1 ] = th;
/* (join case) synchronize with the last action of the joined thread */
516 Thread *blocking = curr->get_thread_operand();
517 ModelAction *act = get_last_action(blocking->get_id());
518 synchronize(act, curr);
/* (pthread join case) same synchronization pattern */
522 Thread *blocking = curr->get_thread_operand();
523 ModelAction *act = get_last_action(blocking->get_id());
524 synchronize(act, curr);
525 break;	// WL: to be add (modified)
528 case THREADONLY_FINISH:
529 case THREAD_FINISH: {
530 Thread *th = get_thread(curr);
/* Special handling when the initial (main) thread finishes */
531 if (curr->get_type() == THREAD_FINISH &&
532 th == model->getInitThread()) {
538 /* Wake up any joining threads */
539 for (unsigned int i = 0;i < get_num_threads();i++) {
540 Thread *waiting = get_thread(int_to_id(i));
541 if (waiting->waiting_on() == th &&
542 waiting->get_pending()->is_thread_join())
543 scheduler->wake(waiting);
557 * Initialize the current action by performing one or more of the following
558 * actions, as appropriate: merging RMWR and RMWC/RMW actions,
559 * manipulating backtracking sets, allocating and
560 * initializing clock vectors, and computing the promises to fulfill.
562 * @param curr The current action, as passed from the user context; may be
563 * freed/invalidated after the execution of this function, with a different
564 * action "returned" its place (pass-by-reference)
565 * @return True if curr is a newly-explored action; false otherwise
/*
 * NOTE(review): the RMW-merge branch's body after process_rmw (replacing
 * *curr, return false?) is elided in this excerpt.
 */
567 bool ModelExecution::initialize_curr_action(ModelAction **curr)
569 if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
/* Second half of an RMW: merge into the pending RMWR action */
570 ModelAction *newcurr = process_rmw(*curr);
576 ModelAction *newcurr = *curr;
/* Stamp the action with a fresh global sequence number */
578 newcurr->set_seq_number(get_next_seq_num());
579 /* Always compute new clock vector */
580 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
582 /* Assign most recent release fence */
583 newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
585 return true;	/* This was a new ModelAction */
590 * @brief Establish reads-from relation between two actions
592 * Perform basic operations involved with establishing a concrete rf relation,
593 * including setting the ModelAction data and checking for release sequences.
595 * @param act The action that is reading (must be a read)
596 * @param rf The action from which we are reading (must be a write)
598 * @return True if this read established synchronization
/*
 * NOTE(review): some lines (e.g. the read-side ASSERT, null-cv handling)
 * are elided in this excerpt.
 */
601 void ModelExecution::read_from(ModelAction *act, ModelAction *rf)
604 ASSERT(rf->is_write());
606 act->set_read_from(rf);
/* An acquire read synchronizes with the write's release sequence */
607 if (act->is_acquire()) {
608 ClockVector *cv = get_hb_from_write(rf);
611 act->get_cv()->merge(cv);
616 * @brief Synchronizes two actions
618 * When A synchronizes with B (or A --sw-> B), B inherits A's clock vector.
619 * This function performs the synchronization as well as providing other hooks
620 * for other checks along with synchronization.
622 * @param first The left-hand side of the synchronizes-with relation
623 * @param second The right-hand side of the synchronizes-with relation
624 * @return True if the synchronization was successful (i.e., was consistent
625 * with the execution order); false otherwise
/*
 * NOTE(review): the failure-path body of the `if` (presumably a
 * `return false;`) is elided in this excerpt.
 */
627 bool ModelExecution::synchronize(const ModelAction *first, ModelAction *second)
/* Synchronizing backwards in execution order is an error */
629 if (*second < *first) {
630 set_bad_synchronization();
633 return second->synchronize_with(first);
637 * @brief Check whether a model action is enabled.
639 * Checks whether an operation would be successful (i.e., is a lock already
640 * locked, or is the joined thread already complete).
642 * For yield-blocking, yields are never enabled.
644 * @param curr is the ModelAction to check whether it is enabled.
645 * @return a bool that indicates whether the action is enabled.
/*
 * NOTE(review): the branch bodies (blocking the thread, returning
 * false/true) are elided in this excerpt.
 */
647 bool ModelExecution::check_action_enabled(ModelAction *curr) {
648 if (curr->is_lock()) {
/* A lock is enabled only when nobody holds the mutex */
649 cdsc::mutex *lock = curr->get_mutex();
650 struct cdsc::mutex_state *state = lock->get_state();
653 } else if (curr->is_thread_join()) {
/* A join is enabled only once the target thread has completed */
654 Thread *blocking = curr->get_thread_operand();
655 if (!blocking->is_complete()) {
658 } else if (curr->is_sleep()) {
/* The fuzzer may decide the sleep is skipped entirely */
659 if (!fuzzer->shouldSleep(curr))
667 * This is the heart of the model checker routine. It performs model-checking
668 * actions corresponding to a given "current action." Among other processes, it
669 * calculates reads-from relationships, updates synchronization clock vectors,
670 * forms a memory_order constraints graph, and handles replay/backtrack
671 * execution when running permutations of previously-observed executions.
673 * @param curr The current action to process
674 * @return The ModelAction that is actually executed; may be different than
/*
 * NOTE(review): the process_write/process_fence/process_mutex call lines
 * and the return are elided in this excerpt (only the guarding `if`s
 * remain) — confirm against the full source.
 */
677 ModelAction * ModelExecution::check_current_action(ModelAction *curr)
/* An RMWC/RMW is the second half of an already-recorded RMWR */
680 bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
681 bool newly_explored = initialize_curr_action(&curr);
685 wake_up_sleeping_actions(curr);
687 /* Add the action to lists before any other model-checking tasks */
688 if (!second_part_of_rmw && curr->get_type() != NOOP)
689 add_action_to_lists(curr);
691 if (curr->is_write())
692 add_write_to_lists(curr);
694 SnapVector<ModelAction *> * rf_set = NULL;
695 /* Build may_read_from set for newly-created actions */
696 if (newly_explored && curr->is_read())
697 rf_set = build_may_read_from(curr);
699 process_thread_action(curr);
701 if (curr->is_read() && !second_part_of_rmw) {
702 process_read(curr, rf_set);
/* Non-reads must not have built a reads-from set */
705 ASSERT(rf_set == NULL);
708 if (curr->is_write())
711 if (curr->is_fence())
714 if (curr->is_mutex_op())
721 * This is the strongest feasibility check available.
722 * @return whether the current trace (partial or complete) must be a prefix of
725 bool ModelExecution::isfeasibleprefix() const
727 return !is_infeasible();
731 * Print disagnostic information about an infeasible execution
732 * @param prefix A string to prefix the output with; if NULL, then a default
733 * message prefix will be provided
/*
 * NOTE(review): the declarations of `buf`/`ptr` (and their initialization)
 * are elided in this excerpt — `ptr` writes into `buf` below.
 */
735 void ModelExecution::print_infeasibility(const char *prefix) const
/* Append a tag for each infeasibility cause that applies */
739 if (priv->bad_synchronization)
740 ptr += sprintf(ptr, "[bad sw ordering]");
742 model_print("%s: %s", prefix ? prefix : "Infeasible", buf);
746 * Check if the current partial trace is infeasible. Does not check any
747 * end-of-execution flags, which might rule out the execution. Thus, this is
748 * useful only for ruling an execution as infeasible.
749 * @return whether the current partial trace is infeasible.
751 bool ModelExecution::is_infeasible() const
753 return priv->bad_synchronization;
756 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
/*
 * Merges the second half of a read-modify-write into the thread's pending
 * RMWR action and records the RMW atomicity edge in the mo-graph.
 * NOTE(review): the condition guarding the addRMWEdge call and the return
 * statement are elided in this excerpt.
 */
757 ModelAction * ModelExecution::process_rmw(ModelAction *act) {
758 ModelAction *lastread = get_last_action(act->get_tid());
759 lastread->process_rmw(act);
/* RMW must be mo-adjacent to the write it read from */
761 mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
767 * @brief Updates the mo_graph with the constraints imposed from the current
770 * Basic idea is the following: Go through each other thread and find
771 * the last action that happened before our read. Two cases:
773 * -# The action is a write: that write must either occur before
774 * the write we read from or be the write we read from.
775 * -# The action is a read: the write that that action read from
776 * must occur before the write we read from or be the same write.
778 * @param curr The current action. Must be a read.
779 * @param rf The ModelAction or Promise that curr reads from. Must be a write.
780 * @return True if modification order edges were added; false otherwise
/*
 * NOTE(review): this excerpt elides many interior lines (loop-variable
 * declarations, break/continue/return statements, the `*canprune = true`
 * assignment, brace closers). The comments below only annotate what is
 * visible — consult the full source before changing control flow.
 */
783 bool ModelExecution::r_modification_order(ModelAction *curr, const ModelAction *rf, SnapVector<const ModelAction *> * priorset, bool * canprune)
785 SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
787 ASSERT(curr->is_read());
789 /* Last SC fence in the current thread */
790 ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
792 int tid = curr->get_tid();
793 ModelAction *prev_same_thread = NULL;
794 /* Iterate over all threads */
/* Start with curr's own thread, then wrap around the thread list */
795 for (i = 0;i < thrd_lists->size();i++, tid = (((unsigned int)(tid+1)) == thrd_lists->size()) ? 0 : tid + 1) {
796 /* Last SC fence in thread tid */
797 ModelAction *last_sc_fence_thread_local = NULL;
799 last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(tid), NULL);
801 /* Last SC fence in thread tid, before last SC fence in current thread */
802 ModelAction *last_sc_fence_thread_before = NULL;
803 if (last_sc_fence_local)
804 last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(tid), last_sc_fence_local);
806 //Only need to iterate if either hb has changed for thread in question or SC fence after last operation...
807 if (prev_same_thread != NULL &&
808 (prev_same_thread->get_cv()->getClock(tid) == curr->get_cv()->getClock(tid)) &&
809 (last_sc_fence_thread_local == NULL || *last_sc_fence_thread_local < *prev_same_thread)) {
813 /* Iterate over actions in thread, starting from most recent */
814 action_list_t *list = &(*thrd_lists)[tid];
815 sllnode<ModelAction *> * rit;
816 for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
817 ModelAction *act = rit->getVal();
822 /* Don't want to add reflexive edges on 'rf' */
823 if (act->equals(rf)) {
824 if (act->happens_before(curr))
830 if (act->is_write()) {
831 /* C++, Section 29.3 statement 5 */
832 if (curr->is_seqcst() && last_sc_fence_thread_local &&
833 *act < *last_sc_fence_thread_local) {
/* A cycle rf -> act would make the rf choice inconsistent */
834 if (mo_graph->checkReachable(rf, act))
836 priorset->push_back(act);
839 /* C++, Section 29.3 statement 4 */
840 else if (act->is_seqcst() && last_sc_fence_local &&
841 *act < *last_sc_fence_local) {
842 if (mo_graph->checkReachable(rf, act))
844 priorset->push_back(act);
847 /* C++, Section 29.3 statement 6 */
848 else if (last_sc_fence_thread_before &&
849 *act < *last_sc_fence_thread_before) {
850 if (mo_graph->checkReachable(rf, act))
852 priorset->push_back(act);
858 * Include at most one act per-thread that "happens
861 if (act->happens_before(curr)) {
/* Remember the newest hb-prior action in this thread */
863 if (last_sc_fence_local == NULL ||
864 (*last_sc_fence_local < *act)) {
865 prev_same_thread = act;
868 if (act->is_write()) {
869 if (mo_graph->checkReachable(rf, act))
871 priorset->push_back(act);
/* hb-prior read: its source write must be mo-before rf */
873 const ModelAction *prevrf = act->get_reads_from();
874 if (!prevrf->equals(rf)) {
875 if (mo_graph->checkReachable(rf, prevrf))
877 priorset->push_back(prevrf);
879 if (act->get_tid() == curr->get_tid()) {
880 //Can prune curr from obj list
893 * Updates the mo_graph with the constraints imposed from the current write.
895 * Basic idea is the following: Go through each other thread and find
896 * the lastest action that happened before our write. Two cases:
898 * (1) The action is a write => that write must occur before
901 * (2) The action is a read => the write that that action read from
902 * must occur before the current write.
904 * This method also handles two other issues:
906 * (I) Sequential Consistency: Making sure that if the current write is
907 * seq_cst, that it occurs after the previous seq_cst write.
909 * (II) Sending the write back to non-synchronizing reads.
911 * @param curr The current action. Must be a write.
912 * @param send_fv A vector for stashing reads to which we may pass our future
913 * value. If NULL, then don't record any future values.
914 * @return True if modification order edges were added; false otherwise
/*
 * NOTE(review): interior lines are elided in this excerpt (the `i`
 * declaration, continue/break statements, the RMW branch bodies, brace
 * closers) — consult the full source before changing control flow.
 */
916 void ModelExecution::w_modification_order(ModelAction *curr)
918 SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
920 ASSERT(curr->is_write());
/* Writes that must be ordered mo-before curr; flushed at the end */
922 SnapList<ModelAction *> edgeset;
924 if (curr->is_seqcst()) {
925 /* We have to at least see the last sequentially consistent write,
926 	 so we are initialized. */
927 ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
928 if (last_seq_cst != NULL) {
929 edgeset.push_back(last_seq_cst);
931 //update map for next query
932 obj_last_sc_map.put(curr->get_location(), curr);
935 /* Last SC fence in the current thread */
936 ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
938 /* Iterate over all threads */
939 for (i = 0;i < thrd_lists->size();i++) {
940 /* Last SC fence in thread i, before last SC fence in current thread */
941 ModelAction *last_sc_fence_thread_before = NULL;
942 if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
943 last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
945 /* Iterate over actions in thread, starting from most recent */
946 action_list_t *list = &(*thrd_lists)[i];
947 sllnode<ModelAction*>* rit;
948 for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
949 ModelAction *act = rit->getVal();
952 * 1) If RMW and it actually read from something, then we
953 * already have all relevant edges, so just skip to next
956 * 2) If RMW and it didn't read from anything, we should
957 * whatever edge we can get to speed up convergence.
959 * 3) If normal write, we need to look at earlier actions, so
960 * continue processing list.
962 if (curr->is_rmw()) {
963 if (curr->get_reads_from() != NULL)
971 /* C++, Section 29.3 statement 7 */
972 if (last_sc_fence_thread_before && act->is_write() &&
973 *act < *last_sc_fence_thread_before) {
974 edgeset.push_back(act);
979 * Include at most one act per-thread that "happens
982 if (act->happens_before(curr)) {
984 * Note: if act is RMW, just add edge:
986 * The following edge should be handled elsewhere:
987 * readfrom(act) --mo--> act
990 edgeset.push_back(act);
991 else if (act->is_read()) {
992 //if previous read accessed a null, just keep going
993 edgeset.push_back(act->get_reads_from());
/* Commit all collected predecessors as mo edges into curr */
999 mo_graph->addEdges(&edgeset, curr);
1004 * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
1005 * some constraints. This method checks one the following constraint (others
1006 * require compiler support):
1008 * If X --hb-> Y --mo-> Z, then X should not read from Z.
1009 * If X --hb-> Y, A --rf-> Y, and A --mo-> Z, then X should not read from Z.
/*
 * NOTE(review): the `i` declaration, the break for hb-prior actions and the
 * return statements are elided in this excerpt.
 */
1011 bool ModelExecution::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
1013 SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(reader->get_location());
1015 /* Iterate over all threads */
1016 for (i = 0;i < thrd_lists->size();i++) {
/* Newest write (or read's source) ordered hb-after `reader` in thread i */
1017 const ModelAction *write_after_read = NULL;
1019 /* Iterate over actions in thread, starting from most recent */
1020 action_list_t *list = &(*thrd_lists)[i];
1021 sllnode<ModelAction *>* rit;
1022 for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
1023 ModelAction *act = rit->getVal();
1025 /* Don't disallow due to act == reader */
1026 if (!reader->happens_before(act) || reader == act)
1028 else if (act->is_write())
1029 write_after_read = act;
1030 else if (act->is_read() && act->get_reads_from() != NULL)
1031 write_after_read = act->get_reads_from();
/* writer mo-after a write that is hb-after reader => disallowed */
1034 if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
1041 * Computes the clock vector that happens before propagates from this write.
1043 * @param rf The action that might be part of a release sequence. Must be a
1045 * @return ClockVector of happens before relation.
/*
 * Walks the rf chain of RMWs backwards collecting actions whose release
 * clock vector (rfcv) is not yet cached, then replays them forward,
 * computing and caching each rfcv.
 * NOTE(review): many interior lines are elided in this excerpt (the break
 * out of the first loop, the backward-replay loop header, set_rfcv calls,
 * the final return) — consult the full source before editing.
 */
1048 ClockVector * ModelExecution::get_hb_from_write(ModelAction *rf) const {
1049 SnapVector<ModelAction *> * processset = NULL;
/* Phase 1: collect RMWs along the rf chain lacking a cached rfcv */
1050 for ( ;rf != NULL;rf = rf->get_reads_from()) {
1051 ASSERT(rf->is_write());
1052 if (!rf->is_rmw() || (rf->is_acquire() && rf->is_release()) || rf->get_rfcv() != NULL)
1054 if (processset == NULL)
1055 processset = new SnapVector<ModelAction *>();
1056 processset->push_back(rf);
/* Phase 2: replay the collected chain from oldest to newest */
1059 int i = (processset == NULL) ? 0 : processset->size();
1061 ClockVector * vec = NULL;
1063 if (rf->get_rfcv() != NULL) {
/* Cached release-sequence clock vector */
1064 vec = rf->get_rfcv();
1065 } else if (rf->is_acquire() && rf->is_release()) {
1067 } else if (rf->is_release() && !rf->is_rmw()) {
1069 } else if (rf->is_release()) {
1070 //have rmw that is release and doesn't have a rfcv
1071 (vec = new ClockVector(vec, NULL))->merge(rf->get_cv());
1074 //operation that isn't release
1075 if (rf->get_last_fence_release()) {
1077 vec = rf->get_last_fence_release()->get_cv();
1079 (vec=new ClockVector(vec, NULL))->merge(rf->get_last_fence_release()->get_cv());
1085 rf = (*processset)[i];
/* Free the temporary work list, if one was allocated */
1089 if (processset != NULL)
1095 * Performs various bookkeeping operations for the current ModelAction. For
1096 * instance, adds action to the per-object, per-thread action vector and to the
1097 * action trace list of all thread actions.
1099 * @param act is the ModelAction to add.
/*
 * NOTE(review): several lines are elided in this excerpt (the `uninit_id`
 * declaration, the `if (uninit)` guards around the uninit insertions, brace
 * closers) — consult the full source before editing.
 */
1101 void ModelExecution::add_action_to_lists(ModelAction *act)
1103 int tid = id_to_int(act->get_tid());
/* Synthesized "uninitialized" write, created on first access to a var */
1104 ModelAction *uninit = NULL;
1106 action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
1107 if (list->empty() && act->is_atomic_var()) {
1108 uninit = get_uninitialized_action(act);
1109 uninit_id = id_to_int(uninit->get_tid());
1110 list->push_front(uninit);
1111 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_wr_thrd_map, act->get_location());
/* Grow the per-thread write vector to cover the uninit thread id */
1112 if (uninit_id >= (int)vec->size()) {
1113 int oldsize = (int) vec->size();
1114 vec->resize(uninit_id + 1);
/* Placement-new the new slots (resize on SnapVector leaves them raw) */
1115 for(int i=oldsize;i<uninit_id+1;i++) {
1116 new(&(*vec)[i]) action_list_t();
1119 (*vec)[uninit_id].push_front(uninit);
1121 list->push_back(act);
1123 // Update action trace, a total order of all actions
1124 action_trace.push_back(act);
/* The uninit write sorts before everything in the trace */
1126 action_trace.push_front(uninit);
1128 // Update obj_thrd_map, a per location, per thread, order of actions
1129 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
1130 if (tid >= (int)vec->size()) {
1131 uint oldsize =vec->size();
1132 vec->resize(priv->next_thread_id);
1133 for(uint i=oldsize;i<priv->next_thread_id;i++)
1134 new (&(*vec)[i]) action_list_t();
1136 (*vec)[tid].push_back(act);
1138 (*vec)[uninit_id].push_front(uninit);
1140 // Update thrd_last_action, the last action taken by each thrad
1141 if ((int)thrd_last_action.size() <= tid)
1142 thrd_last_action.resize(get_num_threads());
1143 thrd_last_action[tid] = act;
1145 thrd_last_action[uninit_id] = uninit;
1147 // Update thrd_last_fence_release, the last release fence taken by each thread
1148 if (act->is_fence() && act->is_release()) {
1149 if ((int)thrd_last_fence_release.size() <= tid)
1150 thrd_last_fence_release.resize(get_num_threads());
1151 thrd_last_fence_release[tid] = act;
/* A wait is also recorded against its mutex's location lists */
1154 if (act->is_wait()) {
1155 void *mutex_loc = (void *) act->get_value();
1156 get_safe_ptr_action(&obj_map, mutex_loc)->push_back(act);
1158 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, mutex_loc);
1159 if (tid >= (int)vec->size()) {
1160 uint oldsize = vec->size();
1161 vec->resize(priv->next_thread_id);
1162 for(uint i=oldsize;i<priv->next_thread_id;i++)
1163 new (&(*vec)[i]) action_list_t();
1165 (*vec)[tid].push_back(act);
1168 /* Update thrd_func_act_lists, list of actions in functions entered by each thread
1169 * To be used by FuncNode and only care about actions with a position */
1170 if (act->get_position() != NULL) {
1171 SnapList<action_list_t *> * func_act_lists = thrd_func_act_lists[tid];
/* Append to the action list of the function currently on top */
1172 action_list_t * curr_act_list = func_act_lists->back();
1173 ASSERT(curr_act_list != NULL);
1174 curr_act_list->push_back(act);
1178 void insertIntoActionList(action_list_t *list, ModelAction *act) {
1179 sllnode<ModelAction*> * rit = list->end();
1180 modelclock_t next_seq = act->get_seq_number();
1181 if (rit == NULL || (rit->getVal()->get_seq_number() == next_seq))
1182 list->push_back(act);
1184 for(;rit != NULL;rit=rit->getPrev()) {
1185 if (rit->getVal()->get_seq_number() == next_seq) {
1186 list->insertAfter(rit, act);
1193 void insertIntoActionListAndSetCV(action_list_t *list, ModelAction *act) {
1194 sllnode<ModelAction*> * rit = list->end();
1195 modelclock_t next_seq = act->get_seq_number();
1197 act->create_cv(NULL);
1198 } else if (rit->getVal()->get_seq_number() == next_seq) {
1199 act->create_cv(rit->getVal());
1200 list->push_back(act);
1202 for(;rit != NULL;rit=rit->getPrev()) {
1203 if (rit->getVal()->get_seq_number() == next_seq) {
1204 act->create_cv(rit->getVal());
1205 list->insertAfter(rit, act);
1213 * Performs various bookkeeping operations for a normal write. The
1214 * complication is that we are typically inserting a normal write
1215 * lazily, so we need to insert it into the middle of lists.
1217 * @param act is the ModelAction to add.
1220 void ModelExecution::add_normal_write_to_lists(ModelAction *act)
1222 int tid = id_to_int(act->get_tid());
1223 insertIntoActionListAndSetCV(&action_trace, act);
1225 action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
1226 insertIntoActionList(list, act);
1228 // Update obj_thrd_map, a per location, per thread, order of actions
1229 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
1230 if (tid >= (int)vec->size()) {
1231 uint oldsize =vec->size();
1232 vec->resize(priv->next_thread_id);
1233 for(uint i=oldsize;i<priv->next_thread_id;i++)
1234 new (&(*vec)[i]) action_list_t();
1236 insertIntoActionList(&(*vec)[tid],act);
1238 // Update thrd_last_action, the last action taken by each thrad
1239 if (thrd_last_action[tid]->get_seq_number() == act->get_seq_number())
1240 thrd_last_action[tid] = act;
1244 void ModelExecution::add_write_to_lists(ModelAction *write) {
1245 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_wr_thrd_map, write->get_location());
1246 int tid = id_to_int(write->get_tid());
1247 if (tid >= (int)vec->size()) {
1248 uint oldsize =vec->size();
1249 vec->resize(priv->next_thread_id);
1250 for(uint i=oldsize;i<priv->next_thread_id;i++)
1251 new (&(*vec)[i]) action_list_t();
1253 (*vec)[tid].push_back(write);
1257 * @brief Get the last action performed by a particular Thread
1258 * @param tid The thread ID of the Thread in question
1259 * @return The last action in the thread
1261 ModelAction * ModelExecution::get_last_action(thread_id_t tid) const
1263 int threadid = id_to_int(tid);
1264 if (threadid < (int)thrd_last_action.size())
1265 return thrd_last_action[id_to_int(tid)];
1271 * @brief Get the last fence release performed by a particular Thread
1272 * @param tid The thread ID of the Thread in question
1273 * @return The last fence release in the thread, if one exists; NULL otherwise
1275 ModelAction * ModelExecution::get_last_fence_release(thread_id_t tid) const
1277 int threadid = id_to_int(tid);
1278 if (threadid < (int)thrd_last_fence_release.size())
1279 return thrd_last_fence_release[id_to_int(tid)];
1285 * Gets the last memory_order_seq_cst write (in the total global sequence)
1286 * performed on a particular object (i.e., memory location), not including the
1288 * @param curr The current ModelAction; also denotes the object location to
1290 * @return The last seq_cst write
1292 ModelAction * ModelExecution::get_last_seq_cst_write(ModelAction *curr) const
1294 void *location = curr->get_location();
1295 return obj_last_sc_map.get(location);
1299 * Gets the last memory_order_seq_cst fence (in the total global sequence)
1300 * performed in a particular thread, prior to a particular fence.
1301 * @param tid The ID of the thread to check
1302 * @param before_fence The fence from which to begin the search; if NULL, then
1303 * search for the most recent fence in the thread.
1304 * @return The last prior seq_cst fence in the thread, if exists; otherwise, NULL
1306 ModelAction * ModelExecution::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
1308 /* All fences should have location FENCE_LOCATION */
1309 action_list_t *list = obj_map.get(FENCE_LOCATION);
1314 sllnode<ModelAction*>* rit = list->end();
1317 for (;rit != NULL;rit=rit->getPrev())
1318 if (rit->getVal() == before_fence)
1321 ASSERT(rit->getVal() == before_fence);
1325 for (;rit != NULL;rit=rit->getPrev()) {
1326 ModelAction *act = rit->getVal();
1327 if (act->is_fence() && (tid == act->get_tid()) && act->is_seqcst())
1334 * Gets the last unlock operation performed on a particular mutex (i.e., memory
1335 * location). This function identifies the mutex according to the current
1336 * action, which is presumed to perform on the same mutex.
1337 * @param curr The current ModelAction; also denotes the object location to
1339 * @return The last unlock operation
1341 ModelAction * ModelExecution::get_last_unlock(ModelAction *curr) const
1343 void *location = curr->get_location();
1345 action_list_t *list = obj_map.get(location);
1346 /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
1347 sllnode<ModelAction*>* rit;
1348 for (rit = list->end();rit != NULL;rit=rit->getPrev())
1349 if (rit->getVal()->is_unlock() || rit->getVal()->is_wait())
1350 return rit->getVal();
1354 ModelAction * ModelExecution::get_parent_action(thread_id_t tid) const
1356 ModelAction *parent = get_last_action(tid);
1358 parent = get_thread(tid)->get_creation();
1363 * Returns the clock vector for a given thread.
1364 * @param tid The thread whose clock vector we want
1365 * @return Desired clock vector
1367 ClockVector * ModelExecution::get_cv(thread_id_t tid) const
1369 ModelAction *firstaction=get_parent_action(tid);
1370 return firstaction != NULL ? firstaction->get_cv() : NULL;
1373 bool valequals(uint64_t val1, uint64_t val2, int size) {
1376 return ((uint8_t)val1) == ((uint8_t)val2);
1378 return ((uint16_t)val1) == ((uint16_t)val2);
1380 return ((uint32_t)val1) == ((uint32_t)val2);
1390 * Build up an initial set of all past writes that this 'read' action may read
1391 * from, as well as any previously-observed future values that must still be valid.
1393 * @param curr is the current ModelAction that we are exploring; it must be a
1396 SnapVector<ModelAction *> * ModelExecution::build_may_read_from(ModelAction *curr)
1398 SnapVector<action_list_t> *thrd_lists = obj_wr_thrd_map.get(curr->get_location());
1400 ASSERT(curr->is_read());
1402 ModelAction *last_sc_write = NULL;
1404 if (curr->is_seqcst())
1405 last_sc_write = get_last_seq_cst_write(curr);
1407 SnapVector<ModelAction *> * rf_set = new SnapVector<ModelAction *>();
1409 /* Iterate over all threads */
1410 for (i = 0;i < thrd_lists->size();i++) {
1411 /* Iterate over actions in thread, starting from most recent */
1412 action_list_t *list = &(*thrd_lists)[i];
1413 sllnode<ModelAction *> * rit;
1414 for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
1415 ModelAction *act = rit->getVal();
1420 /* Don't consider more than one seq_cst write if we are a seq_cst read. */
1421 bool allow_read = true;
1423 if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
1426 /* Need to check whether we will have two RMW reading from the same value */
1427 if (curr->is_rmwr()) {
1428 /* It is okay if we have a failing CAS */
1429 if (!curr->is_rmwrcas() ||
1430 valequals(curr->get_value(), act->get_value(), curr->getSize())) {
1431 //Need to make sure we aren't the second RMW
1432 CycleNode * node = mo_graph->getNode_noCreate(act);
1433 if (node != NULL && node->getRMW() != NULL) {
1434 //we are the second RMW
1441 /* Only add feasible reads */
1442 rf_set->push_back(act);
1445 /* Include at most one act per-thread that "happens before" curr */
1446 if (act->happens_before(curr))
1451 if (DBG_ENABLED()) {
1452 model_print("Reached read action:\n");
1454 model_print("End printing read_from_past\n");
1460 * @brief Get an action representing an uninitialized atomic
1462 * This function may create a new one.
1464 * @param curr The current action, which prompts the creation of an UNINIT action
1465 * @return A pointer to the UNINIT ModelAction
1467 ModelAction * ModelExecution::get_uninitialized_action(ModelAction *curr) const
1469 ModelAction *act = curr->get_uninit_action();
1471 act = new ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, curr->get_location(), params->uninitvalue, model_thread);
1472 curr->set_uninit_action(act);
1474 act->create_cv(NULL);
1478 static void print_list(action_list_t *list)
1480 sllnode<ModelAction*> *it;
1482 model_print("------------------------------------------------------------------------------------\n");
1483 model_print("# t Action type MO Location Value Rf CV\n");
1484 model_print("------------------------------------------------------------------------------------\n");
1486 unsigned int hash = 0;
1488 for (it = list->begin();it != NULL;it=it->getNext()) {
1489 const ModelAction *act = it->getVal();
1490 if (act->get_seq_number() > 0)
1492 hash = hash^(hash<<3)^(it->getVal()->hash());
1494 model_print("HASH %u\n", hash);
1495 model_print("------------------------------------------------------------------------------------\n");
#if SUPPORT_MOD_ORDER_DUMP
/**
 * @brief Dump the modification-order graph, plus rf and sb edges, as a
 *        Graphviz dot file
 * @param filename Base name of the output file; ".dot" is appended
 */
void ModelExecution::dumpGraph(char *filename)
{
	char buffer[200];
	/* Bounded format to avoid overflowing buffer on long filenames */
	snprintf(buffer, sizeof(buffer), "%s.dot", filename);
	FILE *file = fopen(buffer, "w");
	if (file == NULL)
		return;	// cannot open the dump file; skip the dump

	fprintf(file, "digraph %s {\n", filename);
	mo_graph->dumpNodes(file);

	/* Most recent action per thread, used to draw sequenced-before edges */
	ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());

	for (sllnode<ModelAction*>* it = action_trace.begin();it != NULL;it = it->getNext()) {
		ModelAction *act = it->getVal();
		if (act->is_read()) {
			mo_graph->dot_print_node(file, act);
			mo_graph->dot_print_edge(file,
															 act->get_reads_from(),
															 act,
															 "label=\"rf\", color=red, weight=2");
		}
		/* Use id_to_int() consistently for thread_array indexing */
		if (thread_array[id_to_int(act->get_tid())]) {
			mo_graph->dot_print_edge(file,
															 thread_array[id_to_int(act->get_tid())],
															 act,
															 "label=\"sb\", color=blue, weight=400");
		}
		thread_array[id_to_int(act->get_tid())] = act;
	}
	fprintf(file, "}\n");
	model_free(thread_array);
	fclose(file);
}
#endif
1532 /** @brief Prints an execution trace summary. */
1533 void ModelExecution::print_summary()
1535 #if SUPPORT_MOD_ORDER_DUMP
1536 char buffername[100];
1537 sprintf(buffername, "exec%04u", get_execution_number());
1538 mo_graph->dumpGraphToFile(buffername);
1539 sprintf(buffername, "graph%04u", get_execution_number());
1540 dumpGraph(buffername);
1543 model_print("Execution trace %d:", get_execution_number());
1544 if (isfeasibleprefix()) {
1545 if (scheduler->all_threads_sleeping())
1546 model_print(" SLEEP-SET REDUNDANT");
1547 if (have_bug_reports())
1548 model_print(" DETECTED BUG(S)");
1550 print_infeasibility(" INFEASIBLE");
1553 print_list(&action_trace);
1559 * Add a Thread to the system for the first time. Should only be called once
1561 * @param t The Thread to add
1563 void ModelExecution::add_thread(Thread *t)
1565 unsigned int i = id_to_int(t->get_id());
1566 if (i >= thread_map.size())
1567 thread_map.resize(i + 1);
1569 if (!t->is_model_thread())
1570 scheduler->add_thread(t);
1574 * @brief Get a Thread reference by its ID
1575 * @param tid The Thread's ID
1576 * @return A Thread reference
1578 Thread * ModelExecution::get_thread(thread_id_t tid) const
1580 unsigned int i = id_to_int(tid);
1581 if (i < thread_map.size())
1582 return thread_map[i];
1587 * @brief Get a reference to the Thread in which a ModelAction was executed
1588 * @param act The ModelAction
1589 * @return A Thread reference
1591 Thread * ModelExecution::get_thread(const ModelAction *act) const
1593 return get_thread(act->get_tid());
1597 * @brief Get a Thread reference by its pthread ID
1598 * @param index The pthread's ID
1599 * @return A Thread reference
1601 Thread * ModelExecution::get_pthread(pthread_t pid) {
1607 uint32_t thread_id = x.v;
1608 if (thread_id < pthread_counter + 1) return pthread_map[thread_id];
1613 * @brief Check if a Thread is currently enabled
1614 * @param t The Thread to check
1615 * @return True if the Thread is currently enabled
1617 bool ModelExecution::is_enabled(Thread *t) const
1619 return scheduler->is_enabled(t);
1623 * @brief Check if a Thread is currently enabled
1624 * @param tid The ID of the Thread to check
1625 * @return True if the Thread is currently enabled
1627 bool ModelExecution::is_enabled(thread_id_t tid) const
1629 return scheduler->is_enabled(tid);
1633 * @brief Select the next thread to execute based on the curren action
1635 * RMW actions occur in two parts, and we cannot split them. And THREAD_CREATE
1636 * actions should be followed by the execution of their child thread. In either
1637 * case, the current action should determine the next thread schedule.
1639 * @param curr The current action
1640 * @return The next thread to run, if the current action will determine this
1641 * selection; otherwise NULL
1643 Thread * ModelExecution::action_select_next_thread(const ModelAction *curr) const
1645 /* Do not split atomic RMW */
1646 if (curr->is_rmwr())
1647 return get_thread(curr);
1648 /* Follow CREATE with the created thread */
1649 /* which is not needed, because model.cc takes care of this */
1650 if (curr->get_type() == THREAD_CREATE)
1651 return curr->get_thread_operand();
1652 if (curr->get_type() == PTHREAD_CREATE) {
1653 return curr->get_thread_operand();
1659 * Takes the next step in the execution, if possible.
1660 * @param curr The current step to take
1661 * @return Returns the next Thread to run, if any; NULL if this execution
1664 Thread * ModelExecution::take_step(ModelAction *curr)
1666 Thread *curr_thrd = get_thread(curr);
1667 ASSERT(curr_thrd->get_state() == THREAD_READY);
1669 ASSERT(check_action_enabled(curr)); /* May have side effects? */
1670 curr = check_current_action(curr);
1673 /* Process this action in ModelHistory for records */
1674 model->get_history()->process_action( curr, curr->get_tid() );
1676 if (curr_thrd->is_blocked() || curr_thrd->is_complete())
1677 scheduler->remove_thread(curr_thrd);
1679 return action_select_next_thread(curr);
1682 Fuzzer * ModelExecution::getFuzzer() {