model: fix ASSERT()
1 #include <stdio.h>
2 #include <algorithm>
3 #include <mutex>
4 #include <new>
5
6 #include "model.h"
7 #include "action.h"
8 #include "nodestack.h"
9 #include "schedule.h"
10 #include "snapshot-interface.h"
11 #include "common.h"
12 #include "clockvector.h"
13 #include "cyclegraph.h"
14 #include "promise.h"
15 #include "datarace.h"
16 #include "threads-model.h"
17 #include "output.h"
18
19 #define INITIAL_THREAD_ID       0
20
21 ModelChecker *model;
22
23 struct bug_message {
24         bug_message(const char *str) {
25                 const char *fmt = "  [BUG] %s\n";
26                 msg = (char *)snapshot_malloc(strlen(fmt) + strlen(str));
27                 sprintf(msg, fmt, str);
28         }
29         ~bug_message() { if (msg) snapshot_free(msg); }
30
31         char *msg;
32         void print() { model_print("%s", msg); }
33
34         SNAPSHOTALLOC
35 };
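
/* bug_message objects are created and collected by assert_bug() below, e.g.
 *   priv->bugs.push_back(new bug_message(msg));
 * and are printed at the end of an execution via print_bugs(). */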
36
37 /**
38  * Structure for holding small ModelChecker members that should be snapshotted
39  */
40 struct model_snapshot_members {
41         model_snapshot_members() :
42                 /* First thread created will have id INITIAL_THREAD_ID */
43                 next_thread_id(INITIAL_THREAD_ID),
44                 used_sequence_numbers(0),
45                 next_backtrack(NULL),
46                 bugs(),
47                 stats(),
48                 failed_promise(false),
49                 too_many_reads(false),
50                 no_valid_reads(false),
51                 bad_synchronization(false),
52                 asserted(false)
53         { }
54
55         ~model_snapshot_members() {
56                 for (unsigned int i = 0; i < bugs.size(); i++)
57                         delete bugs[i];
58                 bugs.clear();
59         }
60
61         unsigned int next_thread_id;
62         modelclock_t used_sequence_numbers;
63         ModelAction *next_backtrack;
64         std::vector< bug_message *, SnapshotAlloc<bug_message *> > bugs;
65         struct execution_stats stats;
66         bool failed_promise;
67         bool too_many_reads;
68         bool no_valid_reads;
69         /** @brief Incorrectly-ordered synchronization was made */
70         bool bad_synchronization;
71         bool asserted;
72
73         SNAPSHOTALLOC
74 };
75
76 /** @brief Constructor */
77 ModelChecker::ModelChecker(struct model_params params) :
78         /* Initialize default scheduler */
79         params(params),
80         scheduler(new Scheduler()),
81         diverge(NULL),
82         earliest_diverge(NULL),
83         action_trace(new action_list_t()),
84         thread_map(new HashTable<int, Thread *, int>()),
85         obj_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
86         lock_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
87         condvar_waiters_map(new HashTable<const void *, action_list_t *, uintptr_t, 4>()),
88         obj_thrd_map(new HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4 >()),
89         promises(new std::vector< Promise *, SnapshotAlloc<Promise *> >()),
90         futurevalues(new std::vector< struct PendingFutureValue, SnapshotAlloc<struct PendingFutureValue> >()),
91         pending_rel_seqs(new std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >()),
92         thrd_last_action(new std::vector< ModelAction *, SnapshotAlloc<ModelAction *> >(1)),
93         thrd_last_fence_release(new std::vector< ModelAction *, SnapshotAlloc<ModelAction *> >()),
94         node_stack(new NodeStack()),
95         priv(new struct model_snapshot_members()),
96         mo_graph(new CycleGraph())
97 {
98         /* Initialize a model-checker thread, for special ModelActions */
99         model_thread = new Thread(get_next_id());
100         thread_map->put(id_to_int(model_thread->get_id()), model_thread);
101 }
102
103 /** @brief Destructor */
104 ModelChecker::~ModelChecker()
105 {
106         for (unsigned int i = 0; i < get_num_threads(); i++)
107                 delete thread_map->get(i);
108         delete thread_map;
109
110         delete obj_thrd_map;
111         delete obj_map;
112         delete lock_waiters_map;
113         delete condvar_waiters_map;
114         delete action_trace;
115
116         for (unsigned int i = 0; i < promises->size(); i++)
117                 delete (*promises)[i];
118         delete promises;
119
120         delete pending_rel_seqs;
121
122         delete thrd_last_action;
123         delete thrd_last_fence_release;
124         delete node_stack;
125         delete scheduler;
126         delete mo_graph;
127         delete priv;
128 }
129
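/**
 * @brief Get the action list for a memory location, creating it if necessary
 *
 * Lazily allocates and caches an empty action list in the hash table the
 * first time a location is queried.
 *
 * @param hash The per-location hash table to search
 * @param ptr The memory location
 * @return The (possibly new) action list for ptr
 */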
130 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr)
131 {
132         action_list_t *tmp = hash->get(ptr);
133         if (tmp == NULL) {
134                 tmp = new action_list_t();
135                 hash->put(ptr, tmp);
136         }
137         return tmp;
138 }
139
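/**
 * @brief Get the per-thread action-list vector for a memory location,
 * creating it if necessary
 * @param hash The per-location hash table to search
 * @param ptr The memory location
 * @return The (possibly new) vector of per-thread action lists for ptr
 */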
140 static std::vector<action_list_t> * get_safe_ptr_vect_action(HashTable<void *, std::vector<action_list_t> *, uintptr_t, 4> * hash, void * ptr)
141 {
142         std::vector<action_list_t> *tmp = hash->get(ptr);
143         if (tmp == NULL) {
144                 tmp = new std::vector<action_list_t>();
145                 hash->put(ptr, tmp);
146         }
147         return tmp;
148 }
149
150 /**
151  * Restores the user program to its initial state and resets all model-checker data
152  * structures.
153  */
154 void ModelChecker::reset_to_initial_state()
155 {
156         DEBUG("+++ Resetting to initial state +++\n");
157         node_stack->reset_execution();
158
159         /* Print all model-checker output before rollback */
160         fflush(model_out);
161
162         /**
163          * FIXME: if we utilize partial rollback, we will need to free only
164          * those pending actions which were NOT pending before the rollback
165          * point
166          */
167         for (unsigned int i = 0; i < get_num_threads(); i++)
168                 delete get_thread(int_to_id(i))->get_pending();
169
170         snapshot_backtrack_before(0);
171 }
172
173 /** @return a thread ID for a new Thread */
174 thread_id_t ModelChecker::get_next_id()
175 {
176         return priv->next_thread_id++;
177 }
178
179 /** @return the number of user threads created during this execution */
180 unsigned int ModelChecker::get_num_threads() const
181 {
182         return priv->next_thread_id;
183 }
184
185 /**
186  * Must be called from user-thread context (e.g., through the global
187  * thread_current() interface)
188  *
189  * @return The currently executing Thread.
190  */
191 Thread * ModelChecker::get_current_thread() const
192 {
193         return scheduler->get_current_thread();
194 }
195
196 /** @return a sequence number for a new ModelAction */
197 modelclock_t ModelChecker::get_next_seq_num()
198 {
199         return ++priv->used_sequence_numbers;
200 }
201
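/** @return the head Node of the NodeStack */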
202 Node * ModelChecker::get_curr_node() const
203 {
204         return node_stack->get_head();
205 }
206
207 /**
208  * @brief Choose the next thread to execute.
209  *
210  * This function chooses the next thread that should execute. It can force the
211  * adjacency of read/write portions of an RMW action, force THREAD_CREATE to be
212  * followed by a THREAD_START, or it can enforce execution replay/backtracking.
213  * The model-checker may have no preference regarding the next thread (i.e.,
214  * when exploring a new execution ordering), in which case we defer to the
215  * scheduler.
216  *
217  * @param curr Optional: The current ModelAction. Only used if non-NULL; it
218  * may guide the choice of the next thread (i.e., THREAD_CREATE should be
219  * followed by THREAD_START, or ATOMIC_RMWR followed by ATOMIC_{RMW,RMWC})
220  * @return The next thread chosen to run, if any exists; otherwise NULL, if
221  * no threads remain to be executed.
222  */
223 Thread * ModelChecker::get_next_thread(ModelAction *curr)
224 {
225         thread_id_t tid;
226
227         if (curr != NULL) {
228                 /* Do not split atomic actions. */
229                 if (curr->is_rmwr())
230                         return get_thread(curr);
231                 else if (curr->get_type() == THREAD_CREATE)
232                         return curr->get_thread_operand();
233         }
234
235         /*
236          * Have we completed exploring the preselected path? Then let the
237          * scheduler decide
238          */
239         if (diverge == NULL)
240                 return scheduler->select_next_thread();
241
242         /* Else, we are trying to replay an execution */
243         ModelAction *next = node_stack->get_next()->get_action();
244
245         if (next == diverge) {
246                 if (earliest_diverge == NULL || *diverge < *earliest_diverge)
247                         earliest_diverge = diverge;
248
249                 Node *nextnode = next->get_node();
250                 Node *prevnode = nextnode->get_parent();
251                 scheduler->update_sleep_set(prevnode);
252
253                 /* Reached divergence point */
254                 if (nextnode->increment_misc()) {
255                         /* The next node will try to satisfy a different misc_index value. */
256                         tid = next->get_tid();
257                         node_stack->pop_restofstack(2);
258                 } else if (nextnode->increment_promise()) {
259                         /* The next node will try to satisfy a different set of promises. */
260                         tid = next->get_tid();
261                         node_stack->pop_restofstack(2);
262                 } else if (nextnode->increment_read_from()) {
263                         /* The next node will read from a different value. */
264                         tid = next->get_tid();
265                         node_stack->pop_restofstack(2);
266                 } else if (nextnode->increment_relseq_break()) {
267                         /* The next node will try to resolve a release sequence differently */
268                         tid = next->get_tid();
269                         node_stack->pop_restofstack(2);
270                 } else {
271                         ASSERT(prevnode);
272                         /* Make a different thread execute for next step */
273                         scheduler->add_sleep(get_thread(next->get_tid()));
274                         tid = prevnode->get_next_backtrack();
275                         /* Make sure the backtracked thread isn't sleeping. */
276                         node_stack->pop_restofstack(1);
277                         if (diverge == earliest_diverge) {
278                                 earliest_diverge = prevnode->get_action();
279                         }
280                 }
281                 /* Start the round robin scheduler from this thread id */
282                 scheduler->set_scheduler_thread(tid);
283                 /* The correct sleep set is in the parent node. */
284                 execute_sleep_set();
285
286                 DEBUG("*** Divergence point ***\n");
287
288                 diverge = NULL;
289         } else {
290                 tid = next->get_tid();
291         }
292         DEBUG("*** ModelChecker chose next thread = %d ***\n", id_to_int(tid));
293         ASSERT(tid != THREAD_ID_T_NONE);
294         return thread_map->get(id_to_int(tid));
295 }
296
297 /**
298  * We need to know what the next actions of all threads in the sleep
299  * set will be.  This method computes them and stores the actions at
300  * the corresponding thread object's pending action.
301  */
302
303 void ModelChecker::execute_sleep_set()
304 {
305         for (unsigned int i = 0; i < get_num_threads(); i++) {
306                 thread_id_t tid = int_to_id(i);
307                 Thread *thr = get_thread(tid);
308                 if (scheduler->is_sleep_set(thr) && thr->get_pending()) {
309                         thr->get_pending()->set_sleep_flag();
310                 }
311         }
312 }
313
314 /**
315  * @brief Should the current action wake up a given thread?
316  *
317  * @param curr The current action
318  * @param thread The thread that we might wake up
319  * @return True, if we should wake up the sleeping thread; false otherwise
320  */
321 bool ModelChecker::should_wake_up(const ModelAction *curr, const Thread *thread) const
322 {
323         const ModelAction *asleep = thread->get_pending();
324         /* Don't allow partial RMW to wake anyone up */
325         if (curr->is_rmwr())
326                 return false;
327         /* Synchronizing actions may have been backtracked */
328         if (asleep->could_synchronize_with(curr))
329                 return true;
330         /* All acquire/release fences and fence-acquire/store-release */
331         if (asleep->is_fence() && asleep->is_acquire() && curr->is_release())
332                 return true;
333                 /* Fence-release + store can wake a load-acquire on the same location */
334         if (asleep->is_read() && asleep->is_acquire() && curr->same_var(asleep) && curr->is_write()) {
335                 ModelAction *fence_release = get_last_fence_release(curr->get_tid());
336                 if (fence_release && *(get_last_action(thread->get_id())) < *fence_release)
337                         return true;
338         }
339         return false;
340 }
341
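/**
 * @brief Remove any sleeping threads that the current action should wake up
 * @param curr The current action, which may wake sleeping threads
 */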
342 void ModelChecker::wake_up_sleeping_actions(ModelAction *curr)
343 {
344         for (unsigned int i = 0; i < get_num_threads(); i++) {
345                 Thread *thr = get_thread(int_to_id(i));
346                 if (scheduler->is_sleep_set(thr)) {
347                         if (should_wake_up(curr, thr))
348                                 /* Remove this thread from sleep set */
349                                 scheduler->remove_sleep(thr);
350                 }
351         }
352 }
353
354 /** @brief Alert the model-checker that an incorrectly-ordered
355  * synchronization was made */
356 void ModelChecker::set_bad_synchronization()
357 {
358         priv->bad_synchronization = true;
359 }
360
361 /**
362  * Check whether the current trace has triggered an assertion which should halt
363  * its execution.
364  *
365  * @return True, if the execution should be aborted; false otherwise
366  */
367 bool ModelChecker::has_asserted() const
368 {
369         return priv->asserted;
370 }
371
372 /**
373  * Trigger a trace assertion which should cause this execution to be halted.
374  * This can be due to a detected bug or due to an infeasibility that should
375  * halt ASAP.
376  */
377 void ModelChecker::set_assert()
378 {
379         priv->asserted = true;
380 }
381
382 /**
383  * Check if we are in a deadlock. Should only be called at the end of an
384  * execution, although it should not give false positives in the middle of an
385  * execution (there should be some ENABLED thread).
386  *
387  * @return True if program is in a deadlock; false otherwise
388  */
389 bool ModelChecker::is_deadlocked() const
390 {
391         bool blocking_threads = false;
392         for (unsigned int i = 0; i < get_num_threads(); i++) {
393                 thread_id_t tid = int_to_id(i);
394                 if (is_enabled(tid))
395                         return false;
396                 Thread *t = get_thread(tid);
397                 if (!t->is_model_thread() && t->get_pending())
398                         blocking_threads = true;
399         }
400         return blocking_threads;
401 }
402
403 /**
404  * Check if this is a complete execution. That is, have all threads completed
405  * execution (rather than exiting because sleep sets have forced a redundant
406  * execution)?
407  *
408  * @return True if the execution is complete.
409  */
410 bool ModelChecker::is_complete_execution() const
411 {
412         for (unsigned int i = 0; i < get_num_threads(); i++)
413                 if (is_enabled(int_to_id(i)))
414                         return false;
415         return true;
416 }
417
418 /**
419  * @brief Assert a bug in the executing program.
420  *
421  * Use this function to assert any sort of bug in the user program. If the
422  * current trace is feasible (actually, a prefix of some feasible execution),
423  * then this execution will be aborted, printing the appropriate message. If
424  * the current trace is not yet feasible, the error message will be stashed and
425  * printed if the execution ever becomes feasible.
426  *
427  * @param msg Descriptive message for the bug (do not include newline char)
428  * @return True if bug is immediately-feasible
429  */
430 bool ModelChecker::assert_bug(const char *msg)
431 {
432         priv->bugs.push_back(new bug_message(msg));
433
434         if (isfeasibleprefix()) {
435                 set_assert();
436                 return true;
437         }
438         return false;
439 }
440
441 /**
442  * @brief Assert a bug in the executing program, asserted by a user thread
443  * @see ModelChecker::assert_bug
444  * @param msg Descriptive message for the bug (do not include newline char)
445  */
446 void ModelChecker::assert_user_bug(const char *msg)
447 {
448         /* If feasible bug, bail out now */
449         if (assert_bug(msg))
450                 switch_to_master(NULL);
451 }
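
/* Hypothetical usage sketch from a user-thread context (the condition name is
 * assumed for illustration):
 *   if (!invariant_holds)
 *           model->assert_user_bug("invariant violated");
 * If the bug is immediately feasible, the call switches back to the
 * model-checker ("master") context and halts this execution. */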
452
453 /** @return True, if any bugs have been reported for this execution */
454 bool ModelChecker::have_bug_reports() const
455 {
456         return priv->bugs.size() != 0;
457 }
458
459 /** @brief Print bug report listing for this execution (if any bugs exist) */
460 void ModelChecker::print_bugs() const
461 {
462         if (have_bug_reports()) {
463                 model_print("Bug report: %zu bug%s detected\n",
464                                 priv->bugs.size(),
465                                 priv->bugs.size() > 1 ? "s" : "");
466                 for (unsigned int i = 0; i < priv->bugs.size(); i++)
467                         priv->bugs[i]->print();
468         }
469 }
470
471 /**
472  * @brief Record end-of-execution stats
473  *
474  * Must be run when exiting an execution. Records various stats.
475  * @see struct execution_stats
476  */
477 void ModelChecker::record_stats()
478 {
479         stats.num_total++;
480         if (!isfeasibleprefix())
481                 stats.num_infeasible++;
482         else if (have_bug_reports())
483                 stats.num_buggy_executions++;
484         else if (is_complete_execution())
485                 stats.num_complete++;
486         else {
487                 stats.num_redundant++;
488
489                 /**
490                  * @todo We can violate this ASSERT() when fairness/sleep sets
491                  * conflict to cause an execution to terminate, e.g. with:
492                  * Scheduler: [0: disabled][1: disabled][2: sleep][3: current, enabled]
493                  */
494                 //ASSERT(scheduler->all_threads_sleeping());
495         }
496 }
497
498 /** @brief Print execution stats */
499 void ModelChecker::print_stats() const
500 {
501         model_print("Number of complete, bug-free executions: %d\n", stats.num_complete);
502         model_print("Number of redundant executions: %d\n", stats.num_redundant);
503         model_print("Number of buggy executions: %d\n", stats.num_buggy_executions);
504         model_print("Number of infeasible executions: %d\n", stats.num_infeasible);
505         model_print("Total executions: %d\n", stats.num_total);
506         model_print("Total nodes created: %d\n", node_stack->get_total_nodes());
507 }
508
509 /**
510  * @brief End-of-execution print
511  * @param printbugs Should any existing bugs be printed?
512  */
513 void ModelChecker::print_execution(bool printbugs) const
514 {
515         print_program_output();
516
517         if (DBG_ENABLED() || params.verbose) {
518                 model_print("Earliest divergence point since last feasible execution:\n");
519                 if (earliest_diverge)
520                         earliest_diverge->print();
521                 else
522                         model_print("(Not set)\n");
523
524                 model_print("\n");
525                 print_stats();
526         }
527
528         /* Don't print invalid bugs */
529         if (printbugs)
530                 print_bugs();
531
532         model_print("\n");
533         print_summary();
534 }
535
536 /**
537  * Queries the model-checker for more executions to explore and, if one
538  * exists, resets the model-checker state to execute a new execution.
539  *
540  * @return If there are more executions to explore, return true. Otherwise,
541  * return false.
542  */
543 bool ModelChecker::next_execution()
544 {
545         DBG();
546         /* Is this execution a feasible execution that's worth bug-checking? */
547         bool complete = isfeasibleprefix() && (is_complete_execution() ||
548                         have_bug_reports());
549
550         /* End-of-execution bug checks */
551         if (complete) {
552                 if (is_deadlocked())
553                         assert_bug("Deadlock detected");
554
555                 checkDataRaces();
556         }
557
558         record_stats();
559
560         /* Output */
561         if (DBG_ENABLED() || params.verbose || (complete && have_bug_reports()))
562                 print_execution(complete);
563         else
564                 clear_program_output();
565
566         if (complete)
567                 earliest_diverge = NULL;
568
569         if ((diverge = get_next_backtrack()) == NULL)
570                 return false;
571
572         if (DBG_ENABLED()) {
573                 model_print("Next execution will diverge at:\n");
574                 diverge->print();
575         }
576
577         reset_to_initial_state();
578         return true;
579 }
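
/* Hypothetical driver sketch (the real loop lives outside this file; the
 * helper name below is assumed):
 *   do {
 *           run_user_program_once();
 *   } while (model->next_execution());
 */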
580
581 /**
582  * @brief Find the last fence-related backtracking conflict for a ModelAction
583  *
584  * This function performs the search for the most recent conflicting action
585  * against which we should perform backtracking, as affected by fence
586  * operations. This includes pairs of potentially-synchronizing actions which
587  * occur due to fence-acquire or fence-release, and hence should be explored in
588  * the opposite execution order.
589  *
590  * @param act The current action
591  * @return The most recent action which conflicts with act due to fences
592  */
593 ModelAction * ModelChecker::get_last_fence_conflict(ModelAction *act) const
594 {
595         /* Only perform release/acquire fence backtracking for stores */
596         if (!act->is_write())
597                 return NULL;
598
599         /* Find a fence-release (or, act is a release) */
600         ModelAction *last_release;
601         if (act->is_release())
602                 last_release = act;
603         else
604                 last_release = get_last_fence_release(act->get_tid());
605         if (!last_release)
606                 return NULL;
607
608         /* Skip past the release */
609         action_list_t *list = action_trace;
610         action_list_t::reverse_iterator rit;
611         for (rit = list->rbegin(); rit != list->rend(); rit++)
612                 if (*rit == last_release)
613                         break;
614         ASSERT(rit != list->rend());
615
616         /* Find a prior:
617          *   load-acquire
618          * or
619          *   load --sb-> fence-acquire */
620         std::vector< ModelAction *, ModelAlloc<ModelAction *> > acquire_fences(get_num_threads(), NULL);
621         std::vector< ModelAction *, ModelAlloc<ModelAction *> > prior_loads(get_num_threads(), NULL);
622         bool found_acquire_fences = false;
623         for ( ; rit != list->rend(); rit++) {
624                 ModelAction *prev = *rit;
625                 if (act->same_thread(prev))
626                         continue;
627
628                 int tid = id_to_int(prev->get_tid());
629
630                 if (prev->is_read() && act->same_var(prev)) {
631                         if (prev->is_acquire()) {
632                                 /* Found most recent load-acquire, don't need
633                                  * to search for more fences */
634                                 if (!found_acquire_fences)
635                                         return NULL;
636                         } else {
637                                 prior_loads[tid] = prev;
638                         }
639                 }
640                 if (prev->is_acquire() && prev->is_fence() && !acquire_fences[tid]) {
641                         found_acquire_fences = true;
642                         acquire_fences[tid] = prev;
643                 }
644         }
645
646         ModelAction *latest_backtrack = NULL;
647         for (unsigned int i = 0; i < acquire_fences.size(); i++)
648                 if (acquire_fences[i] && prior_loads[i])
649                         if (!latest_backtrack || *latest_backtrack < *acquire_fences[i])
650                                 latest_backtrack = acquire_fences[i];
651         return latest_backtrack;
652 }
653
654 /**
655  * @brief Find the last backtracking conflict for a ModelAction
656  *
657  * This function performs the search for the most recent conflicting action
658  * against which we should perform backtracking. This primarily includes pairs of
659  * synchronizing actions which should be explored in the opposite execution
660  * order.
661  *
662  * @param act The current action
663  * @return The most recent action which conflicts with act
664  */
665 ModelAction * ModelChecker::get_last_conflict(ModelAction *act) const
666 {
667         switch (act->get_type()) {
668         /* case ATOMIC_FENCE: fences don't directly cause backtracking */
669         case ATOMIC_READ:
670         case ATOMIC_WRITE:
671         case ATOMIC_RMW: {
672                 ModelAction *ret = NULL;
673
674                 /* linear search: from most recent to oldest */
675                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
676                 action_list_t::reverse_iterator rit;
677                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
678                         ModelAction *prev = *rit;
679                         if (prev->could_synchronize_with(act)) {
680                                 ret = prev;
681                                 break;
682                         }
683                 }
684
685                 ModelAction *ret2 = get_last_fence_conflict(act);
686                 if (!ret2)
687                         return ret;
688                 if (!ret)
689                         return ret2;
690                 if (*ret < *ret2)
691                         return ret2;
692                 return ret;
693         }
694         case ATOMIC_LOCK:
695         case ATOMIC_TRYLOCK: {
696                 /* linear search: from most recent to oldest */
697                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
698                 action_list_t::reverse_iterator rit;
699                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
700                         ModelAction *prev = *rit;
701                         if (act->is_conflicting_lock(prev))
702                                 return prev;
703                 }
704                 break;
705         }
706         case ATOMIC_UNLOCK: {
707                 /* linear search: from most recent to oldest */
708                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
709                 action_list_t::reverse_iterator rit;
710                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
711                         ModelAction *prev = *rit;
712                         if (!act->same_thread(prev) && prev->is_failed_trylock())
713                                 return prev;
714                 }
715                 break;
716         }
717         case ATOMIC_WAIT: {
718                 /* linear search: from most recent to oldest */
719                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
720                 action_list_t::reverse_iterator rit;
721                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
722                         ModelAction *prev = *rit;
723                         if (!act->same_thread(prev) && prev->is_failed_trylock())
724                                 return prev;
725                         if (!act->same_thread(prev) && prev->is_notify())
726                                 return prev;
727                 }
728                 break;
729         }
730
731         case ATOMIC_NOTIFY_ALL:
732         case ATOMIC_NOTIFY_ONE: {
733                 /* linear search: from most recent to oldest */
734                 action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
735                 action_list_t::reverse_iterator rit;
736                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
737                         ModelAction *prev = *rit;
738                         if (!act->same_thread(prev) && prev->is_wait())
739                                 return prev;
740                 }
741                 break;
742         }
743         default:
744                 break;
745         }
746         return NULL;
747 }
748
749 /** This method finds backtracking points at which we should try to
750  * reorder prior conflicting actions against the parameter ModelAction.
751  *
752  * @param act The ModelAction to find backtracking points for.
753  */
754 void ModelChecker::set_backtracking(ModelAction *act)
755 {
756         Thread *t = get_thread(act);
757         ModelAction *prev = get_last_conflict(act);
758         if (prev == NULL)
759                 return;
760
761         Node *node = prev->get_node()->get_parent();
762
763         int low_tid, high_tid;
764         if (node->enabled_status(t->get_id()) == THREAD_ENABLED) {
765                 low_tid = id_to_int(act->get_tid());
766                 high_tid = low_tid + 1;
767         } else {
768                 low_tid = 0;
769                 high_tid = get_num_threads();
770         }
771
772         for (int i = low_tid; i < high_tid; i++) {
773                 thread_id_t tid = int_to_id(i);
774
775                 /* Make sure this thread can be enabled here. */
776                 if (i >= node->get_num_threads())
777                         break;
778
779                 /* Don't backtrack into a point where the thread is disabled or sleeping. */
780                 if (node->enabled_status(tid) != THREAD_ENABLED)
781                         continue;
782
783                 /* Check if this has been explored already */
784                 if (node->has_been_explored(tid))
785                         continue;
786
787                 /* See if fairness allows */
788                 if (model->params.fairwindow != 0 && !node->has_priority(tid)) {
789                         bool unfair = false;
790                         for (int t = 0; t < node->get_num_threads(); t++) {
791                                 thread_id_t tother = int_to_id(t);
792                                 if (node->is_enabled(tother) && node->has_priority(tother)) {
793                                         unfair = true;
794                                         break;
795                                 }
796                         }
797                         if (unfair)
798                                 continue;
799                 }
800                 /* Cache the latest backtracking point */
801                 set_latest_backtrack(prev);
802
803                 /* If this is a new backtracking point, mark the tree */
804                 if (!node->set_backtrack(tid))
805                         continue;
806                 DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
807                                         id_to_int(prev->get_tid()),
808                                         id_to_int(t->get_id()));
809                 if (DBG_ENABLED()) {
810                         prev->print();
811                         act->print();
812                 }
813         }
814 }
815
816 /**
817  * @brief Cache a backtracking point as the "most recent", if eligible
818  *
819  * Note that this does not prepare the NodeStack for this backtracking
820  * operation; it only caches the action on a per-execution basis
821  *
822  * @param act The operation at which we should explore a different next action
823  * (i.e., backtracking point)
824  * @return True, if this action is now the most recent backtracking point;
825  * false otherwise
826  */
827 bool ModelChecker::set_latest_backtrack(ModelAction *act)
828 {
829         if (!priv->next_backtrack || *act > *priv->next_backtrack) {
830                 priv->next_backtrack = act;
831                 return true;
832         }
833         return false;
834 }
835
836 /**
837  * Returns last backtracking point. The model checker will explore a different
838  * path for this point in the next execution.
839  * @return The ModelAction at which the next execution should diverge.
840  */
841 ModelAction * ModelChecker::get_next_backtrack()
842 {
843         ModelAction *next = priv->next_backtrack;
844         priv->next_backtrack = NULL;
845         return next;
846 }
847
848 /**
849  * Processes a read model action.
850  * @param curr is the read model action to process.
851  * @return True if processing this read updates the mo_graph.
852  */
853 bool ModelChecker::process_read(ModelAction *curr)
854 {
855         Node *node = curr->get_node();
856         uint64_t value = VALUE_NONE;
857         bool updated = false;
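        /* Retry loop: if the chosen READ_FROM_PAST source makes the execution
         * infeasible, roll back the mo_graph changes and let the Node advance
         * to its next read-from candidate. */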
858         while (true) {
859                 switch (node->get_read_from_status()) {
860                 case READ_FROM_PAST: {
861                         const ModelAction *rf = node->get_read_from_past();
862                         ASSERT(rf);
863
864                         mo_graph->startChanges();
865                         value = rf->get_value();
866                         check_recency(curr, rf);
867                         bool r_status = r_modification_order(curr, rf);
868
869                         if (is_infeasible() && node->increment_read_from()) {
870                                 mo_graph->rollbackChanges();
871                                 priv->too_many_reads = false;
872                                 continue;
873                         }
874
875                         read_from(curr, rf);
876                         mo_graph->commitChanges();
877                         mo_check_promises(curr, true);
878
879                         updated |= r_status;
880                         break;
881                 }
882                 case READ_FROM_PROMISE: {
883                         Promise *promise = curr->get_node()->get_read_from_promise();
884                         promise->add_reader(curr);
885                         value = promise->get_value();
886                         curr->set_read_from_promise(promise);
887                         mo_graph->startChanges();
888                         updated = r_modification_order(curr, promise);
889                         mo_graph->commitChanges();
890                         break;
891                 }
892                 case READ_FROM_FUTURE: {
893                         /* Read from future value */
894                         struct future_value fv = node->get_future_value();
895                         Promise *promise = new Promise(curr, fv);
896                         value = fv.value;
897                         curr->set_read_from_promise(promise);
898                         promises->push_back(promise);
899                         mo_graph->startChanges();
900                         updated = r_modification_order(curr, promise);
901                         mo_graph->commitChanges();
902                         break;
903                 }
904                 default:
905                         ASSERT(false);
906                 }
907                 get_thread(curr)->set_return_value(value);
908                 return updated;
909         }
910 }
911
912 /**
913  * Processes a lock, trylock, unlock, wait, or notify model action.
914  * @param curr is the mutex model action to process.
915  *
916  * The trylock operation checks whether the lock is already held.  If not,
917  * it falls through to the normal lock operation case.  If so, it fails and
918  * the thread's return value is set to 0.
919  *
920  * The lock operation has already been checked that it is enabled, so
921  * it just grabs the lock and synchronizes with the previous unlock.
922  *
923  * The unlock operation has to re-enable all of the threads that are
924  * waiting on the lock.
925  *
926  * @return True if synchronization was updated; false otherwise
927  */
928 bool ModelChecker::process_mutex(ModelAction *curr)
929 {
930         std::mutex *mutex = NULL;
931         struct std::mutex_state *state = NULL;
932
933         if (curr->is_trylock() || curr->is_lock() || curr->is_unlock()) {
934                 mutex = (std::mutex *)curr->get_location();
935                 state = mutex->get_state();
936         } else if (curr->is_wait()) {
937                 mutex = (std::mutex *)curr->get_value();
938                 state = mutex->get_state();
939         }
940
941         switch (curr->get_type()) {
942         case ATOMIC_TRYLOCK: {
943                 bool success = !state->islocked;
944                 curr->set_try_lock(success);
945                 if (!success) {
946                         get_thread(curr)->set_return_value(0);
947                         break;
948                 }
949                 get_thread(curr)->set_return_value(1);
950         }
951                 //otherwise fall into the lock case
952         case ATOMIC_LOCK: {
953                 if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
954                         assert_bug("Lock access before initialization");
955                 state->islocked = true;
956                 ModelAction *unlock = get_last_unlock(curr);
957                 //synchronize with the previous unlock statement
958                 if (unlock != NULL) {
959                         curr->synchronize_with(unlock);
960                         return true;
961                 }
962                 break;
963         }
964         case ATOMIC_UNLOCK: {
965                 //unlock the lock
966                 state->islocked = false;
967                 //wake up the other threads
968                 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, curr->get_location());
969                 //activate all the waiting threads
970                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
971                         scheduler->wake(get_thread(*rit));
972                 }
973                 waiters->clear();
974                 break;
975         }
976         case ATOMIC_WAIT: {
977                 //unlock the lock
978                 state->islocked = false;
979                 //wake up the other threads
980                 action_list_t *waiters = get_safe_ptr_action(lock_waiters_map, (void *) curr->get_value());
981                 //activate all the waiting threads
982                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
983                         scheduler->wake(get_thread(*rit));
984                 }
985                 waiters->clear();
986                 //check whether we should go to sleep or not...simulate spurious failures
987                 if (curr->get_node()->get_misc() == 0) {
988                         get_safe_ptr_action(condvar_waiters_map, curr->get_location())->push_back(curr);
989                         //disable us
990                         scheduler->sleep(get_thread(curr));
991                 }
992                 break;
993         }
994         case ATOMIC_NOTIFY_ALL: {
995                 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
996                 //activate all the waiting threads
997                 for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
998                         scheduler->wake(get_thread(*rit));
999                 }
1000                 waiters->clear();
1001                 break;
1002         }
1003         case ATOMIC_NOTIFY_ONE: {
1004                 action_list_t *waiters = get_safe_ptr_action(condvar_waiters_map, curr->get_location());
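                //the Node's misc counter selects which waiting thread to wake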
1005                 int wakeupthread = curr->get_node()->get_misc();
1006                 action_list_t::iterator it = waiters->begin();
1007                 advance(it, wakeupthread);
1008                 scheduler->wake(get_thread(*it));
1009                 waiters->erase(it);
1010                 break;
1011         }
1012
1013         default:
1014                 ASSERT(0);
1015         }
1016         return false;
1017 }
1018
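/**
 * @brief Add a pending future value from a writer to a reader's Node
 *
 * Checks mo_may_allow() and, if the future value is admissible, records it in
 * the reader's Node so that a later execution can explore reading it.
 *
 * @param writer The write action whose value may be sent "into the future"
 * @param reader The read action which may receive the future value
 */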
1019 void ModelChecker::add_future_value(const ModelAction *writer, ModelAction *reader)
1020 {
1021         /* Do more ambitious checks now that mo is more complete */
1022         if (mo_may_allow(writer, reader)) {
1023                 Node *node = reader->get_node();
1024
1025                 /* Find an ancestor thread which exists at the time of the reader */
1026                 Thread *write_thread = get_thread(writer);
1027                 while (id_to_int(write_thread->get_id()) >= node->get_num_threads())
1028                         write_thread = write_thread->get_parent();
1029
1030                 struct future_value fv = {
1031                         writer->get_write_value(),
1032                         writer->get_seq_number() + params.maxfuturedelay,
1033                         write_thread->get_id(),
1034                 };
1035                 if (node->add_future_value(fv))
1036                         set_latest_backtrack(reader);
1037         }
1038 }
1039
1040 /**
1041  * Process a write ModelAction
1042  * @param curr The ModelAction to process
1043  * @return True if the mo_graph was updated or promises were resolved
1044  */
1045 bool ModelChecker::process_write(ModelAction *curr)
1046 {
1047         bool updated_mod_order = w_modification_order(curr);
1048         bool updated_promises = resolve_promises(curr);
1049
1050         if (promises->size() == 0) {
1051                 for (unsigned int i = 0; i < futurevalues->size(); i++) {
1052                         struct PendingFutureValue pfv = (*futurevalues)[i];
1053                         add_future_value(pfv.writer, pfv.act);
1054                 }
1055                 futurevalues->clear();
1056         }
1057
1058         mo_graph->commitChanges();
1059         mo_check_promises(curr, false);
1060
1061         get_thread(curr)->set_return_value(VALUE_NONE);
1062         return updated_mod_order || updated_promises;
1063 }
1064
1065 /**
1066  * Process a fence ModelAction
1067  * @param curr The ModelAction to process
1068  * @return True if synchronization was updated
1069  */
1070 bool ModelChecker::process_fence(ModelAction *curr)
1071 {
1072         /*
1073          * fence-relaxed: no-op
1074  * fence-release: only log the occurrence (not in this function), for
1075          *   use in later synchronization
1076          * fence-acquire (this function): search for hypothetical release
1077          *   sequences
1078          */
1079         bool updated = false;
1080         if (curr->is_acquire()) {
1081                 action_list_t *list = action_trace;
1082                 action_list_t::reverse_iterator rit;
1083                 /* Find X : is_read(X) && X --sb-> curr */
1084                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1085                         ModelAction *act = *rit;
1086                         if (act == curr)
1087                                 continue;
1088                         if (act->get_tid() != curr->get_tid())
1089                                 continue;
1090                         /* Stop at the beginning of the thread */
1091                         if (act->is_thread_start())
1092                                 break;
1093                         /* Stop once we reach a prior fence-acquire */
1094                         if (act->is_fence() && act->is_acquire())
1095                                 break;
1096                         if (!act->is_read())
1097                                 continue;
1098                         /* read-acquire will find its own release sequences */
1099                         if (act->is_acquire())
1100                                 continue;
1101
1102                         /* Establish hypothetical release sequences */
1103                         rel_heads_list_t release_heads;
1104                         get_release_seq_heads(curr, act, &release_heads);
1105                         for (unsigned int i = 0; i < release_heads.size(); i++)
1106                                 if (!curr->synchronize_with(release_heads[i]))
1107                                         set_bad_synchronization();
1108                         if (release_heads.size() != 0)
1109                                 updated = true;
1110                 }
1111         }
1112         return updated;
1113 }
1114
1115 /**
1116  * @brief Process the current action for thread-related activity
1117  *
1118  * Performs current-action processing for a THREAD_* ModelAction. Processing
1119  * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
1120  * synchronization, etc.  This function is a no-op for non-THREAD actions
1121  * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
1122  *
1123  * @param curr The current action
1124  * @return True if synchronization was updated or a thread completed
1125  */
1126 bool ModelChecker::process_thread_action(ModelAction *curr)
1127 {
1128         bool updated = false;
1129
1130         switch (curr->get_type()) {
1131         case THREAD_CREATE: {
1132                 thrd_t *thrd = (thrd_t *)curr->get_location();
1133                 struct thread_params *params = (struct thread_params *)curr->get_value();
1134                 Thread *th = new Thread(thrd, params->func, params->arg, get_thread(curr));
1135                 add_thread(th);
1136                 th->set_creation(curr);
1137                 /* Promises can be satisfied by children */
1138                 for (unsigned int i = 0; i < promises->size(); i++) {
1139                         Promise *promise = (*promises)[i];
1140                         if (promise->thread_is_available(curr->get_tid()))
1141                                 promise->add_thread(th->get_id());
1142                 }
1143                 break;
1144         }
1145         case THREAD_JOIN: {
1146                 Thread *blocking = curr->get_thread_operand();
1147                 ModelAction *act = get_last_action(blocking->get_id());
1148                 curr->synchronize_with(act);
1149                 updated = true; /* trigger rel-seq checks */
1150                 break;
1151         }
1152         case THREAD_FINISH: {
1153                 Thread *th = get_thread(curr);
1154                 while (!th->wait_list_empty()) {
1155                         ModelAction *act = th->pop_wait_list();
1156                         scheduler->wake(get_thread(act));
1157                 }
1158                 th->complete();
1159                 /* Completed thread can't satisfy promises */
1160                 for (unsigned int i = 0; i < promises->size(); i++) {
1161                         Promise *promise = (*promises)[i];
1162                         if (promise->thread_is_available(th->get_id()))
1163                                 if (promise->eliminate_thread(th->get_id()))
1164                                         priv->failed_promise = true;
1165                 }
1166                 updated = true; /* trigger rel-seq checks */
1167                 break;
1168         }
1169         case THREAD_START: {
1170                 check_promises(curr->get_tid(), NULL, curr->get_cv());
1171                 break;
1172         }
1173         default:
1174                 break;
1175         }
1176
1177         return updated;
1178 }
1179
1180 /**
1181  * @brief Process the current action for release sequence fixup activity
1182  *
1183  * Performs model-checker release sequence fixups for the current action,
1184  * forcing a single pending release sequence to break (with a given, potential
1185  * "loose" write) or to complete (i.e., synchronize). If a pending release
1186  * sequence forms a complete release sequence, then we must perform the fixup
1187  * synchronization, mo_graph additions, etc.
1188  *
1189  * @param curr The current action; must be a release sequence fixup action
1190  * @param work_queue The work queue to which to add work items as they are
1191  * generated
1192  */
1193 void ModelChecker::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
1194 {
1195         const ModelAction *write = curr->get_node()->get_relseq_break();
1196         struct release_seq *sequence = pending_rel_seqs->back();
1197         pending_rel_seqs->pop_back();
1198         ASSERT(sequence);
1199         ModelAction *acquire = sequence->acquire;
1200         const ModelAction *rf = sequence->rf;
1201         const ModelAction *release = sequence->release;
1202         ASSERT(acquire);
1203         ASSERT(release);
1204         ASSERT(rf);
1205         ASSERT(release->same_thread(rf));
1206
1207         if (write == NULL) {
1208                 /**
1209                  * @todo Forcing a synchronization requires that we set
1210                  * modification order constraints. For instance, we can't allow
1211                  * a fixup sequence in which two separate read-acquire
1212                  * operations read from the same sequence, where the first one
1213                  * synchronizes and the other doesn't. Essentially, we can't
1214                  * allow any writes to insert themselves between 'release' and
1215                  * 'rf'
1216                  */
1217
1218                 /* Must synchronize */
1219                 if (!acquire->synchronize_with(release)) {
1220                         set_bad_synchronization();
1221                         return;
1222                 }
1223                 /* Re-check all pending release sequences */
1224                 work_queue->push_back(CheckRelSeqWorkEntry(NULL));
1225                 /* Re-check act for mo_graph edges */
1226                 work_queue->push_back(MOEdgeWorkEntry(acquire));
1227
1228                 /* propagate synchronization to later actions */
1229                 action_list_t::reverse_iterator rit = action_trace->rbegin();
1230                 for (; (*rit) != acquire; rit++) {
1231                         ModelAction *propagate = *rit;
1232                         if (acquire->happens_before(propagate)) {
1233                                 propagate->synchronize_with(acquire);
1234                                 /* Re-check 'propagate' for mo_graph edges */
1235                                 work_queue->push_back(MOEdgeWorkEntry(propagate));
1236                         }
1237                 }
1238         } else {
1239                 /* Break release sequence with new edges:
1240                  *   release --mo--> write --mo--> rf */
1241                 mo_graph->addEdge(release, write);
1242                 mo_graph->addEdge(write, rf);
1243         }
1244
1245         /* See if we have realized a data race */
1246         checkDataRaces();
1247 }
1248
1249 /**
1250  * Initialize the current action by performing one or more of the following
1251  * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
1252  * in the NodeStack, manipulating backtracking sets, allocating and
1253  * initializing clock vectors, and computing the promises to fulfill.
1254  *
1255  * @param curr The current action, as passed from the user context; may be
1256  * freed/invalidated after the execution of this function, with a different
1257  * action "returned" its place (pass-by-reference)
1258  * action "returned" in its place (pass-by-reference)
1259  */
1260 bool ModelChecker::initialize_curr_action(ModelAction **curr)
1261 {
1262         ModelAction *newcurr;
1263
1264         if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
1265                 newcurr = process_rmw(*curr);
1266                 delete *curr;
1267
1268                 if (newcurr->is_rmw())
1269                         compute_promises(newcurr);
1270
1271                 *curr = newcurr;
1272                 return false;
1273         }
1274
1275         (*curr)->set_seq_number(get_next_seq_num());
1276
1277         newcurr = node_stack->explore_action(*curr, scheduler->get_enabled_array());
1278         if (newcurr) {
1279                 /* First restore type and order in case of RMW operation */
1280                 if ((*curr)->is_rmwr())
1281                         newcurr->copy_typeandorder(*curr);
1282
1283                 ASSERT((*curr)->get_location() == newcurr->get_location());
1284                 newcurr->copy_from_new(*curr);
1285
1286                 /* Discard duplicate ModelAction; use action from NodeStack */
1287                 delete *curr;
1288
1289                 /* Always compute new clock vector */
1290                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1291
1292                 *curr = newcurr;
1293                 return false; /* Action was explored previously */
1294         } else {
1295                 newcurr = *curr;
1296
1297                 /* Always compute new clock vector */
1298                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1299
1300                 /* Assign most recent release fence */
1301                 newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
1302
1303                 /*
1304                  * Perform one-time actions when pushing new ModelAction onto
1305                  * NodeStack
1306                  */
1307                 if (newcurr->is_write())
1308                         compute_promises(newcurr);
1309                 else if (newcurr->is_relseq_fixup())
1310                         compute_relseq_breakwrites(newcurr);
1311                 else if (newcurr->is_wait())
1312                         newcurr->get_node()->set_misc_max(2);
1313                 else if (newcurr->is_notify_one()) {
1314                         newcurr->get_node()->set_misc_max(get_safe_ptr_action(condvar_waiters_map, newcurr->get_location())->size());
1315                 }
1316                 return true; /* This was a new ModelAction */
1317         }
1318 }
1319
1320 /**
1321  * @brief Establish reads-from relation between two actions
1322  *
1323  * Perform basic operations involved with establishing a concrete rf relation,
1324  * including setting the ModelAction data and checking for release sequences.
1325  *
1326  * @param act The action that is reading (must be a read)
1327  * @param rf The action from which we are reading (must be a write)
1328  *
1329  * @return True if this read established synchronization
1330  */
1331 bool ModelChecker::read_from(ModelAction *act, const ModelAction *rf)
1332 {
1333         ASSERT(rf);
1334         act->set_read_from(rf);
1335         if (act->is_acquire()) {
1336                 rel_heads_list_t release_heads;
1337                 get_release_seq_heads(act, act, &release_heads);
1338                 int num_heads = release_heads.size();
1339                 for (unsigned int i = 0; i < release_heads.size(); i++)
1340                         if (!act->synchronize_with(release_heads[i])) {
1341                                 set_bad_synchronization();
1342                                 num_heads--;
1343                         }
1344                 return num_heads > 0;
1345         }
1346         return false;
1347 }
1348
1349 /**
1350  * Check promises and eliminate potentially-satisfying threads when a thread is
1351  * blocked (e.g., join, lock). A thread which is waiting on another thread can
1352  * no longer satisfy a promise generated from that thread.
1353  *
1354  * @param blocker The thread on which a thread is waiting
1355  * @param waiting The waiting thread
1356  */
1357 void ModelChecker::thread_blocking_check_promises(Thread *blocker, Thread *waiting)
1358 {
1359         for (unsigned int i = 0; i < promises->size(); i++) {
1360                 Promise *promise = (*promises)[i];
1361                 if (!promise->thread_is_available(waiting->get_id()))
1362                         continue;
1363                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
1364                         ModelAction *reader = promise->get_reader(j);
1365                         if (reader->get_tid() != blocker->get_id())
1366                                 continue;
1367                         if (promise->eliminate_thread(waiting->get_id())) {
1368                                 /* Promise has failed */
1369                                 priv->failed_promise = true;
1370                         } else {
1371                                 /* Only eliminate the 'waiting' thread once */
1372                                 return;
1373                         }
1374                 }
1375         }
1376 }
1377
1378 /**
1379  * @brief Check whether a model action is enabled.
1380  *
1381  * Checks whether a lock or join operation would be successful (i.e., is the
1382  * lock already locked, or is the joined thread already complete). If not, put
1383  * the action in a waiter list.
1384  *
1385  * @param curr The ModelAction to check.
1386  * @return True if the action is enabled; false otherwise.
1387  */
1388 bool ModelChecker::check_action_enabled(ModelAction *curr) {
1389         if (curr->is_lock()) {
1390                 std::mutex *lock = (std::mutex *)curr->get_location();
1391                 struct std::mutex_state *state = lock->get_state();
1392                 if (state->islocked) {
1393                         //Stick the action in the appropriate waiting queue
1394                         get_safe_ptr_action(lock_waiters_map, curr->get_location())->push_back(curr);
1395                         return false;
1396                 }
1397         } else if (curr->get_type() == THREAD_JOIN) {
1398                 Thread *blocking = (Thread *)curr->get_location();
1399                 if (!blocking->is_complete()) {
1400                         blocking->push_wait_list(curr);
1401                         thread_blocking_check_promises(blocking, get_thread(curr));
1402                         return false;
1403                 }
1404         }
1405
1406         return true;
1407 }
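
/*
 * Illustrative sketch (editor's example): if Thread 2 calls m.lock() while
 * Thread 1 still holds 'm', the lock action is parked in lock_waiters_map and
 * check_action_enabled() returns false; the scheduler then sleeps Thread 2
 * until a later unlock makes the lock available again.
 */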
1408
1409 /**
1410  * This is the heart of the model checker routine. It performs model-checking
1411  * actions corresponding to a given "current action." Among other tasks, it
1412  * calculates reads-from relationships, updates synchronization clock vectors,
1413  * forms a memory_order constraints graph, and handles replay/backtrack
1414  * execution when running permutations of previously-observed executions.
1415  *
1416  * @param curr The current action to process
1417  * @return The ModelAction that is actually executed; may be different than
1418  * curr; may be NULL, if the current action is not enabled to run
1419  */
1420 ModelAction * ModelChecker::check_current_action(ModelAction *curr)
1421 {
1422         ASSERT(curr);
1423         bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
1424
1425         if (!check_action_enabled(curr)) {
1426                 /* Make the execution look like we chose to run this action
1427                  * much later, when a lock/join can succeed */
1428                 get_thread(curr)->set_pending(curr);
1429                 scheduler->sleep(get_thread(curr));
1430                 return NULL;
1431         }
1432
1433         bool newly_explored = initialize_curr_action(&curr);
1434
1435         DBG();
1436         if (DBG_ENABLED())
1437                 curr->print();
1438
1439         wake_up_sleeping_actions(curr);
1440
1441         /* Add the action to lists before any other model-checking tasks */
1442         if (!second_part_of_rmw)
1443                 add_action_to_lists(curr);
1444
1445         /* Build may_read_from set for newly-created actions */
1446         if (newly_explored && curr->is_read())
1447                 build_may_read_from(curr);
1448
1449         /* Initialize work_queue with the "current action" work */
1450         work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
1451         while (!work_queue.empty() && !has_asserted()) {
1452                 WorkQueueEntry work = work_queue.front();
1453                 work_queue.pop_front();
1454
1455                 switch (work.type) {
1456                 case WORK_CHECK_CURR_ACTION: {
1457                         ModelAction *act = work.action;
1458                         bool update = false; /* update this location's release sequences */
1459                         bool update_all = false; /* update all release sequences */
1460
1461                         if (process_thread_action(curr))
1462                                 update_all = true;
1463
1464                         if (act->is_read() && !second_part_of_rmw && process_read(act))
1465                                 update = true;
1466
1467                         if (act->is_write() && process_write(act))
1468                                 update = true;
1469
1470                         if (act->is_fence() && process_fence(act))
1471                                 update_all = true;
1472
1473                         if (act->is_mutex_op() && process_mutex(act))
1474                                 update_all = true;
1475
1476                         if (act->is_relseq_fixup())
1477                                 process_relseq_fixup(curr, &work_queue);
1478
1479                         if (update_all)
1480                                 work_queue.push_back(CheckRelSeqWorkEntry(NULL));
1481                         else if (update)
1482                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1483                         break;
1484                 }
1485                 case WORK_CHECK_RELEASE_SEQ:
1486                         resolve_release_sequences(work.location, &work_queue);
1487                         break;
1488                 case WORK_CHECK_MO_EDGES: {
1489                         /** @todo Complete verification of work_queue */
1490                         ModelAction *act = work.action;
1491                         bool updated = false;
1492
1493                         if (act->is_read()) {
1494                                 const ModelAction *rf = act->get_reads_from();
1495                                 const Promise *promise = act->get_reads_from_promise();
1496                                 if (rf) {
1497                                         if (r_modification_order(act, rf))
1498                                                 updated = true;
1499                                 } else if (promise) {
1500                                         if (r_modification_order(act, promise))
1501                                                 updated = true;
1502                                 }
1503                         }
1504                         if (act->is_write()) {
1505                                 if (w_modification_order(act))
1506                                         updated = true;
1507                         }
1508                         mo_graph->commitChanges();
1509
1510                         if (updated)
1511                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1512                         break;
1513                 }
1514                 default:
1515                         ASSERT(false);
1516                         break;
1517                 }
1518         }
1519
1520         check_curr_backtracking(curr);
1521         set_backtracking(curr);
1522         return curr;
1523 }
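
/*
 * Editor's note (informal summary): the work-queue loop above runs to a fixed
 * point. Processing the current action may enqueue release-sequence checks or
 * mo-edge re-checks, those may enqueue further work, and the loop continues
 * until no more updates are produced (or an assertion stops the execution).
 */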
1524
1525 void ModelChecker::check_curr_backtracking(ModelAction *curr)
1526 {
1527         Node *currnode = curr->get_node();
1528         Node *parnode = currnode->get_parent();
1529
1530         if ((parnode && !parnode->backtrack_empty()) ||
1531                          !currnode->misc_empty() ||
1532                          !currnode->read_from_empty() ||
1533                          !currnode->promise_empty() ||
1534                          !currnode->relseq_break_empty()) {
1535                 set_latest_backtrack(curr);
1536         }
1537 }
1538
1539 bool ModelChecker::promises_expired() const
1540 {
1541         for (unsigned int i = 0; i < promises->size(); i++) {
1542                 Promise *promise = (*promises)[i];
1543                 if (promise->get_expiration() < priv->used_sequence_numbers)
1544                         return true;
1545         }
1546         return false;
1547 }
1548
1549 /**
1550  * This is the strongest feasibility check available.
1551  * @return whether the current trace (partial or complete) must be a prefix of
1552  * a feasible trace.
1553  */
1554 bool ModelChecker::isfeasibleprefix() const
1555 {
1556         return pending_rel_seqs->size() == 0 && is_feasible_prefix_ignore_relseq();
1557 }
1558
1559 /**
1560  * Print diagnostic information about an infeasible execution
1561  * @param prefix A string to prefix the output with; if NULL, then a default
1562  * message prefix will be provided
1563  */
1564 void ModelChecker::print_infeasibility(const char *prefix) const
1565 {
1566         char buf[160];  /* large enough to hold all infeasibility messages at once */
1567         char *ptr = buf;
1568         if (mo_graph->checkForCycles())
1569                 ptr += sprintf(ptr, "[mo cycle]");
1570         if (priv->failed_promise)
1571                 ptr += sprintf(ptr, "[failed promise]");
1572         if (priv->too_many_reads)
1573                 ptr += sprintf(ptr, "[too many reads]");
1574         if (priv->no_valid_reads)
1575                 ptr += sprintf(ptr, "[no valid reads-from]");
1576         if (priv->bad_synchronization)
1577                 ptr += sprintf(ptr, "[bad sw ordering]");
1578         if (promises_expired())
1579                 ptr += sprintf(ptr, "[promise expired]");
1580         if (promises->size() != 0)
1581                 ptr += sprintf(ptr, "[unresolved promise]");
1582         if (ptr != buf)
1583                 model_print("%s: %s\n", prefix ? prefix : "Infeasible", buf);
1584 }
1585
1586 /**
1587  * Returns whether the current completed trace is feasible, except for pending
1588  * release sequences.
1589  */
1590 bool ModelChecker::is_feasible_prefix_ignore_relseq() const
1591 {
1592         return !is_infeasible() && promises->size() == 0;
1593 }
1594
1595 /**
1596  * Check if the current partial trace is infeasible. Does not check any
1597  * end-of-execution flags, which might rule out the execution. Thus, this is
1598  * useful only for ruling an execution as infeasible.
1599  * @return whether the current partial trace is infeasible.
1600  */
1601 bool ModelChecker::is_infeasible() const
1602 {
1603         return mo_graph->checkForCycles() ||
1604                 priv->no_valid_reads ||
1605                 priv->failed_promise ||
1606                 priv->too_many_reads ||
1607                 priv->bad_synchronization ||
1608                 promises_expired();
1609 }
1610
1611 /** Close out an RMWR by converting the previous RMWR into an RMW or READ. */
1612 ModelAction * ModelChecker::process_rmw(ModelAction *act) {
1613         ModelAction *lastread = get_last_action(act->get_tid());
1614         lastread->process_rmw(act);
1615         if (act->is_rmw()) {
1616                 if (lastread->get_reads_from())
1617                         mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
1618                 else
1619                         mo_graph->addRMWEdge(lastread->get_reads_from_promise(), lastread);
1620                 mo_graph->commitChanges();
1621         }
1622         return lastread;
1623 }
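
/*
 * Editor's note (informal): for an RMW such as
 * x.fetch_add(1, std::memory_order_relaxed), the earlier RMWR action carries
 * the load half; process_rmw() folds the store half into it, and addRMWEdge()
 * constrains the RMW to immediately follow the store it read from in
 * modification order, which is what makes the read-modify-write atomic.
 */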
1624
1625 /**
1626  * Checks whether a thread has read from the same write too many times
1627  * without seeing the effects of a later write.
1628  *
1629  * Basic idea:
1630  * 1) there must be a different write that we could read from that would satisfy the modification order,
1631  * 2) we must have read from the same value in excess of maxreads times, and
1632  * 3) that other write must have been in the reads_from set of each of those maxreads reads.
1633  *
1634  * If so, we decide that the execution is no longer feasible.
1635  */
1636 void ModelChecker::check_recency(ModelAction *curr, const ModelAction *rf)
1637 {
1638         if (params.maxreads != 0) {
1639                 if (curr->get_node()->get_read_from_past_size() <= 1)
1640                         return;
1641                 //Must make sure that the execution is currently feasible... We could
1642                 //accidentally clear it by rolling back
1643                 if (is_infeasible())
1644                         return;
1645                 std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1646                 int tid = id_to_int(curr->get_tid());
1647
1648                 /* Skip checks if this thread has no prior actions at this location */
1649                 if ((int)thrd_lists->size() <= tid)
1650                         return;
1651                 action_list_t *list = &(*thrd_lists)[tid];
1652
1653                 action_list_t::reverse_iterator rit = list->rbegin();
1654                 /* Skip past curr */
1655                 for (; (*rit) != curr; rit++)
1656                         ;
1657                 /* go past curr now */
1658                 rit++;
1659
1660                 action_list_t::reverse_iterator ritcopy = rit;
1661                 //See if we have enough reads from the same value
1662                 int count = 0;
1663                 for (; count < params.maxreads; rit++, count++) {
1664                         if (rit == list->rend())
1665                                 return;
1666                         ModelAction *act = *rit;
1667                         if (!act->is_read())
1668                                 return;
1669
1670                         if (act->get_reads_from() != rf)
1671                                 return;
1672                         if (act->get_node()->get_read_from_past_size() <= 1)
1673                                 return;
1674                 }
1675                 for (int i = 0; i < curr->get_node()->get_read_from_past_size(); i++) {
1676                         /* Get write */
1677                         const ModelAction *write = curr->get_node()->get_read_from_past(i);
1678
1679                         /* Need a different write */
1680                         if (write == rf)
1681                                 continue;
1682
1683                         /* Test to see whether this is a feasible write to read from */
1684                         /** NOTE: all members of the read-from set should be
1685                          *  feasible, so we no longer check them here **/
1686
1687                         rit = ritcopy;
1688
1689                         bool feasiblewrite = true;
1690                         //now we need to see if this write works for everyone
1691
1692                         for (int loop = count; loop > 0; loop--, rit++) {
1693                                 ModelAction *act = *rit;
1694                                 bool foundvalue = false;
1695                                 for (int j = 0; j < act->get_node()->get_read_from_past_size(); j++) {
1696                                         if (act->get_node()->get_read_from_past(j) == write) {
1697                                                 foundvalue = true;
1698                                                 break;
1699                                         }
1700                                 }
1701                                 if (!foundvalue) {
1702                                         feasiblewrite = false;
1703                                         break;
1704                                 }
1705                         }
1706                         if (feasiblewrite) {
1707                                 priv->too_many_reads = true;
1708                                 return;
1709                         }
1710                 }
1711         }
1712 }
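
/*
 * Illustrative sketch (editor's example; names hypothetical): a spin loop such
 * as
 *
 *   while (!flag.load(std::memory_order_relaxed))
 *           ;
 *
 * can keep reading from the same (initial) store of 'flag' forever. Once it
 * has done so params.maxreads times while a later store to 'flag' appeared in
 * every one of those reads' may-read-from sets, check_recency() sets
 * too_many_reads and the execution is treated as infeasible.
 */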
1713
1714 /**
1715  * Updates the mo_graph with the constraints imposed from the current
1716  * read.
1717  *
1718  * Basic idea is the following: Go through each other thread and find
1719  * the last action that happened before our read.  Two cases:
1720  *
1721  * (1) The action is a write => that write must either occur before
1722  * the write we read from or be the write we read from.
1723  *
1724  * (2) The action is a read => the write that that action read from
1725  * must occur before the write we read from or be the same write.
1726  *
1727  * @param curr The current action. Must be a read.
1728  * @param rf The ModelAction or Promise that curr reads from. Must be a write.
1729  * @return True if modification order edges were added; false otherwise
1730  */
1731 template <typename rf_type>
1732 bool ModelChecker::r_modification_order(ModelAction *curr, const rf_type *rf)
1733 {
1734         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1735         unsigned int i;
1736         bool added = false;
1737         ASSERT(curr->is_read());
1738
1739         /* Last SC fence in the current thread */
1740         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1741
1742         /* Iterate over all threads */
1743         for (i = 0; i < thrd_lists->size(); i++) {
1744                 /* Last SC fence in thread i */
1745                 ModelAction *last_sc_fence_thread_local = NULL;
1746                 if (int_to_id((int)i) != curr->get_tid())
1747                         last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(i), NULL);
1748
1749                 /* Last SC fence in thread i, before last SC fence in current thread */
1750                 ModelAction *last_sc_fence_thread_before = NULL;
1751                 if (last_sc_fence_local)
1752                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1753
1754                 /* Iterate over actions in thread, starting from most recent */
1755                 action_list_t *list = &(*thrd_lists)[i];
1756                 action_list_t::reverse_iterator rit;
1757                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1758                         ModelAction *act = *rit;
1759
1760                         if (act->is_write() && !act->equals(rf) && act != curr) {
1761                                 /* C++, Section 29.3 statement 5 */
1762                                 if (curr->is_seqcst() && last_sc_fence_thread_local &&
1763                                                 *act < *last_sc_fence_thread_local) {
1764                                         added = mo_graph->addEdge(act, rf) || added;
1765                                         break;
1766                                 }
1767                                 /* C++, Section 29.3 statement 4 */
1768                                 else if (act->is_seqcst() && last_sc_fence_local &&
1769                                                 *act < *last_sc_fence_local) {
1770                                         added = mo_graph->addEdge(act, rf) || added;
1771                                         break;
1772                                 }
1773                                 /* C++, Section 29.3 statement 6 */
1774                                 else if (last_sc_fence_thread_before &&
1775                                                 *act < *last_sc_fence_thread_before) {
1776                                         added = mo_graph->addEdge(act, rf) || added;
1777                                         break;
1778                                 }
1779                         }
1780
1781                         /*
1782                          * Include at most one act per-thread that "happens
1783                          * before" curr. Don't consider curr itself (the reflexive case).
1784                          */
1785                         if (act->happens_before(curr) && act != curr) {
1786                                 if (act->is_write()) {
1787                                         if (!act->equals(rf)) {
1788                                                 added = mo_graph->addEdge(act, rf) || added;
1789                                         }
1790                                 } else {
1791                                         const ModelAction *prevrf = act->get_reads_from();
1792                                         const Promise *prevrf_promise = act->get_reads_from_promise();
1793                                         if (prevrf) {
1794                                                 if (!prevrf->equals(rf))
1795                                                         added = mo_graph->addEdge(prevrf, rf) || added;
1796                                         } else if (!prevrf_promise->equals(rf)) {
1797                                                 added = mo_graph->addEdge(prevrf_promise, rf) || added;
1798                                         }
1799                                 }
1800                                 break;
1801                         }
1802                 }
1803         }
1804
1805         /*
1806          * All compatible, thread-exclusive promises must be ordered after any
1807          * concrete loads from the same thread
1808          */
1809         for (unsigned int i = 0; i < promises->size(); i++)
1810                 if ((*promises)[i]->is_compatible_exclusive(curr))
1811                         added = mo_graph->addEdge(rf, (*promises)[i]) || added;
1812
1813         return added;
1814 }
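
/*
 * Illustrative sketch (editor's example) of case (1) above: if a write W1
 * happens before the current read R and R reads from a different write W2,
 * then W1 --mo--> W2 must hold; otherwise R would read from a write that
 * modification order places before a write R is already guaranteed to see.
 */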
1815
1816 /**
1817  * Updates the mo_graph with the constraints imposed from the current write.
1818  *
1819  * Basic idea is the following: Go through each other thread and find
1820  * the latest action that happened before our write.  Two cases:
1821  *
1822  * (1) The action is a write => that write must occur before
1823  * the current write
1824  *
1825  * (2) The action is a read => the write that that action read from
1826  * must occur before the current write.
1827  *
1828  * This method also handles two other issues:
1829  *
1830  * (I) Sequential Consistency: Making sure that if the current write is
1831  * seq_cst, that it occurs after the previous seq_cst write.
1832  *
1833  * (II) Sending the write back to non-synchronizing reads.
1834  *
1835  * @param curr The current action. Must be a write.
1836  * @return True if modification order edges were added; false otherwise
1837  */
1838 bool ModelChecker::w_modification_order(ModelAction *curr)
1839 {
1840         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
1841         unsigned int i;
1842         bool added = false;
1843         ASSERT(curr->is_write());
1844
1845         if (curr->is_seqcst()) {
1846                 /* We have to at least see the last sequentially consistent write,
1847                  * so we are initialized. */
1848                 ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
1849                 if (last_seq_cst != NULL) {
1850                         added = mo_graph->addEdge(last_seq_cst, curr) || added;
1851                 }
1852         }
1853
1854         /* Last SC fence in the current thread */
1855         ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1856
1857         /* Iterate over all threads */
1858         for (i = 0; i < thrd_lists->size(); i++) {
1859                 /* Last SC fence in thread i, before last SC fence in current thread */
1860                 ModelAction *last_sc_fence_thread_before = NULL;
1861                 if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
1862                         last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1863
1864                 /* Iterate over actions in thread, starting from most recent */
1865                 action_list_t *list = &(*thrd_lists)[i];
1866                 action_list_t::reverse_iterator rit;
1867                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1868                         ModelAction *act = *rit;
1869                         if (act == curr) {
1870                                 /*
1871                                  * 1) If RMW and it actually read from something, then we
1872                                  * already have all relevant edges, so just skip to next
1873                                  * thread.
1874                                  *
1875                                  * 2) If RMW and it didn't read from anything, we should add
1876                                  * whatever edge we can get to speed up convergence.
1877                                  *
1878                                  * 3) If normal write, we need to look at earlier actions, so
1879                                  * continue processing list.
1880                                  */
1881                                 if (curr->is_rmw()) {
1882                                         if (curr->get_reads_from() != NULL)
1883                                                 break;
1884                                         else
1885                                                 continue;
1886                                 } else
1887                                         continue;
1888                         }
1889
1890                         /* C++, Section 29.3 statement 7 */
1891                         if (last_sc_fence_thread_before && act->is_write() &&
1892                                         *act < *last_sc_fence_thread_before) {
1893                                 added = mo_graph->addEdge(act, curr) || added;
1894                                 break;
1895                         }
1896
1897                         /*
1898                          * Include at most one act per-thread that "happens
1899                          * before" curr
1900                          */
1901                         if (act->happens_before(curr)) {
1902                                 /*
1903                                  * Note: if act is RMW, just add edge:
1904                                  *   act --mo--> curr
1905                                  * The following edge should be handled elsewhere:
1906                                  *   readfrom(act) --mo--> act
1907                                  */
1908                                 if (act->is_write())
1909                                         added = mo_graph->addEdge(act, curr) || added;
1910                                 else if (act->is_read()) {
1911                                         //if previous read accessed a null, just keep going
1912                                         if (act->get_reads_from() == NULL)
1913                                                 continue;
1914                                         added = mo_graph->addEdge(act->get_reads_from(), curr) || added;
1915                                 }
1916                                 break;
1917                         } else if (act->is_read() && !act->could_synchronize_with(curr) &&
1918                                                      !act->same_thread(curr)) {
1919                                 /* We have an action that:
1920                                    (1) did not happen before us
1921                                    (2) is a read and we are a write
1922                                    (3) cannot synchronize with us
1923                                    (4) is in a different thread
1924                                    =>
1925                                    that read could potentially read from our write.  Note that
1926                                    these checks are overly conservative at this point, we'll
1927                                    do more checks before actually removing the
1928                                    pendingfuturevalue.
1929
1930                                  */
1931                                 if (thin_air_constraint_may_allow(curr, act)) {
1932                                         if (!is_infeasible())
1933                                                 futurevalues->push_back(PendingFutureValue(curr, act));
1934                                         else if (curr->is_rmw() && act->is_rmw() && curr->get_reads_from() && curr->get_reads_from() == act->get_reads_from())
1935                                                 add_future_value(curr, act);
1936                                 }
1937                         }
1938                 }
1939         }
1940
1941         /*
1942          * All compatible, thread-exclusive promises must be ordered after any
1943          * concrete stores from the same thread, or else they can be merged with
1944          * this store later
1945          */
1946         for (unsigned int i = 0; i < promises->size(); i++)
1947                 if ((*promises)[i]->is_compatible_exclusive(curr))
1948                         added = mo_graph->addEdge(curr, (*promises)[i]) || added;
1949
1950         return added;
1951 }
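
/*
 * Illustrative sketch (editor's example) of case (2) above: if a read R
 * happens before the current write W and R reads from some write W0, then the
 * edge W0 --mo--> W is added; a write ordered after R in happens-before
 * cannot precede R's source in modification order.
 */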
1952
1953 /** Arbitrary reads from the future are not allowed.  Section 29.3
1954  * part 9 places some constraints.  This method checks one result of that
1955  * constraint.  Others require compiler support. */
1956 bool ModelChecker::thin_air_constraint_may_allow(const ModelAction *writer, const ModelAction *reader)
1957 {
1958         if (!writer->is_rmw())
1959                 return true;
1960
1961         if (!reader->is_rmw())
1962                 return true;
1963
1964         for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
1965                 if (search == reader)
1966                         return false;
1967                 if (search->get_tid() == reader->get_tid() &&
1968                                 search->happens_before(reader))
1969                         break;
1970         }
1971
1972         return true;
1973 }
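
/*
 * Illustrative sketch (editor's example): with two relaxed RMWs on the same
 * variable in different threads, letting each read from the other would form
 * a cycle in which the RMWs feed each other values "out of thin air". The
 * reads-from chain walk above catches exactly that: if 'reader' already
 * appears in 'writer''s reads-from chain, the proposed future read is
 * disallowed.
 */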
1974
1975 /**
1976  * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
1977  * some constraints. This method checks the following constraint (others
1978  * require compiler support):
1979  *
1980  *   If X --hb-> Y --mo-> Z, then X should not read from Z.
1981  */
1982 bool ModelChecker::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
1983 {
1984         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, reader->get_location());
1985         unsigned int i;
1986         /* Iterate over all threads */
1987         for (i = 0; i < thrd_lists->size(); i++) {
1988                 const ModelAction *write_after_read = NULL;
1989
1990                 /* Iterate over actions in thread, starting from most recent */
1991                 action_list_t *list = &(*thrd_lists)[i];
1992                 action_list_t::reverse_iterator rit;
1993                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1994                         ModelAction *act = *rit;
1995
1996                         /* Don't disallow due to act == reader */
1997                         if (!reader->happens_before(act) || reader == act)
1998                                 break;
1999                         else if (act->is_write())
2000                                 write_after_read = act;
2001                         else if (act->is_read() && act->get_reads_from() != NULL)
2002                                 write_after_read = act->get_reads_from();
2003                 }
2004
2005                 if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
2006                         return false;
2007         }
2008         return true;
2009 }
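
/*
 * Illustrative sketch (editor's example): if the reader X happens before a
 * write Y in another thread (or before a read whose source is Y), and
 * Y --mo--> Z where Z is the proposed future write, then letting X read from
 * Z would contradict X --hb--> Y --mo--> Z, so mo_may_allow() rejects that
 * (writer, reader) pair.
 */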
2010
2011 /**
2012  * Finds the head(s) of the release sequence(s) containing a given ModelAction.
2013  * The ModelAction under consideration is expected to be taking part in
2014  * release/acquire synchronization as an object of the "reads from" relation.
2015  * Note that this can only provide release sequence support for RMW chains
2016  * which do not read from the future, as those actions cannot be traced until
2017  * their "promise" is fulfilled. Similarly, we may not even establish the
2018  * presence of a release sequence with certainty, as some modification order
2019  * constraints may be decided further in the future. Thus, this function
2020  * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
2021  * and a boolean representing certainty.
2022  *
2023  * @param rf The action that might be part of a release sequence. Must be a
2024  * write.
2025  * @param release_heads A pass-by-reference style return parameter. After
2026  * execution of this function, release_heads will contain the heads of all the
2027  * relevant release sequences, if any exist with certainty
2028  * @param pending A pass-by-reference style return parameter which is only used
2029  * when returning false (i.e., uncertain). Returns most information regarding
2030  * an uncertain release sequence, including any write operations that might
2031  * break the sequence.
2032  * @return true, if the ModelChecker is certain that release_heads is complete;
2033  * false otherwise
2034  */
2035 bool ModelChecker::release_seq_heads(const ModelAction *rf,
2036                 rel_heads_list_t *release_heads,
2037                 struct release_seq *pending) const
2038 {
2039         /* Only check for release sequences if there are no cycles */
2040         if (mo_graph->checkForCycles())
2041                 return false;
2042
2043         for ( ; rf != NULL; rf = rf->get_reads_from()) {
2044                 ASSERT(rf->is_write());
2045
2046                 if (rf->is_release())
2047                         release_heads->push_back(rf);
2048                 else if (rf->get_last_fence_release())
2049                         release_heads->push_back(rf->get_last_fence_release());
2050                 if (!rf->is_rmw())
2051                         break; /* End of RMW chain */
2052
2053                 /** @todo Need to be smarter here...  In the Linux lock
2054                  * example, this will run to the beginning of the program for
2055                  * every acquire. */
2056                 /** @todo The way to be smarter here is to keep going until one
2057                  * thread has a release preceded by an acquire and you've seen
2058                  * both. */
2059
2060                 /* acq_rel RMW is a sufficient stopping condition */
2061                 if (rf->is_acquire() && rf->is_release())
2062                         return true; /* complete */
2063         };
2064         if (!rf) {
2065                 /* read from future: need to settle this later */
2066                 pending->rf = NULL;
2067                 return false; /* incomplete */
2068         }
2069
2070         if (rf->is_release())
2071                 return true; /* complete */
2072
2073         /* else relaxed write
2074          * - check for fence-release in the same thread (29.8, stmt. 3)
2075          * - check modification order for contiguous subsequence
2076          *   -> rf must be same thread as release */
2077
2078         const ModelAction *fence_release = rf->get_last_fence_release();
2079         /* Synchronize with a fence-release unconditionally; we don't need to
2080          * find any more "contiguous subsequence..." for it */
2081         if (fence_release)
2082                 release_heads->push_back(fence_release);
2083
2084         int tid = id_to_int(rf->get_tid());
2085         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, rf->get_location());
2086         action_list_t *list = &(*thrd_lists)[tid];
2087         action_list_t::const_reverse_iterator rit;
2088
2089         /* Find rf in the thread list */
2090         rit = std::find(list->rbegin(), list->rend(), rf);
2091         ASSERT(rit != list->rend());
2092
2093         /* Find the last {write,fence}-release */
2094         for (; rit != list->rend(); rit++) {
2095                 if (fence_release && *(*rit) < *fence_release)
2096                         break;
2097                 if ((*rit)->is_release())
2098                         break;
2099         }
2100         if (rit == list->rend()) {
2101                 /* No write-release in this thread */
2102                 return true; /* complete */
2103         } else if (fence_release && *(*rit) < *fence_release) {
2104                 /* The fence-release is more recent (and so, "stronger") than
2105                  * the most recent write-release */
2106                 return true; /* complete */
2107         } /* else, need to establish contiguous release sequence */
2108         ModelAction *release = *rit;
2109
2110         ASSERT(rf->same_thread(release));
2111
2112         pending->writes.clear();
2113
2114         bool certain = true;
2115         for (unsigned int i = 0; i < thrd_lists->size(); i++) {
2116                 if (id_to_int(rf->get_tid()) == (int)i)
2117                         continue;
2118                 list = &(*thrd_lists)[i];
2119
2120                 /* Can we ensure no future writes from this thread may break
2121                  * the release seq? */
2122                 bool future_ordered = false;
2123
2124                 ModelAction *last = get_last_action(int_to_id(i));
2125                 Thread *th = get_thread(int_to_id(i));
2126                 if ((last && rf->happens_before(last)) ||
2127                                 !is_enabled(th) ||
2128                                 th->is_complete())
2129                         future_ordered = true;
2130
2131                 ASSERT(!th->is_model_thread() || future_ordered);
2132
2133                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2134                         const ModelAction *act = *rit;
2135                         /* Reach synchronization -> this thread is complete */
2136                         if (act->happens_before(release))
2137                                 break;
2138                         if (rf->happens_before(act)) {
2139                                 future_ordered = true;
2140                                 continue;
2141                         }
2142
2143                         /* Only non-RMW writes can break release sequences */
2144                         if (!act->is_write() || act->is_rmw())
2145                                 continue;
2146
2147                         /* Check modification order */
2148                         if (mo_graph->checkReachable(rf, act)) {
2149                                 /* rf --mo--> act */
2150                                 future_ordered = true;
2151                                 continue;
2152                         }
2153                         if (mo_graph->checkReachable(act, release))
2154                                 /* act --mo--> release */
2155                                 break;
2156                         if (mo_graph->checkReachable(release, act) &&
2157                                       mo_graph->checkReachable(act, rf)) {
2158                                 /* release --mo-> act --mo--> rf */
2159                                 return true; /* complete */
2160                         }
2161                         /* act may break release sequence */
2162                         pending->writes.push_back(act);
2163                         certain = false;
2164                 }
2165                 if (!future_ordered)
2166                         certain = false; /* This thread is uncertain */
2167         }
2168
2169         if (certain) {
2170                 release_heads->push_back(release);
2171                 pending->writes.clear();
2172         } else {
2173                 pending->release = release;
2174                 pending->rf = rf;
2175         }
2176         return certain;
2177 }
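
/*
 * Illustrative sketch (editor's example; names hypothetical): given
 *
 *   // Thread 1:                                  // Thread 2:
 *   x.store(1, std::memory_order_release);  (A)   x.fetch_add(1, std::memory_order_relaxed);  (B)
 *   x.store(3, std::memory_order_relaxed);  (C)
 *
 * an acquire load that reads from the RMW (B) walks B's reads-from chain; if
 * (B) read from (A), then (A) is reported as a certain release head. A load
 * that reads from the relaxed store (C) instead needs the contiguous-
 * subsequence check: (A) still heads the sequence as long as no store from
 * another thread (an RMW like (B) cannot break it) falls between (A) and (C)
 * in modification order; writes whose ordering is not yet decided are stashed
 * in 'pending' and settled lazily.
 */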
2178
2179 /**
2180  * An interface for getting the release sequence head(s) with which a
2181  * given ModelAction must synchronize. This function only returns a non-empty
2182  * result when it can locate a release sequence head with certainty. Otherwise,
2183  * it may mark the internal state of the ModelChecker so that it will handle
2184  * the release sequence at a later time, causing @a acquire to update its
2185  * synchronization at some later point in execution.
2186  *
2187  * @param acquire The 'acquire' action that may synchronize with a release
2188  * sequence
2189  * @param read The read action that may read from a release sequence; this may
2190  * be the same as acquire, or else an earlier action in the same thread (i.e.,
2191  * when 'acquire' is a fence-acquire)
2192  * @param release_heads A pass-by-reference return parameter. Will be filled
2193  * with the head(s) of the release sequence(s), if they exist with certainty.
2194  * @see ModelChecker::release_seq_heads
2195  */
2196 void ModelChecker::get_release_seq_heads(ModelAction *acquire,
2197                 ModelAction *read, rel_heads_list_t *release_heads)
2198 {
2199         const ModelAction *rf = read->get_reads_from();
2200         struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
2201         sequence->acquire = acquire;
2202         sequence->read = read;
2203
2204         if (!release_seq_heads(rf, release_heads, sequence)) {
2205                 /* add act to 'lazy checking' list */
2206                 pending_rel_seqs->push_back(sequence);
2207         } else {
2208                 snapshot_free(sequence);
2209         }
2210 }
2211
2212 /**
2213  * Attempt to resolve all stashed operations that might synchronize with a
2214  * release sequence for a given location. This implements the "lazy" portion of
2215  * determining whether or not a release sequence was contiguous, since not all
2216  * modification order information is present at the time an action occurs.
2217  *
2218  * @param location The location/object that should be checked for release
2219  * sequence resolutions. A NULL value means to check all locations.
2220  * @param work_queue The work queue to which to add work items as they are
2221  * generated
2222  * @return True if any updates occurred (new synchronization, new mo_graph
2223  * edges)
2224  */
2225 bool ModelChecker::resolve_release_sequences(void *location, work_queue_t *work_queue)
2226 {
2227         bool updated = false;
2228         std::vector< struct release_seq *, SnapshotAlloc<struct release_seq *> >::iterator it = pending_rel_seqs->begin();
2229         while (it != pending_rel_seqs->end()) {
2230                 struct release_seq *pending = *it;
2231                 ModelAction *acquire = pending->acquire;
2232                 const ModelAction *read = pending->read;
2233
2234                 /* Only resolve sequences on the given location, if provided */
2235                 if (location && read->get_location() != location) {
2236                         it++;
2237                         continue;
2238                 }
2239
2240                 const ModelAction *rf = read->get_reads_from();
2241                 rel_heads_list_t release_heads;
2242                 bool complete;
2243                 complete = release_seq_heads(rf, &release_heads, pending);
2244                 for (unsigned int i = 0; i < release_heads.size(); i++) {
2245                         if (!acquire->has_synchronized_with(release_heads[i])) {
2246                                 if (acquire->synchronize_with(release_heads[i]))
2247                                         updated = true;
2248                                 else
2249                                         set_bad_synchronization();
2250                         }
2251                 }
2252
2253                 if (updated) {
2254                         /* Re-check all pending release sequences */
2255                         work_queue->push_back(CheckRelSeqWorkEntry(NULL));
2256                         /* Re-check read-acquire for mo_graph edges */
2257                         if (acquire->is_read())
2258                                 work_queue->push_back(MOEdgeWorkEntry(acquire));
2259
2260                         /* propagate synchronization to later actions */
2261                         action_list_t::reverse_iterator rit = action_trace->rbegin();
2262                         for (; (*rit) != acquire; rit++) {
2263                                 ModelAction *propagate = *rit;
2264                                 if (acquire->happens_before(propagate)) {
2265                                         propagate->synchronize_with(acquire);
2266                                         /* Re-check 'propagate' for mo_graph edges */
2267                                         work_queue->push_back(MOEdgeWorkEntry(propagate));
2268                                 }
2269                         }
2270                 }
2271                 if (complete) {
2272                         it = pending_rel_seqs->erase(it);
2273                         snapshot_free(pending);
2274                 } else {
2275                         it++;
2276                 }
2277         }
2278
2279         // If we resolved promises or added synchronization, check whether we have realized a data race.
2280         checkDataRaces();
2281
2282         return updated;
2283 }
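
/*
 * Editor's note (informal): this is the lazy half of release-sequence
 * handling. Sequences whose certainty could not be decided when the acquire
 * ran wait in pending_rel_seqs; whenever new modification-order or
 * synchronization information arrives they are re-examined, newly certain
 * heads synchronize with their acquires, and the resulting happens-before
 * edges are propagated forward through the trace.
 */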
2284
2285 /**
2286  * Performs various bookkeeping operations for the current ModelAction. For
2287  * instance, adds action to the per-object, per-thread action vector and to the
2288  * action trace list of all thread actions.
2289  *
2290  * @param act is the ModelAction to add.
2291  */
2292 void ModelChecker::add_action_to_lists(ModelAction *act)
2293 {
2294         int tid = id_to_int(act->get_tid());
2295         ModelAction *uninit = NULL;
2296         int uninit_id = -1;
2297         action_list_t *list = get_safe_ptr_action(obj_map, act->get_location());
2298         if (list->empty() && act->is_atomic_var()) {
2299                 uninit = new_uninitialized_action(act->get_location());
2300                 uninit_id = id_to_int(uninit->get_tid());
2301                 list->push_back(uninit);
2302         }
2303         list->push_back(act);
2304
2305         action_trace->push_back(act);
2306         if (uninit)
2307                 action_trace->push_front(uninit);
2308
2309         std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, act->get_location());
2310         if (tid >= (int)vec->size())
2311                 vec->resize(priv->next_thread_id);
2312         (*vec)[tid].push_back(act);
2313         if (uninit)
2314                 (*vec)[uninit_id].push_front(uninit);
2315
2316         if ((int)thrd_last_action->size() <= tid)
2317                 thrd_last_action->resize(get_num_threads());
2318         (*thrd_last_action)[tid] = act;
2319         if (uninit)
2320                 (*thrd_last_action)[uninit_id] = uninit;
2321
2322         if (act->is_fence() && act->is_release()) {
2323                 if ((int)thrd_last_fence_release->size() <= tid)
2324                         thrd_last_fence_release->resize(get_num_threads());
2325                 (*thrd_last_fence_release)[tid] = act;
2326         }
2327
2328         if (act->is_wait()) {
2329                 void *mutex_loc = (void *) act->get_value();
2330                 get_safe_ptr_action(obj_map, mutex_loc)->push_back(act);
2331
2332                 std::vector<action_list_t> *vec = get_safe_ptr_vect_action(obj_thrd_map, mutex_loc);
2333                 if (tid >= (int)vec->size())
2334                         vec->resize(priv->next_thread_id);
2335                 (*vec)[tid].push_back(act);
2336         }
2337 }
2338
2339 /**
2340  * @brief Get the last action performed by a particular Thread
2341  * @param tid The thread ID of the Thread in question
2342  * @return The last action in the thread
2343  */
2344 ModelAction * ModelChecker::get_last_action(thread_id_t tid) const
2345 {
2346         int threadid = id_to_int(tid);
2347         if (threadid < (int)thrd_last_action->size())
2348                 return (*thrd_last_action)[threadid];
2349         else
2350                 return NULL;
2351 }
2352
2353 /**
2354  * @brief Get the last fence release performed by a particular Thread
2355  * @param tid The thread ID of the Thread in question
2356  * @return The last fence release in the thread, if one exists; NULL otherwise
2357  */
2358 ModelAction * ModelChecker::get_last_fence_release(thread_id_t tid) const
2359 {
2360         int threadid = id_to_int(tid);
2361         if (threadid < (int)thrd_last_fence_release->size())
2362                 return (*thrd_last_fence_release)[threadid];
2363         else
2364                 return NULL;
2365 }
2366
2367 /**
2368  * Gets the last memory_order_seq_cst write (in the total global sequence)
2369  * performed on a particular object (i.e., memory location), not including the
2370  * current action.
2371  * @param curr The current ModelAction; also denotes the object location to
2372  * check
2373  * @return The last seq_cst write
2374  */
2375 ModelAction * ModelChecker::get_last_seq_cst_write(ModelAction *curr) const
2376 {
2377         void *location = curr->get_location();
2378         action_list_t *list = get_safe_ptr_action(obj_map, location);
2379         /* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
2380         action_list_t::reverse_iterator rit;
2381         for (rit = list->rbegin(); rit != list->rend(); rit++)
2382                 if ((*rit)->is_write() && (*rit)->is_seqcst() && (*rit) != curr)
2383                         return *rit;
2384         return NULL;
2385 }
2386
2387 /**
2388  * Gets the last memory_order_seq_cst fence (in the total global sequence)
2389  * performed in a particular thread, prior to a particular fence.
2390  * @param tid The ID of the thread to check
2391  * @param before_fence The fence from which to begin the search; if NULL, then
2392  * search for the most recent fence in the thread.
2393  * @return The last prior seq_cst fence in the thread, if one exists; otherwise, NULL
2394  */
2395 ModelAction * ModelChecker::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
2396 {
2397         /* All fences should have NULL location */
2398         action_list_t *list = get_safe_ptr_action(obj_map, NULL);
2399         action_list_t::reverse_iterator rit = list->rbegin();
2400
2401         if (before_fence) {
2402                 for (; rit != list->rend(); rit++)
2403                         if (*rit == before_fence)
2404                                 break;
2405
2406                 ASSERT(*rit == before_fence);
2407                 rit++;
2408         }
2409
2410         for (; rit != list->rend(); rit++)
2411                 if ((*rit)->is_fence() && (tid == (*rit)->get_tid()) && (*rit)->is_seqcst())
2412                         return *rit;
2413         return NULL;
2414 }
2415
2416 /**
2417  * Gets the last unlock operation performed on a particular mutex (i.e., memory
2418  * location). This function identifies the mutex according to the current
2419  * action, which is presumed to operate on the same mutex.
2420  * @param curr The current ModelAction; also denotes the object location to
2421  * check
2422  * @return The last unlock operation
2423  */
2424 ModelAction * ModelChecker::get_last_unlock(ModelAction *curr) const
2425 {
2426         void *location = curr->get_location();
2427         action_list_t *list = get_safe_ptr_action(obj_map, location);
2428         /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
2429         action_list_t::reverse_iterator rit;
2430         for (rit = list->rbegin(); rit != list->rend(); rit++)
2431                 if ((*rit)->is_unlock() || (*rit)->is_wait())
2432                         return *rit;
2433         return NULL;
2434 }
2435
2436 ModelAction * ModelChecker::get_parent_action(thread_id_t tid) const
2437 {
2438         ModelAction *parent = get_last_action(tid);
2439         if (!parent)
2440                 parent = get_thread(tid)->get_creation();
2441         return parent;
2442 }
2443
2444 /**
2445  * Returns the clock vector for a given thread.
2446  * @param tid The thread whose clock vector we want
2447  * @return Desired clock vector
2448  */
2449 ClockVector * ModelChecker::get_cv(thread_id_t tid) const
2450 {
2451         return get_parent_action(tid)->get_cv();
2452 }
2453
2454 /**
2455  * Resolve a set of Promises with a current write. The set is provided in the
2456  * Node corresponding to @a write.
2457  * @param write The ModelAction that is fulfilling Promises
2458  * @return True if promises were resolved; false otherwise
2459  */
2460 bool ModelChecker::resolve_promises(ModelAction *write)
2461 {
2462         bool haveResolved = false;
2463         std::vector< ModelAction *, ModelAlloc<ModelAction *> > actions_to_check;
2464         promise_list_t mustResolve, resolved;
2465
2466         for (unsigned int i = 0, promise_index = 0; promise_index < promises->size(); i++) {
2467                 Promise *promise = (*promises)[promise_index];
2468                 if (write->get_node()->get_promise(i)) {
2469                         for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2470                                 ModelAction *read = promise->get_reader(j);
2471                                 read_from(read, write);
2472                                 actions_to_check.push_back(read);
2473                         }
2474                         //Make sure the promise's value matches the write's value
2475                         ASSERT(promise->is_compatible(write));
2476                         mo_graph->resolvePromise(promise, write, &mustResolve);
2477
2478                         resolved.push_back(promise);
2479                         promises->erase(promises->begin() + promise_index);
2480
2481                         haveResolved = true;
2482                 } else
2483                         promise_index++;
2484         }
2485
2486         for (unsigned int i = 0; i < mustResolve.size(); i++) {
2487                 if (std::find(resolved.begin(), resolved.end(), mustResolve[i])
2488                                 == resolved.end())
2489                         priv->failed_promise = true;
2490         }
2491         for (unsigned int i = 0; i < resolved.size(); i++)
2492                 delete resolved[i];
2493         //Check whether reading these writes has made threads unable to
2494         //resolve promises
2495
2496         for (unsigned int i = 0; i < actions_to_check.size(); i++) {
2497                 ModelAction *read = actions_to_check[i];
2498                 mo_check_promises(read, true);
2499         }
2500
2501         return haveResolved;
2502 }
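
/*
 * Editor's note (informal): a Promise stands in for a store that has not yet
 * executed but whose value some read has already speculatively observed (a
 * "future value"). Once the concrete write appears, the speculating reads are
 * re-pointed at it here and the placeholder promise is retired.
 */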
2503
2504 /**
2505  * Compute the set of promises that could potentially be satisfied by this
2506  * action. Note that the set computation actually appears in the Node, not in
2507  * ModelChecker.
2508  * @param curr The ModelAction that may satisfy promises
2509  */
2510 void ModelChecker::compute_promises(ModelAction *curr)
2511 {
2512         for (unsigned int i = 0; i < promises->size(); i++) {
2513                 Promise *promise = (*promises)[i];
2514                 if (!promise->is_compatible(curr) || !promise->same_value(curr))
2515                         continue;
2516
2517                 bool satisfy = true;
2518                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2519                         const ModelAction *act = promise->get_reader(j);
2520                         if (act->happens_before(curr) ||
2521                                         act->could_synchronize_with(curr)) {
2522                                 satisfy = false;
2523                                 break;
2524                         }
2525                 }
2526                 if (satisfy)
2527                         curr->get_node()->set_promise(i);
2528         }
2529 }
2530
2531 /** Checks promises in response to a change in a Thread's ClockVector. */
2532 void ModelChecker::check_promises(thread_id_t tid, ClockVector *old_cv, ClockVector *merge_cv)
2533 {
2534         for (unsigned int i = 0; i < promises->size(); i++) {
2535                 Promise *promise = (*promises)[i];
2536                 if (!promise->thread_is_available(tid))
2537                         continue;
2538                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2539                         const ModelAction *act = promise->get_reader(j);
2540                         if ((!old_cv || !old_cv->synchronized_since(act)) &&
2541                                         merge_cv->synchronized_since(act)) {
2542                                 if (promise->eliminate_thread(tid)) {
2543                                         /* Promise has failed */
2544                                         priv->failed_promise = true;
2545                                         return;
2546                                 }
2547                         }
2548                 }
2549         }
2550 }
2551
2552 void ModelChecker::check_promises_thread_disabled()
2553 {
2554         for (unsigned int i = 0; i < promises->size(); i++) {
2555                 Promise *promise = (*promises)[i];
2556                 if (promise->has_failed()) {
2557                         priv->failed_promise = true;
2558                         return;
2559                 }
2560         }
2561 }
2562
2563 /**
2564  * @brief Checks promises in response to an addition to the modification
2565  * order for threads.
2566  *
2567  * We test whether threads are still available for satisfying promises after an
2568  * addition to our modification order constraints. Those that are unavailable
2569  * are "eliminated". Once all threads are eliminated from satisfying a promise,
2570  * that promise has failed.
2571  *
2572  * @param act The ModelAction which updated the modification order
2573  * @param is_read_check Should be true if act is a read and we must check for
2574  * updates to the store from which it read (there is a distinction here for
2575  * RMWs, which are both a load and a store)
2576  */
2577 void ModelChecker::mo_check_promises(const ModelAction *act, bool is_read_check)
2578 {
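             /* For a read check, the new constraints involve the store that 'act'
              * read from; otherwise 'act' is itself the write of interest. */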
2579         const ModelAction *write = is_read_check ? act->get_reads_from() : act;
2580
2581         for (unsigned int i = 0; i < promises->size(); i++) {
2582                 Promise *promise = (*promises)[i];
2583
2584                 // Is this promise on the same location?
2585                 if (!promise->same_location(write))
2586                         continue;
2587
2588                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2589                         const ModelAction *pread = promise->get_reader(j);
2590                         if (!pread->happens_before(act))
2591                                 continue;
2592                         if (mo_graph->checkPromise(write, promise)) {
2593                                 priv->failed_promise = true;
2594                                 return;
2595                         }
2596                         break;
2597                 }
2598
2599                 // Don't do any lookups twice for the same thread
2600                 if (!promise->thread_is_available(act->get_tid()))
2601                         continue;
2602
2603                 if (mo_graph->checkReachable(promise, write)) {
2604                         if (mo_graph->checkPromise(write, promise)) {
2605                                 priv->failed_promise = true;
2606                                 return;
2607                         }
2608                 }
2609         }
2610 }
2611
2612 /**
2613  * Compute the set of writes that may break the current pending release
2614  * sequence. This information is extracted from previous release sequence
2615  * calculations.
2616  *
2617  * @param curr The current ModelAction. Must be a release sequence fixup
2618  * action.
2619  */
2620 void ModelChecker::compute_relseq_breakwrites(ModelAction *curr)
2621 {
2622         if (pending_rel_seqs->empty())
2623                 return;
2624
2625         struct release_seq *pending = pending_rel_seqs->back();
2626         for (unsigned int i = 0; i < pending->writes.size(); i++) {
2627                 const ModelAction *write = pending->writes[i];
2628                 curr->get_node()->add_relseq_break(write);
2629         }
2630
2631         /* NULL means don't break the sequence; just synchronize */
2632         curr->get_node()->add_relseq_break(NULL);
2633 }
2634
2635 /**
2636  * Build up an initial set of all past writes that this 'read' action may read
2637  * from, as well as any previously-observed future values that must still be valid.
2638  *
2639  * @param curr is the current ModelAction that we are exploring; it must be a
2640  * 'read' operation.
2641  */
2642 void ModelChecker::build_may_read_from(ModelAction *curr)
2643 {
2644         std::vector<action_list_t> *thrd_lists = get_safe_ptr_vect_action(obj_thrd_map, curr->get_location());
2645         unsigned int i;
2646         ASSERT(curr->is_read());
2647
2648         ModelAction *last_sc_write = NULL;
2649
2650         if (curr->is_seqcst())
2651                 last_sc_write = get_last_seq_cst_write(curr);
2652
2653         /* Iterate over all threads */
2654         for (i = 0; i < thrd_lists->size(); i++) {
2655                 /* Iterate over actions in thread, starting from most recent */
2656                 action_list_t *list = &(*thrd_lists)[i];
2657                 action_list_t::reverse_iterator rit;
2658                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
2659                         ModelAction *act = *rit;
2660
2661                         /* Only consider 'write' actions */
2662                         if (!act->is_write() || act == curr)
2663                                 continue;
2664
2665                         /* Don't consider more than one seq_cst write if we are a seq_cst read. */
2666                         bool allow_read = true;
2667
2668                         if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
2669                                 allow_read = false;
2670                         else if (curr->get_sleep_flag() && !curr->is_seqcst() && !sleep_can_read_from(curr, act))
2671                                 allow_read = false;
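                             /* (When curr carries the sleep-set flag, only writes accepted
                              * by sleep_can_read_from() are considered.) */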
2672
2673                         if (allow_read) {
2674                                 /* Only add feasible reads */
2675                                 mo_graph->startChanges();
2676                                 r_modification_order(curr, act);
2677                                 if (!is_infeasible())
2678                                         curr->get_node()->add_read_from_past(act);
2679                                 mo_graph->rollbackChanges();
2680                         }
2681
2682                         /* Include at most one act per thread that "happens before" curr */
2683                         if (act->happens_before(curr))
2684                                 break;
2685                 }
2686         }
2687
2688         /* Inherit existing, promised future values */
2689         for (i = 0; i < promises->size(); i++) {
2690                 const Promise *promise = (*promises)[i];
2691                 const ModelAction *promise_read = promise->get_reader(0);
2692                 if (promise_read->same_var(curr)) {
2693                         /* Only add feasible future-values */
2694                         mo_graph->startChanges();
2695                         r_modification_order(curr, promise);
2696                         if (!is_infeasible())
2697                                 curr->get_node()->add_read_from_promise(promise_read);
2698                         mo_graph->rollbackChanges();
2699                 }
2700         }
2701
2702         /* We may find no valid may-read-from set only if the execution is doomed */
2703         if (!curr->get_node()->read_from_size()) {
2704                 priv->no_valid_reads = true;
2705                 set_assert();
2706         }
2707
2708         if (DBG_ENABLED()) {
2709                 model_print("Reached read action:\n");
2710                 curr->print();
2711                 model_print("Printing read_from_past\n");
2712                 curr->get_node()->print_read_from_past();
2713                 model_print("End printing read_from_past\n");
2714         }
2715 }
2716
2717 bool ModelChecker::sleep_can_read_from(ModelAction *curr, const ModelAction *write)
2718 {
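             /*
              * Walk backwards along the reads-from chain: an RMW is both a load
              * and a store, so each RMW's own source write is considered as well,
              * until a plain (non-RMW) write ends the chain.
              */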
2719         for ( ; write != NULL; write = write->get_reads_from()) {
2720                 /* UNINIT actions don't have a Node, and they never sleep */
2721                 if (write->is_uninitialized())
2722                         return true;
2723                 Node *prevnode = write->get_node()->get_parent();
2724
2725                 bool thread_sleep = prevnode->enabled_status(curr->get_tid()) == THREAD_SLEEP_SET;
2726                 if (write->is_release() && thread_sleep)
2727                         return true;
2728                 if (!write->is_rmw())
2729                         return false;
2730         }
2731         return true;
2732 }
2733
2734 /**
2735  * @brief Create a new action representing an uninitialized atomic
2736  * @param location The memory location of the atomic object
2737  * @return A pointer to a new ModelAction
2738  */
2739 ModelAction * ModelChecker::new_uninitialized_action(void *location) const
2740 {
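             /* Allocate from the snapshotting heap and construct the action in
              * place with placement new. */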
2741         ModelAction *act = (ModelAction *)snapshot_malloc(sizeof(class ModelAction));
2742         act = new (act) ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, location, 0, model_thread);
2743         act->create_cv(NULL);
2744         return act;
2745 }
2746
2747 static void print_list(action_list_t *list)
2748 {
2749         action_list_t::iterator it;
2750
2751         model_print("---------------------------------------------------------------------\n");
2752
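             /* Fold each action's hash into a simple rolling hash of the whole
              * trace, printed below as a compact fingerprint of this execution. */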
2753         unsigned int hash = 0;
2754
2755         for (it = list->begin(); it != list->end(); it++) {
2756                 (*it)->print();
2757                 hash = hash^(hash<<3)^((*it)->hash());
2758         }
2759         model_print("HASH %u\n", hash);
2760         model_print("---------------------------------------------------------------------\n");
2761 }
2762
2763 #if SUPPORT_MOD_ORDER_DUMP
2764 void ModelChecker::dumpGraph(char *filename) const
2765 {
2766         char buffer[200];
2767         sprintf(buffer, "%s.dot", filename);
2768         FILE *file = fopen(buffer, "w");
2769         fprintf(file, "digraph %s {\n", filename);
2770         mo_graph->dumpNodes(file);
2771         ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());
2772
2773         for (action_list_t::iterator it = action_trace->begin(); it != action_trace->end(); it++) {
2774                 ModelAction *act = *it;
2775                 if (act->is_read()) {
2776                         mo_graph->dot_print_node(file, act);
2777                         if (act->get_reads_from())
2778                                 mo_graph->dot_print_edge(file,
2779                                                 act->get_reads_from(),
2780                                                 act,
2781                                                 "label=\"rf\", color=red, weight=2");
2782                         else
2783                                 mo_graph->dot_print_edge(file,
2784                                                 act->get_reads_from_promise(),
2785                                                 act,
2786                                                 "label=\"rf\", color=red");
2787                 }
2788                 if (thread_array[id_to_int(act->get_tid())]) {
2789                         mo_graph->dot_print_edge(file,
2790                                         thread_array[id_to_int(act->get_tid())],
2791                                         act,
2792                                         "label=\"sb\", color=blue, weight=400");
2793                 }
2794
2795                 thread_array[id_to_int(act->get_tid())] = act;
2796         }
2797         fprintf(file, "}\n");
2798         model_free(thread_array);
2799         fclose(file);
2800 }
2801 #endif
2802
2803 /** @brief Prints an execution trace summary. */
2804 void ModelChecker::print_summary() const
2805 {
2806 #if SUPPORT_MOD_ORDER_DUMP
2807         char buffername[100];
2808         sprintf(buffername, "exec%04u", stats.num_total);
2809         mo_graph->dumpGraphToFile(buffername);
2810         sprintf(buffername, "graph%04u", stats.num_total);
2811         dumpGraph(buffername);
2812 #endif
2813
2814         model_print("Execution %u:", stats.num_total);
2815         if (isfeasibleprefix()) {
2816                 if (scheduler->all_threads_sleeping())
2817                         model_print(" SLEEP-SET REDUNDANT");
2818                 model_print("\n");
2819         } else
2820                 print_infeasibility(" INFEASIBLE");
2821         print_list(action_trace);
2822         model_print("\n");
2823 }
2824
2825 /**
2826  * Add a Thread to the system for the first time. Should only be called once
2827  * per thread.
2828  * @param t The Thread to add
2829  */
2830 void ModelChecker::add_thread(Thread *t)
2831 {
2832         thread_map->put(id_to_int(t->get_id()), t);
2833         scheduler->add_thread(t);
2834 }
2835
2836 /**
2837  * Removes a thread from the scheduler.
2838  * @param t The Thread to remove.
2839  */
2840 void ModelChecker::remove_thread(Thread *t)
2841 {
2842         scheduler->remove_thread(t);
2843 }
2844
2845 /**
2846  * @brief Get a Thread reference by its ID
2847  * @param tid The Thread's ID
2848  * @return A Thread reference
2849  */
2850 Thread * ModelChecker::get_thread(thread_id_t tid) const
2851 {
2852         return thread_map->get(id_to_int(tid));
2853 }
2854
2855 /**
2856  * @brief Get a reference to the Thread in which a ModelAction was executed
2857  * @param act The ModelAction
2858  * @return A Thread reference
2859  */
2860 Thread * ModelChecker::get_thread(const ModelAction *act) const
2861 {
2862         return get_thread(act->get_tid());
2863 }
2864
2865 /**
2866  * @brief Get a Promise's "promise number"
2867  *
2868  * A "promise number" is an index number that is unique to a promise, valid
2869  * only for a specific snapshot of an execution trace. Promises may come and go
2870  * as they are generated and resolved, so an index only retains meaning for the
2871  * current snapshot.
2872  *
2873  * @param promise The Promise to check
2874  * @return The promise index, if the promise is still valid; otherwise -1
2875  */
2876 int ModelChecker::get_promise_number(const Promise *promise) const
2877 {
2878         for (unsigned int i = 0; i < promises->size(); i++)
2879                 if ((*promises)[i] == promise)
2880                         return i;
2881         /* Not found */
2882         return -1;
2883 }
2884
2885 /**
2886  * @brief Check if a Thread is currently enabled
2887  * @param t The Thread to check
2888  * @return True if the Thread is currently enabled
2889  */
2890 bool ModelChecker::is_enabled(Thread *t) const
2891 {
2892         return scheduler->is_enabled(t);
2893 }
2894
2895 /**
2896  * @brief Check if a Thread is currently enabled
2897  * @param tid The ID of the Thread to check
2898  * @return True if the Thread is currently enabled
2899  */
2900 bool ModelChecker::is_enabled(thread_id_t tid) const
2901 {
2902         return scheduler->is_enabled(tid);
2903 }
2904
2905 /**
2906  * Switch from a model-checker context to a user-thread context. This is the
2907  * complement of ModelChecker::switch_to_master and must be called from the
2908  * model-checker context.
2909  *
2910  * @param thread The user-thread to switch to
2911  */
2912 void ModelChecker::switch_from_master(Thread *thread)
2913 {
2914         scheduler->set_current_thread(thread);
2915         Thread::swap(&system_context, thread);
2916 }
2917
2918 /**
2919  * Switch from a user-context to the "master thread" context (a.k.a. system
2920  * context). This switch is made with the intention of exploring a particular
2921  * model-checking action (described by a ModelAction object). Must be called
2922  * from a user-thread context.
2923  *
2924  * @param act The current action that will be explored. May be NULL only if
2925  * the trace is exiting via an assertion (see ModelChecker::set_assert and
2926  * ModelChecker::has_asserted).
2927  * @return Return the value returned by the current action
2928  */
2929 uint64_t ModelChecker::switch_to_master(ModelAction *act)
2930 {
2931         DBG();
2932         Thread *old = thread_current();
2933         ASSERT(!old->get_pending());
2934         old->set_pending(act);
2935         if (Thread::swap(old, &system_context) < 0) {
2936                 perror("swap threads");
2937                 exit(EXIT_FAILURE);
2938         }
2939         return old->get_return_value();
2940 }
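
     /*
      * User-thread operations that the model checker must observe funnel
      * through switch_to_master(): the operation is packaged as a ModelAction,
      * stashed as the thread's pending action, and control returns to the
      * model-checker context. For example, an atomic load is issued roughly
      * as:
      *
      *   uint64_t val = model->switch_to_master(
      *                   new ModelAction(ATOMIC_READ, order, (void *)addr));
      *
      * (Here 'order' and 'addr' stand in for the memory order and address at
      * the real call site in the atomics front end; the exact form may
      * differ.)
      */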
2941
2942 /**
2943  * Takes the next step in the execution, if possible.
2944  * @param curr The current step to take
2945  * @return Returns the next Thread to run, if any; NULL if this execution
2946  * should terminate
2947  */
2948 Thread * ModelChecker::take_step(ModelAction *curr)
2949 {
2950         Thread *curr_thrd = get_thread(curr);
2951         ASSERT(curr_thrd->get_state() == THREAD_READY);
2952
2953         curr = check_current_action(curr);
2954
2955         /* Infeasible -> don't take any more steps */
2956         if (is_infeasible())
2957                 return NULL;
2958         else if (isfeasibleprefix() && have_bug_reports()) {
2959                 set_assert();
2960                 return NULL;
2961         }
2962
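             /* Stop this execution once it exceeds the optional step bound
              * (params.bound), if one was given. */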
2963         if (params.bound != 0 && priv->used_sequence_numbers > params.bound)
2964                 return NULL;
2965
2966         if (curr_thrd->is_blocked() || curr_thrd->is_complete())
2967                 scheduler->remove_thread(curr_thrd);
2968
2969         Thread *next_thrd = get_next_thread(curr);
2970
2971         DEBUG("(%d, %d)\n", curr_thrd ? id_to_int(curr_thrd->get_id()) : -1,
2972                         next_thrd ? id_to_int(next_thrd->get_id()) : -1);
2973
2974         return next_thrd;
2975 }
2976
2977 /** Wrapper to run the user's main function, with appropriate arguments */
2978 void user_main_wrapper(void *)
2979 {
2980         user_main(model->params.argc, model->params.argv);
2981 }
2982
2983 /** @brief Run ModelChecker for the user program */
2984 void ModelChecker::run()
2985 {
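             /* Outer loop: one iteration per explored execution; next_execution()
              * (at the bottom) reports whether another execution remains. */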
2986         do {
2987                 thrd_t user_thread;
2988                 Thread *t = new Thread(&user_thread, &user_main_wrapper, NULL, NULL);
2989                 add_thread(t);
2990
2991                 do {
2992                         /*
2993                          * Stash next pending action(s) for thread(s). There
2994                          * should only need to stash one thread's action--the
2995                          * thread which just took a step--plus the first step
2996                          * for any newly-created thread
2997                          */
2998                         for (unsigned int i = 0; i < get_num_threads(); i++) {
2999                                 thread_id_t tid = int_to_id(i);
3000                                 Thread *thr = get_thread(tid);
3001                                 if (!thr->is_model_thread() && !thr->is_complete() && !thr->get_pending()) {
3002                                         switch_from_master(thr);
3003                                 }
3004                         }
3005
3006                         /* Catch assertions from prior take_step or from
3007                          * between-ModelAction bugs (e.g., data races) */
3008                         if (has_asserted())
3009                                 break;
3010
3011                         /* Consume the next action for a Thread */
3012                         ModelAction *curr = t->get_pending();
3013                         t->set_pending(NULL);
3014                         t = take_step(curr);
3015                 } while (t && !t->is_model_thread());
3016
3017                 /*
3018                  * Launch end-of-execution release sequence fixups only when
3019                  * the execution is otherwise feasible AND:
3020                  *
3021                  * (1) there are pending release sequences
3022                  * (2) there are pending assertions that could be invalidated by a
3023                  * change in clock vectors (i.e., data races)
3024                  * (3) there are no pending promises
3025                  */
3026                 while (!pending_rel_seqs->empty() &&
3027                                 is_feasible_prefix_ignore_relseq() &&
3028                                 !unrealizedraces.empty()) {
3029                         model_print("*** WARNING: release sequence fixup action "
3030                                         "(%zu pending release sequence(s)) ***\n",
3031                                         pending_rel_seqs->size());
3032                         ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
3033                                         std::memory_order_seq_cst, NULL, VALUE_NONE,
3034                                         model_thread);
3035                         take_step(fixup);
3036                 }
3037         } while (next_execution());
3038
3039         model_print("******* Model-checking complete: *******\n");
3040         print_stats();
3041 }