include common.mk
+SCFENCE_DIR := scfence
+
OBJECTS := libthreads.o schedule.o model.o threads.o librace.o action.o \
nodestack.o clockvector.o main.o snapshot-interface.o cyclegraph.o \
datarace.o impatomic.o cmodelint.o \
snapshot.o malloc.o mymemory.o common.o mutex.o promise.o conditionvariable.o \
context.o scanalysis.o execution.o plugins.o libannotate.o
-CPPFLAGS += -Iinclude -I.
+CPPFLAGS += -Iinclude -I. -I$(SCFENCE_DIR)
LDFLAGS := -ldl -lrt -rdynamic
SHARED := -shared
README.html: README.md
$(MARKDOWN) $< > $@
-$(LIB_SO): $(OBJECTS)
- $(CXX) $(SHARED) -o $(LIB_SO) $+ $(LDFLAGS)
malloc.o: malloc.c
$(CC) -fPIC -c malloc.c -DMSPACES -DONLY_MSPACES -DHAVE_MMAP=0 $(CPPFLAGS) -Wno-unused-variable
-%.o: %.cc
+%.o : %.cc
$(CXX) -MMD -MF .$@.d -fPIC -c $< $(CPPFLAGS)
+include $(SCFENCE_DIR)/Makefile
+
+-include $(wildcard $(SCFENCE_DIR)/.*.d)
+
+$(LIB_SO): $(OBJECTS)
+ $(CXX) $(SHARED) -o $(LIB_SO) $+ $(LDFLAGS)
+
%.pdf: %.dot
dot -Tpdf $< -o $@
PHONY += clean
clean:
- rm -f *.o *.so .*.d *.pdf *.dot
+ rm -f *.o *.so .*.d *.pdf *.dot $(SCFENCE_DIR)/.*.d $(SCFENCE_DIR)/*.o
$(MAKE) -C $(TESTS_DIR) clean
PHONY += mrclean
#include "common.h"
#include "threads-model.h"
#include "nodestack.h"
+#include "wildcard.h"
#define ACTION_INITIAL_CLOCK 0
uint64_t value, Thread *thread) :
type(type),
order(order),
+ original_order(order),
location(loc),
value(value),
reads_from(NULL),
case ATOMIC_WAIT: return "wait";
case ATOMIC_NOTIFY_ONE: return "notify one";
case ATOMIC_NOTIFY_ALL: return "notify all";
- case ATOMIC_ANNOTATION: return "atomic annotation";
+ case ATOMIC_ANNOTATION: return "annotation";
default: return "unknown type";
};
}
thread_id_t get_tid() const { return tid; }
action_type get_type() const { return type; }
memory_order get_mo() const { return order; }
+ memory_order get_original_mo() const { return original_order; }
+ void set_mo(memory_order order) { this->order = order; }
void * get_location() const { return location; }
modelclock_t get_seq_number() const { return seq_number; }
uint64_t get_value() const { return value; }
/** @brief The memory order for this operation. */
memory_order order;
+ /** @brief The original memory order parameter for this operation. */
+ memory_order original_order;
+
/** @brief A pointer to the memory location for this action. */
void *location;
action_list_t * get_action_trace() { return &action_trace; }
+ CycleGraph * const get_mo_graph() { return mo_graph; }
+
SNAPSHOTALLOC
private:
int get_execution_number() const;
--- /dev/null
+#ifndef _WILDCARD_H
+#define _WILDCARD_H
+#include "memoryorder.h"
+
+#define MAX_WILDCARD_NUM 50
+
+#define memory_order_normal ((memory_order) (0x2000))
+
+#define memory_order_wildcard(x) ((memory_order) (0x1000+(x)))
+
+#define wildcard(x) ((memory_order) (0x1000+(x)))
+#define get_wildcard_id(x) ((int) ((x)-0x1000))
+#define get_wildcard_id_zero(x) ((get_wildcard_id(x)) > 0 ? get_wildcard_id(x) : 0)
+
+#define INIT_WILDCARD_NUM 20
+#define WILDCARD_NONEXIST ((memory_order) -1)
+#define INFERENCE_INCOMPARABLE(x) (!((x) >= -1 && (x) <= 1))
+
+#define is_wildcard(x) (!((x) >= memory_order_relaxed && (x) <= memory_order_seq_cst) && (x) != memory_order_normal)
+#define is_normal_mo_infer(x) (((x) >= memory_order_relaxed && (x) <= memory_order_seq_cst) || (x) == WILDCARD_NONEXIST || (x) == memory_order_normal)
+#define is_normal_mo(x) (((x) >= memory_order_relaxed && (x) <= memory_order_seq_cst) || (x) == memory_order_normal)
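+
+/* A usage sketch (hypothetical client code, assuming the CDSChecker atomic
+ * API): each wildcard carries a unique ID, and SCFence infers the weakest
+ * ordering for it.
+ *
+ *   atomic_store_explicit(&flag, 1, wildcard(1));
+ *   int v = atomic_load_explicit(&flag, wildcard(2));
+ *
+ * Here get_wildcard_id(wildcard(1)) == 1, is_wildcard(wildcard(1)) is true,
+ * and is_wildcard(memory_order_seq_cst) is false. */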
+
+#define assert_infer(x) for (int i = 0; i <= wildcardNum; i++)\
+ ASSERT(is_normal_mo_infer((x[i])));
+
+#define assert_infers(x) for (ModelList<memory_order *>::iterator iter =\
+ (x)->begin(); iter != (x)->end(); iter++)\
+ assert_infer((*iter));
+
+#define relaxed memory_order_relaxed
+#define release memory_order_release
+#define acquire memory_order_acquire
+#define seqcst memory_order_seq_cst
+#define acqrel memory_order_acq_rel
+
+#endif
TraceAnalysis * ta=(*installedanalysis)[i];
ta->setExecution(execution);
model->add_trace_analysis(ta);
+ /** Call the installation event for each installed plugin */
+ ta->actionAtInstallation();
}
}
ModelChecker::ModelChecker(struct model_params params) :
/* Initialize default scheduler */
params(params),
+ restart_flag(false),
+ exit_flag(false),
scheduler(new Scheduler()),
node_stack(new NodeStack()),
execution(new ModelExecution(this, &this->params, scheduler, node_stack)),
execution_number(1),
diverge(NULL),
earliest_diverge(NULL),
- trace_analyses()
+ trace_analyses(),
+ inspect_plugin(NULL)
{
memset(&stats,0,sizeof(struct execution_stats));
}
checkDataRaces();
run_trace_analyses();
+ } else if (inspect_plugin && !execution->is_complete_execution() &&
+ (execution->too_many_steps())) {
+ inspect_plugin->analyze(execution->get_action_trace());
}
record_stats();
if (complete)
earliest_diverge = NULL;
+ if (restart_flag) {
+ do_restart();
+ return true;
+ }
+
+ if (exit_flag)
+ return false;
+
if ((diverge = execution->get_next_backtrack()) == NULL)
return false;
Thread *old = thread_current();
scheduler->set_current_thread(NULL);
ASSERT(!old->get_pending());
+ if (inspect_plugin != NULL) {
+ inspect_plugin->inspectModelAction(act);
+ }
old->set_pending(act);
if (Thread::swap(old, &system_context) < 0) {
perror("swap threads");
return false;
}
+/** @brief Exit ModelChecker upon returning to the run loop of the
+ * model checker. */
+void ModelChecker::exit_model_checker()
+{
+ exit_flag = true;
+}
+
+/** @brief Restart ModelChecker upon returning to the run loop of the
+ * model checker. */
+void ModelChecker::restart()
+{
+ restart_flag = true;
+}
+
+void ModelChecker::do_restart()
+{
+ restart_flag = false;
+ diverge = NULL;
+ earliest_diverge = NULL;
+ reset_to_initial_state();
+ node_stack->full_reset();
+ memset(&stats,0,sizeof(struct execution_stats));
+ execution_number = 1;
+}
+
/** @brief Run ModelChecker for the user program */
void ModelChecker::run()
{
+ bool has_next;
do {
thrd_t user_thread;
Thread *t = new Thread(execution->get_next_id(), &user_thread, &user_main_wrapper, NULL, NULL);
t = execution->take_step(curr);
} while (!should_terminate_execution());
- } while (next_execution());
+ has_next = next_execution();
+ if (inspect_plugin != NULL && !has_next) {
+ inspect_plugin->actionAtModelCheckingFinish();
+			// Check if the inspect plugin set the restart flag
+ if (restart_flag) {
+ model_print("******* Model-checking RESTART: *******\n");
+ has_next = true;
+ do_restart();
+ }
+ }
+ } while (has_next);
execution->fixup_release_sequences();
void run();
+	/** Restart the model checker, intended for plugins. */
+ void restart();
+
+	/** Exit the model checker, intended for plugins. */
+ void exit_model_checker();
+
+ /** Check the exit_flag. */
+ bool get_exit_flag() const { return exit_flag; }
+
/** @returns the context for the main model-checking system thread */
ucontext_t * get_system_context() { return &system_context; }
void assert_user_bug(const char *msg);
const model_params params;
- void add_trace_analysis(TraceAnalysis *a) {
- trace_analyses.push_back(a);
- }
-
+ void add_trace_analysis(TraceAnalysis *a) { trace_analyses.push_back(a); }
+ void set_inspect_plugin(TraceAnalysis *a) { inspect_plugin=a; }
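+	/* Note: an inspecting plugin typically registers itself from its
+	 * actionAtInstallation() callback, as SCFence does with
+	 * model->set_inspect_plugin(this). */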
MEMALLOC
private:
+	/** Flag indicating whether to restart the model checker. */
+ bool restart_flag;
+	/** Flag indicating whether to exit the model checker. */
+ bool exit_flag;
+
/** The scheduler to use: tracks the running/ready Threads */
Scheduler * const scheduler;
NodeStack * const node_stack;
ModelVector<TraceAnalysis *> trace_analyses;
+	/** @brief Implements restart. */
+	void do_restart();
+	/** @brief Plugin that can inspect new actions. */
+	TraceAnalysis *inspect_plugin;
/** @brief The cumulative execution stats */
struct execution_stats stats;
void record_stats();
node_list.back()->clear_backtracking();
}
+/** Reset the node stack. */
+void NodeStack::full_reset()
+{
+ for (unsigned int i = 0; i < node_list.size(); i++)
+ delete node_list[i];
+ node_list.clear();
+ reset_execution();
+ total_nodes = 1;
+}
+
Node * NodeStack::get_head() const
{
if (node_list.empty() || head_idx < 0)
Node * get_next() const;
void reset_execution();
void pop_restofstack(int numAhead);
+ void full_reset();
int get_total_nodes() { return total_nodes; }
void print() const;
#include "plugins.h"
#include "scanalysis.h"
+#include "scfence.h"
ModelVector<TraceAnalysis *> * registered_analysis;
ModelVector<TraceAnalysis *> * installed_analysis;
registered_analysis=new ModelVector<TraceAnalysis *>();
installed_analysis=new ModelVector<TraceAnalysis *>();
registered_analysis->push_back(new SCAnalysis());
+ registered_analysis->push_back(new SCFence());
}
ModelVector<TraceAnalysis *> * getRegisteredTraceAnalysis() {
--- /dev/null
+FENCE_OBJS := $(SCFENCE_DIR)/scgen.o \
+ $(SCFENCE_DIR)/inference.o \
+ $(SCFENCE_DIR)/inferset.o \
+ $(SCFENCE_DIR)/inferlist.o \
+ $(SCFENCE_DIR)/patch.o \
+ $(SCFENCE_DIR)/scfence.o
+
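+# Static pattern rule: build each SCFence object from its .cc/.h pair and
+# emit its dependency file next to it (e.g. scfence/.scfence.o.d).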
+$(FENCE_OBJS): $(SCFENCE_DIR)/%.o : $(SCFENCE_DIR)/%.cc $(SCFENCE_DIR)/%.h
+ $(CXX) -MMD -MF $(SCFENCE_DIR)/.$(notdir $@).d -fPIC -c $< $(CPPFLAGS) -o $@
+
+OBJECTS += $(FENCE_OBJS)
--- /dev/null
+#ifndef _FENCE_COMMON_
+#define _FENCE_COMMON_
+
+#include "model.h"
+#include "action.h"
+
+#define DEFAULT_REPETITIVE_READ_BOUND 20
+
+#define FENCE_OUTPUT
+
+#ifdef FENCE_OUTPUT
+
+#define FENCE_PRINT model_print
+
+#define ACT_PRINT(x) (x)->print()
+
+#define CV_PRINT(x) (x)->print()
+
+#define WILDCARD_ACT_PRINT(x) \
+	do { \
+		FENCE_PRINT("Wildcard: %d\n", get_wildcard_id_zero((x)->get_original_mo())); \
+		ACT_PRINT(x); \
+	} while (0)
+
+#else
+
+#define FENCE_PRINT
+
+#define ACT_PRINT(x)
+
+#define CV_PRINT(x)
+
+#define WILDCARD_ACT_PRINT(x)
+
+#endif
+
+#endif
--- /dev/null
+#include "fence_common.h"
+#include "wildcard.h"
+#include "patch.h"
+#include "inferlist.h"
+#include "inference.h"
+
+/** Forward declarations */
+class PatchUnit;
+class Patch;
+class Inference;
+class InferenceList;
+
+bool isTheInference(Inference *infer) {
+ for (int i = 0; i < infer->getSize(); i++) {
+ memory_order mo1 = (*infer)[i], mo2;
+ if (mo1 == WILDCARD_NONEXIST)
+ mo1 = relaxed;
+ switch (i) {
+ case 3:
+ mo2 = acquire;
+ break;
+ case 11:
+ mo2 = release;
+ break;
+ default:
+ mo2 = relaxed;
+ break;
+ }
+ if (mo1 != mo2)
+ return false;
+ }
+ return true;
+}
+
+const char* get_mo_str(memory_order order) {
+ switch (order) {
+ case std::memory_order_relaxed: return "relaxed";
+ case std::memory_order_acquire: return "acquire";
+ case std::memory_order_release: return "release";
+ case std::memory_order_acq_rel: return "acq_rel";
+ case std::memory_order_seq_cst: return "seq_cst";
+ default:
+ //model_print("Weird memory order, a bug or something!\n");
+ //model_print("memory_order: %d\n", order);
+ return "unknown";
+ }
+}
+
+
+void Inference::resize(int newsize) {
+ ASSERT (newsize > size && newsize <= MAX_WILDCARD_NUM);
+	memory_order *newOrders = (memory_order *) model_malloc((newsize + 1) * sizeof(memory_order));
+ int i;
+ for (i = 0; i <= size; i++)
+ newOrders[i] = orders[i];
+ for (; i <= newsize; i++)
+ newOrders[i] = WILDCARD_NONEXIST;
+ model_free(orders);
+ size = newsize;
+ orders = newOrders;
+}
+
+/** Return the state of how we update a specific mo; if we would have to make
+ * an incompatible inference, or the inference cannot be imposed because the
+ * action is not a wildcard, it returns -1; if mo is a compatible memory order
+ * but the current memory order is no weaker than mo, it returns 0; otherwise,
+ * it strengthens the order and returns 1 */
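+/* For example (a sketch of the semantics above): if the wildcard currently
+ * maps to memory_order_acquire, strengthen(act, memory_order_release)
+ * updates it to memory_order_acq_rel and returns 1, while
+ * strengthen(act, memory_order_relaxed) leaves it unchanged and returns 0. */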
+int Inference::strengthen(const ModelAction *act, memory_order mo) {
+ memory_order wildcard = act->get_original_mo();
+ int wildcardID = get_wildcard_id_zero(wildcard);
+ if (!is_wildcard(wildcard)) {
+ FENCE_PRINT("We cannot make this update to %s!\n", get_mo_str(mo));
+ ACT_PRINT(act);
+ return -1;
+ }
+ if (wildcardID > size)
+ resize(wildcardID);
+ ASSERT (is_normal_mo(mo));
+ //model_print("wildcardID -> order: %d -> %d\n", wildcardID, orders[wildcardID]);
+ ASSERT (is_normal_mo_infer(orders[wildcardID]));
+ switch (orders[wildcardID]) {
+ case memory_order_seq_cst:
+ return 0;
+ case memory_order_relaxed:
+ if (mo == memory_order_relaxed)
+ return 0;
+ orders[wildcardID] = mo;
+ break;
+ case memory_order_acquire:
+ if (mo == memory_order_acquire || mo == memory_order_relaxed)
+ return 0;
+ if (mo == memory_order_release)
+ orders[wildcardID] = memory_order_acq_rel;
+ else if (mo >= memory_order_acq_rel && mo <=
+ memory_order_seq_cst)
+ orders[wildcardID] = mo;
+ break;
+ case memory_order_release:
+ if (mo == memory_order_release || mo == memory_order_relaxed)
+ return 0;
+ if (mo == memory_order_acquire)
+ orders[wildcardID] = memory_order_acq_rel;
+ else if (mo >= memory_order_acq_rel)
+ orders[wildcardID] = mo;
+ break;
+ case memory_order_acq_rel:
+ if (mo == memory_order_seq_cst)
+ orders[wildcardID] = mo;
+ else
+ return 0;
+ break;
+ default:
+ orders[wildcardID] = mo;
+ break;
+ }
+ return 1;
+}
+
+Inference::Inference() {
+	orders = (memory_order *) model_malloc((4 + 1) * sizeof(memory_order));
+ size = 4;
+ for (int i = 0; i <= size; i++)
+ orders[i] = WILDCARD_NONEXIST;
+ initialInfer = this;
+ buggy = false;
+ hasFixes = false;
+ leaf = false;
+ explored = false;
+ shouldFix = true;
+}
+
+Inference::Inference(Inference *infer) {
+ ASSERT (infer->size > 0 && infer->size <= MAX_WILDCARD_NUM);
+	orders = (memory_order *) model_malloc((infer->size + 1) * sizeof(memory_order));
+ this->size = infer->size;
+ for (int i = 0; i <= size; i++)
+ orders[i] = infer->orders[i];
+
+ initialInfer = infer->initialInfer;
+ buggy = false;
+ hasFixes = false;
+ leaf = false;
+ explored = false;
+ shouldFix = true;
+}
+
+/** return value:
+ * 0 -> mo1 == mo2;
+ * 1 -> mo1 stronger than mo2;
+ * -1 -> mo1 weaker than mo2;
+ * 2 -> mo1 & mo2 are incomparable.
+ */
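+/* For instance: compareMemoryOrder(memory_order_seq_cst, memory_order_relaxed)
+ * == 1 and compareMemoryOrder(memory_order_acquire, memory_order_release) == 2;
+ * WILDCARD_NONEXIST is treated as memory_order_relaxed, so
+ * compareMemoryOrder(WILDCARD_NONEXIST, memory_order_relaxed) == 0. */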
+int Inference::compareMemoryOrder(memory_order mo1, memory_order mo2) {
+ if (mo1 == WILDCARD_NONEXIST)
+ mo1 = memory_order_relaxed;
+ if (mo2 == WILDCARD_NONEXIST)
+ mo2 = memory_order_relaxed;
+ if (mo1 == mo2)
+ return 0;
+ if (mo1 == memory_order_relaxed)
+ return -1;
+ if (mo1 == memory_order_acquire) {
+ if (mo2 == memory_order_relaxed)
+ return 1;
+ if (mo2 == memory_order_release)
+ return 2;
+ return -1;
+ }
+ if (mo1 == memory_order_release) {
+ if (mo2 == memory_order_relaxed)
+ return 1;
+ if (mo2 == memory_order_acquire)
+ return 2;
+ return -1;
+ }
+ if (mo1 == memory_order_acq_rel) {
+ if (mo2 == memory_order_seq_cst)
+ return -1;
+ else
+ return 1;
+ }
+ // mo1 now must be SC and mo2 can't be SC
+ return 1;
+}
+
+
+/** Try to calculate the set of inferences that are weaker than this, but
+ * still stronger than infer */
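+/* Sketch of the idea: with a single strengthened wildcard w, if this maps
+ * w -> seq_cst and infer maps w -> relaxed, the produced weaker inferences
+ * (both endpoints excluded) are w -> acq_rel, w -> release and w -> acquire. */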
+InferenceList* Inference::getWeakerInferences(Inference *infer) {
+ // An array of strengthened wildcards
+ SnapVector<int> *strengthened = new SnapVector<int>;
+ model_print("Strengthened wildcards\n");
+ for (int i = 1; i <= size; i++) {
+ memory_order mo1 = orders[i],
+ mo2 = (*infer)[i];
+ int compVal = compareMemoryOrder(mo1, mo2);
+ if (!(compVal == 0 || compVal == 1)) {
+ model_print("assert failure\n");
+ model_print("compVal=%d\n", compVal);
+ ASSERT (false);
+ }
+ if (compVal == 0) // Same
+ continue;
+ model_print("wildcard %d -> %s (%s)\n", i, get_mo_str(mo1),
+ get_mo_str(mo2));
+ strengthened->push_back(i);
+ }
+
+ // Got the strengthened wildcards, find out weaker inferences
+ // First get a volatile copy of this inference
+ Inference *tmpRes = new Inference(this);
+ InferenceList *res = new InferenceList;
+ if (strengthened->size() == 0)
+ return res;
+ getWeakerInferences(res, tmpRes, this, infer, strengthened, 0);
+ res->pop_front();
+ res->pop_back();
+ InferenceList::print(res, "Weakened");
+ return res;
+}
+
+
+// seq_cst -> acq_rel -> acquire -> release -> relaxed
+memory_order Inference::nextWeakOrder(memory_order mo1, memory_order mo2) {
+ memory_order res;
+ switch (mo1) {
+ case memory_order_seq_cst:
+ res = memory_order_acq_rel;
+ break;
+ case memory_order_acq_rel:
+ res = memory_order_acquire;
+ break;
+ case memory_order_acquire:
+ res = memory_order_relaxed;
+ break;
+ case memory_order_release:
+ res = memory_order_relaxed;
+ break;
+ case memory_order_relaxed:
+ res = memory_order_relaxed;
+ break;
+ default:
+ res = memory_order_relaxed;
+ break;
+ }
+ int compVal = compareMemoryOrder(res, mo2);
+ if (compVal == 2 || compVal == -1) // Incomparable
+ res = mo2;
+ return res;
+}
+
+void Inference::getWeakerInferences(InferenceList* list, Inference *tmpRes,
+ Inference *infer1, Inference *infer2, SnapVector<int> *strengthened, unsigned idx) {
+ if (idx == strengthened->size()) { // Ready to produce one weakened result
+ Inference *res = new Inference(tmpRes);
+ //model_print("Weakened inference:\n");
+ //res->print();
+ res->setShouldFix(false);
+ list->push_back(res);
+ return;
+ }
+
+ int w = (*strengthened)[idx]; // The wildcard
+ memory_order mo1 = (*infer1)[w];
+ memory_order mo2 = (*infer2)[w];
+ if (mo2 == WILDCARD_NONEXIST)
+ mo2 = memory_order_relaxed;
+ memory_order weakenedMO = mo1;
+ do {
+ (*tmpRes)[w] = weakenedMO;
+ getWeakerInferences(list, tmpRes, infer1, infer2,
+ strengthened, idx + 1);
+ if (weakenedMO == memory_order_acq_rel) {
+ (*tmpRes)[w] = memory_order_release;
+ getWeakerInferences(list, tmpRes, infer1, infer2,
+ strengthened, idx + 1);
+ }
+ weakenedMO = nextWeakOrder(weakenedMO, mo2);
+		model_print("weakenedMO=%d\n", weakenedMO);
+ model_print("mo2=%d\n", mo2);
+ } while (weakenedMO != mo2);
+ (*tmpRes)[w] = weakenedMO;
+ getWeakerInferences(list, tmpRes, infer1, infer2,
+ strengthened, idx + 1);
+}
+
+memory_order& Inference::operator[](int idx) {
+ if (idx > 0 && idx <= size)
+ return orders[idx];
+ else {
+ resize(idx);
+ orders[idx] = WILDCARD_NONEXIST;
+ return orders[idx];
+ }
+}
+
+/** A simple overload, which allows the caller to pass two boolean references;
+ * we set those two variables to indicate whether we can update the
+ * order (compatible or not) and whether we have actually updated it */
+int Inference::strengthen(const ModelAction *act, memory_order mo, bool &canUpdate, bool &hasUpdated) {
+ int res = strengthen(act, mo);
+ if (res == -1)
+ canUpdate = false;
+ if (res == 1)
+ hasUpdated = true;
+
+ return res;
+}
+
+/** @Return:
+	1 -> 'this > infer';
+	-1 -> 'this < infer';
+	0 -> 'this == infer';
+	INFERENCE_INCOMPARABLE(x) -> true means incomparable
+	*/
+int Inference::compareTo(const Inference *infer) const {
+ int result = size == infer->size ? 0 : (size > infer->size) ? 1 : -1;
+ int smallerSize = size > infer->size ? infer->size : size;
+ int subResult;
+
+ for (int i = 0; i <= smallerSize; i++) {
+ memory_order mo1 = orders[i],
+ mo2 = infer->orders[i];
+ if ((mo1 == memory_order_acquire && mo2 == memory_order_release) ||
+ (mo1 == memory_order_release && mo2 == memory_order_acquire)) {
+ // Incomparable
+ return -2;
+ } else {
+ if ((mo1 == WILDCARD_NONEXIST && mo2 != WILDCARD_NONEXIST)
+ || (mo1 == WILDCARD_NONEXIST && mo2 == memory_order_relaxed)
+ || (mo1 == memory_order_relaxed && mo2 == WILDCARD_NONEXIST)
+ )
+ subResult = 1;
+ else if (mo1 != WILDCARD_NONEXIST && mo2 == WILDCARD_NONEXIST)
+ subResult = -1;
+ else
+ subResult = mo1 > mo2 ? 1 : (mo1 == mo2) ? 0 : -1;
+
+ if ((subResult > 0 && result < 0) || (subResult < 0 && result > 0)) {
+ return -2;
+ }
+ if (subResult != 0)
+ result = subResult;
+ }
+ }
+ return result;
+}
+
+unsigned long Inference::getHash() {
+ unsigned long hash = 0;
+ for (int i = 1; i <= size; i++) {
+ memory_order mo = orders[i];
+ if (mo == WILDCARD_NONEXIST) {
+ mo = memory_order_relaxed;
+ }
+ hash *= 37;
+ hash += (mo + 4096);
+ }
+ return hash;
+}
+
+
+void Inference::print(bool hasHash) {
+ ASSERT(size > 0 && size <= MAX_WILDCARD_NUM);
+ for (int i = 1; i <= size; i++) {
+ memory_order mo = orders[i];
+ if (mo != WILDCARD_NONEXIST) {
+ // Print the wildcard inference result
+ FENCE_PRINT("wildcard %d -> memory_order_%s\n", i, get_mo_str(mo));
+ }
+ }
+ if (hasHash)
+ FENCE_PRINT("Hash: %lu\n", getHash());
+}
+
+void Inference::print() {
+ print(false);
+}
--- /dev/null
+#ifndef _INFERENCE_H
+#define _INFERENCE_H
+
+#include "fence_common.h"
+#include "wildcard.h"
+#include "patch.h"
+#include "inferlist.h"
+
+class InferenceList;
+class Inference;
+
+extern const char* get_mo_str(memory_order order);
+extern bool isTheInference(Inference *infer);
+
+class Inference {
+ private:
+ memory_order *orders;
+ int size;
+
+	/** Its initial inference; if not assigned, it is set to itself */
+ Inference *initialInfer;
+
+ /** Whether this inference will lead to a buggy execution */
+ bool buggy;
+
+ /** Whether this inference has been explored */
+ bool explored;
+
+ /** Whether this inference is the leaf node in the inference lattice, see
+ * inferset.h for more details */
+ bool leaf;
+
+	/** When this inference leads to buggy executions, this indicates whether
+	 * it has any fixes. */
+ bool hasFixes;
+
+	/** When we have a strong enough inference, we also want to weaken specific
+	 * parameters to see if they can be weakened. So we use this
+	 * field to mark whether we should fix the inference or not if we get
+	 * non-SC behaviors. By default true. */
+ bool shouldFix;
+
+ void resize(int newsize);
+
+	/** Return the state of how we update a specific mo; if we would have to
+	 * make an incompatible inference, or the inference cannot be imposed
+	 * because the action is not a wildcard, it returns -1; if mo is a
+	 * compatible memory order but the current memory order is no weaker than
+	 * mo, it returns 0; otherwise, it strengthens the order and returns 1 */
+ int strengthen(const ModelAction *act, memory_order mo);
+
+ public:
+ Inference();
+
+ Inference(Inference *infer);
+
+	/** return value:
+	 * 0 -> mo1 == mo2;
+	 * 1 -> mo1 stronger than mo2;
+	 * -1 -> mo1 weaker than mo2;
+	 * 2 -> mo1 & mo2 are incomparable.
+	 */
+ static int compareMemoryOrder(memory_order mo1, memory_order mo2);
+
+
+ /** Try to calculate the set of inferences that are weaker than this, but
+ * still stronger than infer */
+ InferenceList* getWeakerInferences(Inference *infer);
+
+ static memory_order nextWeakOrder(memory_order mo1, memory_order mo2);
+
+ void getWeakerInferences(InferenceList* list, Inference *tmpRes, Inference *infer1,
+ Inference *infer2, SnapVector<int> *strengthened, unsigned idx);
+
+ int getSize() {
+ return size;
+ }
+
+ memory_order &operator[](int idx);
+
+	/** A simple overload, which allows the caller to pass two boolean
+	 * references; we set those two variables to indicate whether we can
+	 * update the order (compatible or not) and whether we have actually
+	 * updated it */
+ int strengthen(const ModelAction *act, memory_order mo, bool &canUpdate,
+ bool &hasUpdated);
+
+	/** @Return:
+	 1 -> 'this > infer';
+	 -1 -> 'this < infer';
+	 0 -> 'this == infer';
+	 INFERENCE_INCOMPARABLE(x) -> true means incomparable
+	 */
+ int compareTo(const Inference *infer) const;
+
+ void setInitialInfer(Inference *val) {
+ initialInfer = val;
+ }
+
+ Inference* getInitialInfer() {
+ return initialInfer;
+ }
+
+ void setShouldFix(bool val) {
+ shouldFix = val;
+ }
+
+ bool getShouldFix() {
+ return shouldFix;
+ }
+
+ void setHasFixes(bool val) {
+ hasFixes = val;
+ }
+
+ bool getHasFixes() {
+ return hasFixes;
+ }
+
+ void setBuggy(bool val) {
+ buggy = val;
+ }
+
+ bool getBuggy() {
+ return buggy;
+ }
+
+ void setExplored(bool val) {
+ explored = val;
+ }
+
+ bool isExplored() {
+ return explored;
+ }
+
+ void setLeaf(bool val) {
+ leaf = val;
+ }
+
+ bool isLeaf() {
+ return leaf;
+ }
+
+ unsigned long getHash();
+
+ void print();
+ void print(bool hasHash);
+
+ ~Inference() {
+ model_free(orders);
+ }
+
+ MEMALLOC
+};
+#endif
--- /dev/null
+#include "inferlist.h"
+
+InferenceList::InferenceList() {
+ list = new ModelList<Inference*>;
+}
+
+int InferenceList::getSize() {
+ return list->size();
+}
+
+void InferenceList::pop_back() {
+ list->pop_back();
+}
+
+Inference* InferenceList::back() {
+ return list->back();
+}
+
+void InferenceList::push_back(Inference *infer) {
+ list->push_back(infer);
+}
+
+void InferenceList::pop_front() {
+ list->pop_front();
+}
+
+/** This function should not be called too often, since we want to keep a
+ * clean abstraction of the list of inferences; so far it is only called
+ * from the functions in InferenceSet */
+ModelList<Inference*>* InferenceList::getList() {
+ return list;
+}
+
+bool InferenceList::applyPatch(Inference *curInfer, Inference *newInfer, Patch *patch) {
+ bool canUpdate = true,
+ hasUpdated = false,
+ updateState = false;
+ for (int i = 0; i < patch->getSize(); i++) {
+ canUpdate = true;
+ hasUpdated = false;
+ PatchUnit *unit = patch->get(i);
+ newInfer->strengthen(unit->getAct(), unit->getMO(), canUpdate, hasUpdated);
+ if (!canUpdate) {
+ // This is not a feasible patch, bail
+ break;
+ } else if (hasUpdated) {
+ updateState = true;
+ }
+ }
+	return updateState;
+}
+
+void InferenceList::applyPatch(Inference *curInfer, Patch* patch) {
+ if (list->empty()) {
+ Inference *newInfer = new Inference(curInfer);
+ if (!applyPatch(curInfer, newInfer, patch)) {
+ delete newInfer;
+ } else {
+ list->push_back(newInfer);
+ }
+ } else {
+ ModelList<Inference*> *newList = new ModelList<Inference*>;
+ for (ModelList<Inference*>::iterator it = list->begin(); it !=
+ list->end(); it++) {
+ Inference *oldInfer = *it;
+ Inference *newInfer = new Inference(oldInfer);
+ if (!applyPatch(curInfer, newInfer, patch)) {
+ delete newInfer;
+ } else {
+ newList->push_back(newInfer);
+ }
+ }
+ // Clean the old list
+ for (ModelList<Inference*>::iterator it = list->begin(); it !=
+ list->end(); it++) {
+ delete *it;
+ }
+ delete list;
+ list = newList;
+ }
+}
+
+void InferenceList::applyPatch(Inference *curInfer, SnapVector<Patch*> *patches) {
+ if (list->empty()) {
+ for (unsigned i = 0; i < patches->size(); i++) {
+ Inference *newInfer = new Inference(curInfer);
+ Patch *patch = (*patches)[i];
+ if (!applyPatch(curInfer, newInfer, patch)) {
+ delete newInfer;
+ } else {
+ list->push_back(newInfer);
+ }
+ }
+ } else {
+ ModelList<Inference*> *newList = new ModelList<Inference*>;
+ for (ModelList<Inference*>::iterator it = list->begin(); it !=
+ list->end(); it++) {
+ Inference *oldInfer = *it;
+ for (unsigned i = 0; i < patches->size(); i++) {
+ Inference *newInfer = new Inference(oldInfer);
+ Patch *patch = (*patches)[i];
+ if (!applyPatch(curInfer, newInfer, patch)) {
+ delete newInfer;
+ } else {
+ newList->push_back(newInfer);
+ }
+ }
+ }
+ // Clean the old list
+ for (ModelList<Inference*>::iterator it = list->begin(); it !=
+ list->end(); it++) {
+ delete *it;
+ }
+ delete list;
+ list = newList;
+ }
+}
+
+/** Append another list to this list */
+bool InferenceList::append(InferenceList *inferList) {
+ if (!inferList)
+ return false;
+ ModelList<Inference*> *l = inferList->list;
+ list->insert(list->end(), l->begin(), l->end());
+ return true;
+}
+
+/** Only choose the weakest existing candidates, and they must be stronger than
+ * the current inference */
+void InferenceList::pruneCandidates(Inference *curInfer) {
+ ModelList<Inference*> *newCandidates = new ModelList<Inference*>(),
+ *candidates = list;
+
+ ModelList<Inference*>::iterator it1, it2;
+ int compVal;
+ for (it1 = candidates->begin(); it1 != candidates->end(); it1++) {
+ Inference *cand = *it1;
+ compVal = cand->compareTo(curInfer);
+ if (compVal == 0) {
+ // If as strong as curInfer, bail
+ delete cand;
+ continue;
+ }
+ // Check if the cand is any stronger than those in the newCandidates
+ for (it2 = newCandidates->begin(); it2 != newCandidates->end(); it2++) {
+ Inference *addedInfer = *it2;
+ compVal = addedInfer->compareTo(cand);
+ if (compVal == 0 || compVal == 1) { // Added inference is stronger
+ delete addedInfer;
+ it2 = newCandidates->erase(it2);
+ it2--;
+ }
+ }
+ // Now push the cand to the list
+ newCandidates->push_back(cand);
+ }
+ delete candidates;
+ list = newCandidates;
+}
+
+void InferenceList::clearAll() {
+ clearAll(list);
+}
+
+void InferenceList::clearList() {
+ delete list;
+}
+
+void InferenceList::clearAll(ModelList<Inference*> *l) {
+ for (ModelList<Inference*>::iterator it = l->begin(); it !=
+ l->end(); it++) {
+ Inference *infer = *it;
+ delete infer;
+ }
+ delete l;
+}
+
+void InferenceList::clearAll(InferenceList *inferList) {
+ clearAll(inferList->list);
+}
+
+void InferenceList::print(InferenceList *inferList, const char *msg) {
+ print(inferList->getList(), msg);
+}
+
+void InferenceList::print(ModelList<Inference*> *inferList, const char *msg) {
+ for (ModelList<Inference*>::iterator it = inferList->begin(); it !=
+ inferList->end(); it++) {
+ int idx = distance(inferList->begin(), it);
+ Inference *infer = *it;
+ model_print("%s %d:\n", msg, idx);
+ infer->print();
+ model_print("\n");
+ }
+}
+
+void InferenceList::print() {
+ print(list, "Inference");
+}
+
+void InferenceList::print(const char *msg) {
+ print(list, msg);
+}
--- /dev/null
+#ifndef _INFERLIST_H
+#define _INFERLIST_H
+
+#include "fence_common.h"
+#include "patch.h"
+#include "inference.h"
+
+class Patch;
+class PatchUnit;
+class Inference;
+
+/** This class represents the list of inferences that can fix the problem
+ */
+class InferenceList {
+ private:
+ ModelList<Inference*> *list;
+
+ public:
+ InferenceList();
+ int getSize();
+ Inference* back();
+
+	/** This function should not be called too often, since we want to keep a
+	 * clean abstraction of the list of inferences; so far it is only called
+	 * from the functions in InferenceSet */
+ ModelList<Inference*>* getList();
+ void push_back(Inference *infer);
+ void pop_front();
+ void pop_back();
+ bool applyPatch(Inference *curInfer, Inference *newInfer, Patch *patch);
+
+ void applyPatch(Inference *curInfer, Patch* patch);
+
+ void applyPatch(Inference *curInfer, SnapVector<Patch*> *patches);
+
+ /** Append another list to this list */
+ bool append(InferenceList *inferList);
+
+	/** Only choose the weakest existing candidates, and they must be stronger
+	 * than the current inference */
+ void pruneCandidates(Inference *curInfer);
+
+ void clearAll();
+
+ void clearList();
+
+ static void clearAll(ModelList<Inference*> *l);
+
+ static void clearAll(InferenceList *inferList);
+
+ static void print(ModelList<Inference*> *inferList, const char *msg);
+
+ static void print(InferenceList *inferList, const char *msg);
+
+ void print();
+
+ void print(const char *msg);
+
+ MEMALLOC
+
+};
+
+
+#endif
--- /dev/null
+#include "patch.h"
+#include "inference.h"
+#include "inferset.h"
+
+InferenceSet::InferenceSet() {
+ discoveredSet = new InferenceList;
+ results = new InferenceList;
+ candidates = new InferenceList;
+}
+
+/** Print the result of inferences */
+void InferenceSet::printResults() {
+ results->print("Result");
+ stat.print();
+}
+
+/** Print candidates of inferences */
+void InferenceSet::printCandidates() {
+ candidates->print("Candidate");
+}
+
+/** When we finish model checking or cannot further strengthen the
+ * inference, we commit the inference to be explored; if it is feasible, we
+ * put it in the result list */
+void InferenceSet::commitInference(Inference *infer, bool feasible) {
+ ASSERT (infer);
+
+ infer->setExplored(true);
+ FENCE_PRINT("Explored %lu\n", infer->getHash());
+ if (feasible) {
+ addResult(infer);
+ }
+}
+
+int InferenceSet::getCandidatesSize() {
+ return candidates->getSize();
+}
+
+int InferenceSet::getResultsSize() {
+ return results->getSize();
+}
+
+/** Be careful: if a candidate is not added, it is deleted in this
+ * function. Therefore, the caller should just delete the list when
+ * done calling this function. */
+bool InferenceSet::addCandidates(Inference *curInfer, InferenceList *inferList) {
+ if (!inferList)
+ return false;
+ // First prune the list of inference
+ inferList->pruneCandidates(curInfer);
+
+ ModelList<Inference*> *cands = inferList->getList();
+
+ // For the purpose of debugging, record all those inferences added here
+ InferenceList *addedCandidates = new InferenceList;
+	FENCE_PRINT("List size: %zu.\n", cands->size());
+ bool added = false;
+
+ /******** addCurInference ********/
+	// Add the current inference to the set, but specifically mark it as a
+	// non-leaf node so that when it gets popped, we just need to commit it as
+ // explored
+ addCurInference(curInfer);
+
+ ModelList<Inference*>::iterator it;
+ for (it = cands->begin(); it != cands->end(); it++) {
+ Inference *candidate = *it;
+ // Before adding those fixes, set its initial inference
+ candidate->setInitialInfer(curInfer->getInitialInfer());
+ bool tmpAdded = false;
+ /******** addInference ********/
+ tmpAdded = addInference(candidate);
+ if (tmpAdded) {
+ added = true;
+ it = cands->erase(it);
+ it--;
+ addedCandidates->push_back(candidate);
+ }
+ }
+
+ // For debugging, print the list of candidates for this iteration
+ FENCE_PRINT("Current inference:\n");
+ curInfer->print();
+ FENCE_PRINT("\n");
+ FENCE_PRINT("The added inferences:\n");
+ addedCandidates->print("Candidates");
+ FENCE_PRINT("\n");
+
+ // Clean up the candidates
+ inferList->clearList();
+ FENCE_PRINT("potential results size: %d.\n", candidates->getSize());
+ return added;
+}
+
+
+/** Check if we have stronger or equal inferences in the current result
+ * list; if we do, we remove them and add the passed-in parameter infer */
+ void InferenceSet::addResult(Inference *infer) {
+ ModelList<Inference*> *list = results->getList();
+ for (ModelList<Inference*>::iterator it = list->begin(); it !=
+ list->end(); it++) {
+ Inference *existResult = *it;
+ int compVal = existResult->compareTo(infer);
+ if (compVal == 0 || compVal == 1) {
+ // The existing result is equal or stronger, remove it
+			FENCE_PRINT("We are dumping the following inference because it's either too weak or the same:\n");
+ existResult->print();
+ FENCE_PRINT("\n");
+ it = list->erase(it);
+ it--;
+ }
+ }
+ list->push_back(infer);
+ }
+
+/** Get the next available unexplored node; @Return NULL
+ * if we don't have next, meaning that we are done with exploring */
+Inference* InferenceSet::getNextInference() {
+ Inference *infer = NULL;
+ while (candidates->getSize() > 0) {
+ infer = candidates->back();
+ candidates->pop_back();
+ if (!infer->isLeaf()) {
+ commitInference(infer, false);
+ continue;
+ }
+ if (infer->getShouldFix() && hasBeenExplored(infer)) {
+ // Finish exploring this node
+ // Remove the node from the set
+ FENCE_PRINT("Explored inference:\n");
+ infer->print();
+ FENCE_PRINT("\n");
+ continue;
+ } else {
+ return infer;
+ }
+ }
+ return NULL;
+}
+
+/** Add the current inference to the set before adding fixes to it; in
+ * this case, fixes will be added afterwards, and infer should've been
+ * discovered */
+void InferenceSet::addCurInference(Inference *infer) {
+ infer->setLeaf(false);
+ candidates->push_back(infer);
+}
+
+/** Add one weaker node (the stronger one has been explored and known to be
+ * SC; we just want to know if a weaker one might also be SC).
+ */
+void InferenceSet::addWeakerInference(Inference *curInfer) {
+ Inference *initialInfer = curInfer->getInitialInfer();
+	model_print("Before adding weaker inference, candidates size=%d\n",
+ candidates->getSize());
+ ModelList<Inference*> *list = discoveredSet->getList();
+
+ // An array of strengthened wildcards
+ SnapVector<int> *strengthened = new SnapVector<int>;
+ model_print("Strengthened wildcards\n");
+ for (int i = 1; i <= curInfer->getSize(); i++) {
+ memory_order mo1 = (*curInfer)[i],
+ mo2 = (*initialInfer)[i];
+ int compVal = Inference::compareMemoryOrder(mo1, mo2);
+ if (!(compVal == 0 || compVal == 1)) {
+ model_print("assert failure\n");
+ model_print("compVal=%d\n", compVal);
+ ASSERT (false);
+ }
+ if (compVal == 0) // Same
+ continue;
+ model_print("wildcard %d -> %s (%s)\n", i, get_mo_str(mo1),
+ get_mo_str(mo2));
+ strengthened->push_back(i);
+ }
+
+ for (unsigned i = 0; i < strengthened->size(); i++) {
+ int w = (*strengthened)[i]; // The wildcard
+ memory_order mo1 = (*curInfer)[w];
+ memory_order mo2 = (*initialInfer)[w];
+ memory_order weakerMO = Inference::nextWeakOrder(mo1, mo2);
+ Inference *weakerInfer1 = new Inference(curInfer);
+ Inference *weakerInfer2 = NULL;
+ if (mo1 == memory_order_acq_rel) {
+ if (mo2 == memory_order_acquire) {
+ (*weakerInfer1)[w] = memory_order_acquire;
+ } else if (mo2 == memory_order_release) {
+ (*weakerInfer1)[w] = memory_order_release;
+ } else { // relaxed
+ (*weakerInfer1)[w] = memory_order_acquire;
+ weakerInfer2 = new Inference(curInfer);
+ (*weakerInfer2)[w] = memory_order_release;
+ }
+ } else {
+ if (mo2 != weakerMO)
+ (*weakerInfer1)[w] = weakerMO;
+ }
+
+ weakerInfer1->setShouldFix(false);
+ weakerInfer1->setLeaf(true);
+ if (weakerInfer2) {
+ weakerInfer2->setShouldFix(false);
+ weakerInfer2->setLeaf(true);
+ }
+
+ bool foundIt = false;
+ for (ModelList<Inference*>::iterator it = list->begin(); it !=
+ list->end(); it++) {
+ Inference *discoveredInfer = *it;
+			// When we already have an equal inference in the discovered set
+ int compVal = discoveredInfer->compareTo(weakerInfer1);
+ if (compVal == 0 && !discoveredInfer->isLeaf()) {
+ foundIt = true;
+ break;
+ }
+ }
+ if (!foundIt) {
+ addInference(weakerInfer1);
+ }
+ if (!weakerInfer2)
+ continue;
+ foundIt = false;
+ for (ModelList<Inference*>::iterator it = list->begin(); it !=
+ list->end(); it++) {
+ Inference *discoveredInfer = *it;
+			// When we already have an equal inference in the discovered set
+ int compVal = discoveredInfer->compareTo(weakerInfer2);
+ if (compVal == 0 && !discoveredInfer->isLeaf()) {
+ foundIt = true;
+ break;
+ }
+ }
+ if (!foundIt) {
+ addInference(weakerInfer2);
+ }
+ }
+
+	model_print("After adding weaker inference, candidates size=%d\n",
+ candidates->getSize());
+}
+
+/** Add one possible node that represents a fix for the current inference;
+ * @Return true if the node to add has not been explored yet
+ */
+bool InferenceSet::addInference(Inference *infer) {
+ if (!hasBeenDiscovered(infer)) {
+ // We haven't discovered this inference yet
+
+ // Newly added nodes are leaf by default
+ infer->setLeaf(true);
+ candidates->push_back(infer);
+ discoveredSet->push_back(infer);
+ FENCE_PRINT("Discovered a parameter assignment with hashcode %lu\n", infer->getHash());
+ return true;
+ } else {
+ stat.notAddedAtFirstPlace++;
+ return false;
+ }
+}
+
+/** Return false if we haven't discovered that inference yet. Basically we
+ * search the discovered set */
+bool InferenceSet::hasBeenDiscovered(Inference *infer) {
+ ModelList<Inference*> *list = discoveredSet->getList();
+ for (ModelList<Inference*>::iterator it = list->begin(); it !=
+ list->end(); it++) {
+ Inference *discoveredInfer = *it;
+		// When we already have an equal inference in the discovered set
+ int compVal = discoveredInfer->compareTo(infer);
+ if (compVal == 0) {
+			FENCE_PRINT("%lu has been discovered.\n",
+ infer->getHash());
+ return true;
+ }
+		// Or the discoveredInfer is explored and infer is stronger than it is
+ if (compVal == -1 && discoveredInfer->isLeaf() &&
+ discoveredInfer->isExplored()) {
+ return true;
+ }
+ }
+ return false;
+}
+
+/** Return true if we have already explored this inference. Basically we
+ * search the discovered set */
+bool InferenceSet::hasBeenExplored(Inference *infer) {
+ ModelList<Inference*> *list = discoveredSet->getList();
+ for (ModelList<Inference*>::iterator it = list->begin(); it !=
+ list->end(); it++) {
+ Inference *discoveredInfer = *it;
+ if (!discoveredInfer->isExplored())
+ continue;
+		// When we already have an equal inference in the discovered set
+ int compVal = discoveredInfer->compareTo(infer);
+ if (compVal == 0 || compVal == -1) {
+ return true;
+ }
+ }
+ return false;
+}
--- /dev/null
+#ifndef _INFERSET_H
+#define _INFERSET_H
+
+#include "fence_common.h"
+#include "patch.h"
+#include "inference.h"
+#include "inferlist.h"
+
+typedef struct inference_stat {
+ int notAddedAtFirstPlace;
+
+ inference_stat() :
+ notAddedAtFirstPlace(0) {}
+
+ void print() {
+ model_print("Inference process statistics output:...\n");
+		model_print("The number of inferences not added in the first place: %d\n",
+ notAddedAtFirstPlace);
+ }
+} inference_stat_t;
+
+
+/** Essentially, we have to explore a big lattice of inferences, the bottom of
+ * which is the inference that has all relaxed orderings, and the top of which
+ * is the one that has all SC orderings. We define the partial ordering between
+ * inferences as in the compareTo() function of the Inference class. In other
+ * words, we compare the ordering parameters one by one as a vector.
+ * Theoretically, we need to explore up to 5^N inferences, where N denotes the
+ * number of wildcards (since we have 5 possible options for each memory order).
+
+ * We try to reduce the search space by recording whether an inference has
+ * been discovered or not, and if so, we only need to explore from that
+ * inference once. We can use a set to record the inferences to be explored,
+ * and insert new undiscovered fixes into that set iteratively until it
+ * gets empty.
+
+ * In detail, we use the InferenceList to represent that set, and push and pop
+ * inferences in a LIFO-like manner. When we add an inference to the stack, we
+ * set it to be a leaf or non-leaf node. The current inference to be added is a
+ * non-leaf node because it generates stronger inferences. The generated
+ * inferences, on the other hand, are set to be leaf nodes. So when we pop a
+ * leaf node, we know that it is not just discovered but thoroughly explored.
+ * Therefore, when we decide whether an inference is discovered, we can first
+ * look up the discovered set, and also treat inferences that are stronger than
+ * the explored ones as discovered.
+
+ ********** The main algorithm **********
+ Initial:
+ InferenceSet set; // Store the candidates to explore in a set
+ Set discoveredSet; // Store the discovered candidates. For each discovered
+ // candidate, if it's explored (feasible or infeasible, meaning that
+ // they are already known to work or not work), we set it to be
+ // explored. With that, we can reduce the search space by ignoring
+ // those candidates that are stronger than the explored ones.
+ Inference curInfer = RELAX; // Initialize the current inference to be all relaxed (RELAX)
+
+ API Methods:
+ bool addInference(infer, bool isLeaf) {
+ // Push infer into the discovered set when it's not discovered, and return
+ // whether it was added or not
+ }
+
+ void commitInference(infer, isFeasible) {
+ // Set the infer to be explored and add it to the result set if feasible
+ }
+
+ Inference* getNextInference() {
+ // Get the next unexplored inference so that we can continue searching
+ }
+*/
+
+class InferenceSet {
+ private:
+
+ /** The set of already discovered nodes in the tree */
+ InferenceList *discoveredSet;
+
+ /** The list of feasible inferences */
+ InferenceList *results;
+
+ /** The set of candidates */
+ InferenceList *candidates;
+
+	/** The statistics of the inference process */
+ inference_stat_t stat;
+
+ public:
+ InferenceSet();
+
+ /** Print the result of inferences */
+ void printResults();
+
+ /** Print candidates of inferences */
+ void printCandidates();
+
+	/** When we finish model checking or cannot further strengthen the
+	 * inference, we commit the inference to be explored; if it is feasible, we
+	 * put it in the result list */
+ void commitInference(Inference *infer, bool feasible);
+
+ int getCandidatesSize();
+
+ int getResultsSize();
+
+	/** Be careful: if a candidate is not added, it is deleted in this
+	 * function. Therefore, the caller should just delete the list when
+	 * done calling this function. */
+ bool addCandidates(Inference *curInfer, InferenceList *inferList);
+
+
+ /** Check if we have stronger or equal inferences in the current result
+ * list; if we do, we remove them and add the passed-in parameter infer */
+ void addResult(Inference *infer);
+
+ /** Get the next available unexplored node; @Return NULL
+ * if we don't have next, meaning that we are done with exploring */
+ Inference* getNextInference();
+
+ /** Add the current inference to the set before adding fixes to it; in
+ * this case, fixes will be added afterwards, and infer should've been
+ * discovered */
+ void addCurInference(Inference *infer);
+
+	/** Add one weaker node (the stronger one has been explored and known to
+	 * be SC; we just want to know if a weaker one might also be SC). */
+ void addWeakerInference(Inference *curInfer);
+
+ /** Add one possible node that represents a fix for the current inference;
+ * @Return true if the node to add has not been explored yet
+ */
+ bool addInference(Inference *infer);
+
+	/** Return false if we haven't discovered that inference yet. Basically we
+	 * search the discovered set */
+ bool hasBeenDiscovered(Inference *infer);
+
+	/** Return true if we have already explored this inference. Basically we
+	 * search the discovered set */
+ bool hasBeenExplored(Inference *infer);
+
+ MEMALLOC
+};
+
+#endif
--- /dev/null
+#include "patch.h"
+#include "inference.h"
+
+Patch::Patch(const ModelAction *act, memory_order mo) {
+ PatchUnit *unit = new PatchUnit(act, mo);
+ units = new SnapVector<PatchUnit*>;
+ units->push_back(unit);
+}
+
+Patch::Patch(const ModelAction *act1, memory_order mo1, const ModelAction *act2, memory_order mo2) {
+ units = new SnapVector<PatchUnit*>;
+ PatchUnit *unit = new PatchUnit(act1, mo1);
+ units->push_back(unit);
+ unit = new PatchUnit(act2, mo2);
+ units->push_back(unit);
+}
+
+Patch::Patch() {
+ units = new SnapVector<PatchUnit*>;
+}
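+
+/* Construction sketch: a fix that strengthens a read to acquire and its
+ * corresponding write to release (read/write are hypothetical ModelActions):
+ *   Patch *p = new Patch(read, memory_order_acquire,
+ *                        write, memory_order_release);
+ */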
+
+bool Patch::canStrengthen(Inference *curInfer) {
+ if (!isApplicable())
+ return false;
+ bool res = false;
+ for (unsigned i = 0; i < units->size(); i++) {
+ PatchUnit *u = (*units)[i];
+ memory_order wildcard = u->getAct()->get_original_mo();
+ memory_order curMO = (*curInfer)[get_wildcard_id(wildcard)];
+ if (u->getMO() != curMO)
+ res = true;
+ }
+ return res;
+}
+
+bool Patch::isApplicable() {
+ for (unsigned i = 0; i < units->size(); i++) {
+ PatchUnit *u = (*units)[i];
+ memory_order wildcard = u->getAct()->get_original_mo();
+ if (is_wildcard(wildcard))
+ continue;
+ int compVal = Inference::compareMemoryOrder(wildcard, u->getMO());
+ if (compVal == 2 || compVal == -1)
+ return false;
+ }
+ return true;
+}
+
+void Patch::addPatchUnit(const ModelAction *act, memory_order mo) {
+ PatchUnit *unit = new PatchUnit(act, mo);
+ units->push_back(unit);
+}
+
+int Patch::getSize() {
+ return units->size();
+}
+
+PatchUnit* Patch::get(int i) {
+ return (*units)[i];
+}
+
+void Patch::print() {
+ for (unsigned i = 0; i < units->size(); i++) {
+ PatchUnit *u = (*units)[i];
+ model_print("wildcard %d -> %s\n",
+ get_wildcard_id_zero(u->getAct()->get_original_mo()),
+ get_mo_str(u->getMO()));
+ }
+}
--- /dev/null
+#ifndef _PATCH_H
+#define _PATCH_H
+
+#include "fence_common.h"
+#include "inference.h"
+
+class PatchUnit;
+class Patch;
+
+class PatchUnit {
+ private:
+ const ModelAction *act;
+ memory_order mo;
+
+ public:
+ PatchUnit(const ModelAction *act, memory_order mo) {
+ this->act= act;
+ this->mo = mo;
+ }
+
+ const ModelAction* getAct() {
+ return act;
+ }
+
+ memory_order getMO() {
+ return mo;
+ }
+
+ SNAPSHOTALLOC
+};
+
+class Patch {
+ private:
+ SnapVector<PatchUnit*> *units;
+
+ public:
+ Patch(const ModelAction *act, memory_order mo);
+
+ Patch(const ModelAction *act1, memory_order mo1, const ModelAction *act2,
+ memory_order mo2);
+
+ Patch();
+
+ bool canStrengthen(Inference *curInfer);
+
+ bool isApplicable();
+
+ void addPatchUnit(const ModelAction *act, memory_order mo);
+
+ int getSize();
+
+ PatchUnit* get(int i);
+
+ void print();
+
+ SNAPSHOTALLOC
+};
+
+#endif
--- /dev/null
+#ifndef _SC_ANNOTATION_H
+#define _SC_ANNOTATION_H
+
+#include "cdsannotate.h"
+#include "action.h"
+
+#define SC_ANNOTATION 2
+
+#define BEGIN 1
+#define END 2
+#define KEEP 3
+
+
+inline bool IS_SC_ANNO(ModelAction *act) {
+ return act != NULL && act->is_annotation() &&
+ act->get_value() == SC_ANNOTATION;
+}
+
+inline bool IS_ANNO_BEGIN(ModelAction *act) {
+ return (void*) BEGIN == act->get_location();
+}
+
+inline bool IS_ANNO_END(ModelAction *act) {
+ return (void*) END == act->get_location();
+}
+
+inline bool IS_ANNO_KEEP(ModelAction *act) {
+ return (void*) KEEP == act->get_location();
+}
+
+inline void SC_BEGIN() {
+ void *loc = (void*) BEGIN;
+ cdsannotate(SC_ANNOTATION, loc);
+}
+
+inline void SC_END() {
+ void *loc = (void*) END;
+ cdsannotate(SC_ANNOTATION, loc);
+}
+
+inline void SC_KEEP() {
+ void *loc = (void*) KEEP;
+ cdsannotate(SC_ANNOTATION, loc);
+}
+
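+/* Usage sketch (hypothetical client code; the exact semantics are defined by
+ * the SCFence annotation mode):
+ *
+ *   SC_BEGIN();
+ *   x.store(1, wildcard(1));
+ *   SC_KEEP();
+ *   SC_END();
+ */
+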
+
+#endif
--- /dev/null
+#include "scfence.h"
+#include "action.h"
+#include "threads-model.h"
+#include "clockvector.h"
+#include "execution.h"
+#include "cyclegraph.h"
+#include <sys/time.h>
+
+#include "model.h"
+#include "wildcard.h"
+#include "inference.h"
+#include "inferset.h"
+#include "sc_annotation.h"
+#include "errno.h"
+#include <stdio.h>
+#include <algorithm>
+
+scfence_priv *SCFence::priv;
+
+SCFence::SCFence() :
+ scgen(new SCGenerator),
+ stats((struct sc_statistics *)model_calloc(1, sizeof(struct sc_statistics))),
+ execution(NULL)
+{
+ priv = new scfence_priv();
+ weaken = true;
+}
+
+SCFence::~SCFence() {
+ delete(stats);
+}
+
+void SCFence::setExecution(ModelExecution * execution) {
+ this->execution=execution;
+}
+
+const char * SCFence::name() {
+	return "SCFENCE";
+}
+
+void SCFence::finish() {
+ model_print("SCFence finishes!\n");
+}
+
+
+/******************** SCFence-related Functions (Beginning) ********************/
+
+void SCFence::inspectModelAction(ModelAction *act) {
+	if (act == NULL) // Get past cases like the data race detector
+ return;
+ if (act->get_mo() >= memory_order_relaxed && act->get_mo() <=
+ memory_order_seq_cst) {
+ return;
+ } else if (act->get_mo() == memory_order_normal) {
+ // For the purpose of eliminating data races
+ act->set_mo(memory_order_relaxed);
+ } else { // For wildcards
+ Inference *curInfer = getCurInference();
+ int wildcardID = get_wildcard_id(act->get_mo());
+ memory_order order = (*curInfer)[wildcardID];
+ if (order == WILDCARD_NONEXIST) {
+ (*curInfer)[wildcardID] = memory_order_relaxed;
+ act->set_mo(memory_order_relaxed);
+ } else {
+ act->set_mo((*curInfer)[wildcardID]);
+ }
+ }
+}
+
+
+void SCFence::actionAtInstallation() {
+	// When this plugin gets installed, it becomes the inspect_plugin
+ model->set_inspect_plugin(this);
+}
+
+void SCFence::analyze(action_list_t *actions) {
+ scgen->setActions(actions);
+ scgen->setExecution(execution);
+ dup_threadlists = scgen->getDupThreadLists();
+ if (getTimeout() > 0 && isTimeout()) {
+ model_print("Backtrack because we reached the timeout bound.\n");
+ routineBacktrack(false);
+ return;
+ }
+
+ /* Build up the thread lists for general purpose */
+ mo_graph = execution->get_mo_graph();
+
+	// First of all, check if there are any uninitialized read bugs
+ if (execution->have_bug_reports()) {
+ setBuggy(true);
+ }
+
+ action_list_t *list = scgen->getSCList();
+ bool cyclic = scgen->getCyclic();
+
+ struct sc_statistics *s = scgen->getStats();
+ stats->nonsccount += s->nonsccount;
+ stats->sccount += s->sccount;
+ stats->elapsedtime += s->elapsedtime;
+ stats->actions += s->actions;
+
+ if (!cyclic && execution->have_bug_reports()) {
+		model_print("Be careful. This execution has bugs and is still SC\n");
+ }
+
+ if (!cyclic && scgen->getPrintAlways()) {
+ scgen->print_list(list);
+ }
+
+	// Now check whether we have found a non-SC execution
+ if (cyclic) {
+ /******** The Non-SC case (beginning) ********/
+ // Only print those that should be fixed
+ if (getCurInference()->getShouldFix())
+ scgen->print_list(list);
+ bool added = addFixes(list, NON_SC);
+ if (added) {
+ routineAfterAddFixes();
+ return;
+		} else { // Couldn't infer anything more, backtrack
+ routineBacktrack(false);
+ return;
+ }
+ /******** The Non-SC case (end) ********/
+ } else {
+ /******** The implicit MO case (beginning) ********/
+ if (getInferImplicitMO() && execution->too_many_steps() &&
+ !execution->is_complete_execution()) {
+ // Only print those that should be fixed
+ if (getCurInference()->getShouldFix())
+ scgen->print_list(list);
+ bool added = addFixes(list, IMPLICIT_MO);
+ if (added) {
+ FENCE_PRINT("Found an implicit mo pattern to fix!\n");
+ routineAfterAddFixes();
+ return;
+ } else {
+				// This can be a good execution, so we can't do anything.
+ return;
+ }
+ }
+ /******** The implicit MO case (end) ********/
+
+ /******** The checking data races case (beginning) ********/
+ bool added = addFixes(list, DATA_RACE);
+ if (added) {
+			FENCE_PRINT("Found a data race pattern to fix!\n");
+ routineAfterAddFixes();
+ return;
+ }
+ }
+}
+
+void SCFence::exitModelChecker() {
+ model->exit_model_checker();
+}
+
+void SCFence::restartModelChecker() {
+ model->restart();
+ if (!getHasRestarted())
+ setHasRestarted(true);
+}
+
+bool SCFence::modelCheckerAtExitState() {
+ return model->get_exit_flag();
+}
+
+void SCFence::actionAtModelCheckingFinish() {
+ // Just bail if the model checker is at the exit state
+ if (modelCheckerAtExitState())
+ return;
+
+ /** Backtrack with a successful inference */
+ routineBacktrack(true);
+}
+
+void SCFence::initializeByFile() {
+ char *name = getCandidateFile();
+ FILE *fp = fopen(name, "r");
+ if (fp == NULL) {
+ fprintf(stderr, "Cannot open the input parameter assignment file: %s!\n", name);
+ perror(name);
+ exit(EXIT_FAILURE);
+ }
+ Inference *infer = NULL;
+ int curNum = 0;
+ memory_order mo = memory_order_relaxed;
+ char *str = (char *) malloc(sizeof(char) * (30 + 1));
+ bool isProcessing = false;
+ int ret = 0;
+ while (!feof(fp)) {
+ // Result #:
+ if (!isProcessing) {
+ ret = fscanf(fp, "%s", str);
+ }
+ if (strcmp(str, "Result") == 0 || isProcessing) { // In an inference
+ ret = fscanf(fp, "%s", str);
+
+ infer = new Inference();
+ isProcessing = false;
+ while (!feof(fp)) { // Processing a specific inference
+ // wildcard # -> memory_order
+ ret = fscanf(fp, "%s", str);
+
+ if (strcmp(str, "Result") == 0) {
+ isProcessing = true;
+ break;
+ }
+ ret = fscanf(fp, "%d", &curNum);
+ ret = fscanf(fp, "%s", str);
+ if (ret <= 0 && ret != -1) {
+ fprintf(stderr, "The input parameter assignment file has wrong format\n");
+ perror(name);
+ exit(EXIT_FAILURE);
+ }
+ ret = fscanf(fp, "%s", str);
+
+ if (strcmp(str, "memory_order_relaxed") == 0)
+ mo = memory_order_relaxed;
+ else if (strcmp(str, "memory_order_acquire") == 0)
+ mo = memory_order_acquire;
+ else if (strcmp(str, "memory_order_release") == 0)
+ mo = memory_order_release;
+ else if (strcmp(str, "memory_order_acq_rel") == 0)
+ mo = memory_order_acq_rel;
+ else if (strcmp(str, "memory_order_seq_cst") == 0)
+ mo = memory_order_seq_cst;
+ (*infer)[curNum] = mo;
+ }
+
+ /******** addInference ********/
+ if (!addInference(infer))
+ delete infer;
+ }
+ }
+ fclose(fp);
+
+ FENCE_PRINT("candidate size from file: %d\n", setSize());
+ Inference *next = getNextInference();
+ if (!next) {
+		model_print("Something is wrong with the candidate file\n");
+ } else {
+ setCurInference(next);
+ }
+}
+
+char* SCFence::parseOptionHelper(char *opt, int *optIdx) {
+ char *res = (char*) model_malloc(1024 * sizeof(char));
+ int resIdx = 0;
+ while (opt[*optIdx] != '\0' && opt[*optIdx] != '|') {
+ res[resIdx++] = opt[(*optIdx)++];
+ }
+ res[resIdx] = '\0';
+ return res;
+}
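+
+/* Option string format (as parsed below): a one-character option followed
+ * immediately by its value, with several options joined by '|'. For example,
+ * "t100|m" (a hypothetical invocation) sets the timeout value to 100 and
+ * turns on implicit modification-order inference. */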
+
+bool SCFence::parseOption(char *opt) {
+ int optIdx = 0;
+ char *val = NULL;
+ while (true) {
+ char option = opt[optIdx++];
+ val = parseOptionHelper(opt, &optIdx);
+ switch (option) {
+ case 'f': // Read initial inference from file
+ setCandidateFile(val);
+ if (strcmp(val, "") == 0) {
+ model_print("Need to specify a file that contains initial inference!\n");
+ return true;
+ }
+ break;
+			case 'b': // The implicit modification-order read bound
+ setImplicitMOReadBound(atoi(val));
+ if (getImplicitMOReadBound() <= 0) {
+ model_print("Enter valid bound value!\n");
+ return true;
+ }
+ break;
+ case 'm': // Infer the modification order from repetitive reads from
+ // the same write
+ setInferImplicitMO(true);
+ if (strcmp(val, "") != 0) {
+ model_print("option m doesn't take any arguments!\n");
+ return true;
+ }
+ break;
+ case 'a': // Turn on the annotation mode
+ scgen->setAnnotationMode(true);
+ if (strcmp(val, "") != 0) {
+ model_print("option a doesn't take any arguments!\n");
+ return true;
+ }
+ break;
+ case 't': // The timeout set to force the analysis to backtrack
+ model_print("Parsing t option!\n");
+ model_print("t value: %s\n", val);
+ setTimeout(atoi(val));
+ break;
+ default:
+ model_print("Unknown SCFence option: %c!\n", option);
+ return true;
+ break;
+ }
+ if (opt[optIdx] == '|') {
+ optIdx++;
+ } else {
+ break;
+ }
+ }
+	return false;
+}
+
+char* SCFence::getInputFileName(char *opt) {
+ char *res = NULL;
+ if (opt[0] == 'f' &&
+ opt[1] == 'i' &&
+ opt[2] == 'l' &&
+ opt[3] == 'e' &&
+ opt[4] == '-') {
+
+ res = (char*) model_malloc(1024 * sizeof(char));
+ int i = 0, j = 5;
+ while (opt[j] != '\0') {
+ res[i++] = opt[j++];
+ }
+ res[i] = '\0';
+ }
+ return res;
+}
+
+int SCFence::getImplicitMOBound(char *opt) {
+ char *num = NULL;
+ if (opt[0] == 'b' &&
+ opt[1] == 'o' &&
+ opt[2] == 'u' &&
+ opt[3] == 'n' &&
+ opt[4] == 'd' &&
+ opt[5] == '-') {
+
+ num = (char*) model_malloc(1024 * sizeof(char));
+ int i = 0, j = 6;
+ while (opt[j] != '\0') {
+ num[i++] = opt[j++];
+ }
+ num[i] = '\0';
+ }
+ if (num) {
+ return atoi(num);
+ } else {
+ return 0;
+ }
+}
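+
+/* For instance (illustrative), the plugin option "bound-10" is parsed into
+ * the integer bound 10, while a string without the "bound-" prefix yields 0
+ * and is rejected by the caller. */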
+
+bool SCFence::option(char * opt) {
+ char *inputFileName = NULL;
+ unsigned implicitMOBoundNum = 0;
+
+ if (strcmp(opt, "verbose")==0) {
+ scgen->setPrintAlways(true);
+ return false;
+ } else if (strcmp(opt, "buggy")==0) {
+ return false;
+ } else if (strcmp(opt, "quiet")==0) {
+ scgen->setPrintBuggy(false);
+ return false;
+ } else if (strcmp(opt, "nonsc")==0) {
+ scgen->setPrintNonSC(true);
+ return false;
+ } else if (strcmp(opt, "time")==0) {
+ time=true;
+ return false;
+ } else if (strcmp(opt, "no-weaken")==0) {
+ weaken=false;
+ return false;
+ } else if (strcmp(opt, "anno")==0) {
+ scgen->setAnnotationMode(true);
+ return false;
+ } else if (strcmp(opt, "implicit-mo")==0) {
+ setInferImplicitMO(true);
+ return false;
+ } else if ((inputFileName = getInputFileName(opt)) != NULL) {
+ if (strcmp(inputFileName, "") == 0) {
+ model_print("Need to specify a file that contains initial inference!\n");
+ return true;
+ }
+ setCandidateFile(inputFileName);
+ initializeByFile();
+ return false;
+ } else if ((implicitMOBoundNum = getImplicitMOBound(opt)) > 0) {
+ setImplicitMOReadBound(implicitMOBoundNum);
+ return false;
+ } else {
+		model_print("file-InputFile -- takes the candidate file as the argument right after the symbol '-'\n");
+		model_print("no-weaken -- turn off the weakening mode (by default ON)\n");
+		model_print("anno -- turn on the annotation mode (by default OFF)\n");
+		model_print("implicit-mo -- infer implicit modification order, takes no arguments (by default OFF, default bound is %d)\n", DEFAULT_REPETITIVE_READ_BOUND);
+		model_print("bound-NUM -- specify the bound for the implicit mo inference, takes a number as the argument right after the symbol '-'\n");
+ model_print("\n");
+ return true;
+ }
+}
+
+/** Check whether a chosen reads-from path is a release sequence */
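+/* Illustrative sketch (inferred from the loop below): a path [r1, r2, r3]
+ * counts as a release sequence only when r3 reads from r2 and r2 reads from
+ * r1, i.e. the reads form an RMW-style chain back to the head write. */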
+bool SCFence::isReleaseSequence(path_t *path) {
+ ASSERT (path);
+ path_t::reverse_iterator rit = path->rbegin(),
+ rit_next;
+ const ModelAction *read,
+ *write,
+ *next_read;
+ for (; rit != path->rend(); rit++) {
+ read = *rit;
+ rit_next = rit;
+ rit_next++;
+ write = read->get_reads_from();
+ if (rit_next != path->rend()) {
+ next_read = *rit_next;
+ if (write != next_read) {
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
+/** Only return those applicable patches in a vector */
+SnapVector<Patch*>* SCFence::getAcqRel(const ModelAction *read, const
+ ModelAction *readBound,const ModelAction *write, const ModelAction *writeBound) {
+
+ SnapVector<Patch*> *patches = new SnapVector<Patch*>;
+ /* Also support rel/acq fences synchronization here */
+ // Look for the acq fence after the read action
+ int readThrdID = read->get_tid(),
+ writeThrdID = write->get_tid();
+ action_list_t *readThrd = &(*dup_threadlists)[readThrdID],
+ *writeThrd = &(*dup_threadlists)[writeThrdID];
+ action_list_t::iterator readIter = std::find(readThrd->begin(),
+ readThrd->end(), read);
+ action_list_t::reverse_iterator writeIter = std::find(writeThrd->rbegin(),
+ writeThrd->rend(), write);
+
+ action_list_t *acqFences = new action_list_t,
+ *relFences = new action_list_t;
+ ModelAction *act = *readIter;
+ while (readIter++ != readThrd->end() && act != readBound) {
+ if (act->is_fence()) {
+ acqFences->push_back(act);
+ }
+ act = *readIter;
+ }
+ act = *writeIter;
+ while (writeIter++ != writeThrd->rend() && act != writeBound) {
+ if (act->is_fence()) {
+ relFences->push_back(act);
+ }
+ act = *writeIter;
+ }
+ // Now we have a list of rel/acq fences at hand
+ int acqFenceSize = acqFences->size(),
+ relFenceSize = relFences->size();
+
+ Patch *p;
+	if (acqFenceSize == 0 && relFenceSize == 0) {
+		// No fences available; fall through to patch the read & write directly
+ } else if (acqFenceSize > 0 && relFenceSize == 0) {
+ for (action_list_t::iterator it = acqFences->begin(); it !=
+ acqFences->end(); it++) {
+ ModelAction *fence = *it;
+ p = new Patch(fence, memory_order_acquire, write, memory_order_release);
+ if (p->isApplicable())
+ patches->push_back(p);
+ }
+ } else if (acqFenceSize == 0 && relFenceSize > 0) {
+ for (action_list_t::iterator it = relFences->begin(); it !=
+ relFences->end(); it++) {
+ ModelAction *fence = *it;
+ p = new Patch(fence, memory_order_release, read, memory_order_acquire);
+ if (p->isApplicable())
+ patches->push_back(p);
+ }
+ } else {
+ /* Impose on both relFences and acqFences */
+ bool twoFences = false;
+ for (action_list_t::iterator it1 = relFences->begin(); it1 !=
+ relFences->end(); it1++) {
+ ModelAction *relFence = *it1;
+ for (action_list_t::iterator it2 = acqFences->begin(); it2 !=
+ acqFences->end(); it2++) {
+ ModelAction *acqFence = *it2;
+ p = new Patch(relFence, memory_order_release, acqFence, memory_order_acquire);
+ if (p->isApplicable()) {
+ patches->push_back(p);
+ twoFences = true;
+ }
+ }
+ }
+ if (!twoFences) {
+ /* Only impose on the acqFences */
+ for (action_list_t::iterator it = acqFences->begin(); it !=
+ acqFences->end(); it++) {
+ ModelAction *fence = *it;
+ p = new Patch(fence, memory_order_acquire, write, memory_order_release);
+ if (p->isApplicable())
+ patches->push_back(p);
+ }
+ /* Only impose on the relFences */
+ for (action_list_t::iterator it = relFences->begin(); it !=
+ relFences->end(); it++) {
+ ModelAction *fence = *it;
+ p = new Patch(fence, memory_order_release, read, memory_order_acquire);
+ if (p->isApplicable())
+ patches->push_back(p);
+ }
+ }
+ }
+
+ // If we can find a fix with fences, we don't fix on the operation
+ if (patches->size() > 0)
+ return patches;
+ p = new Patch(read, memory_order_acquire, write,
+ memory_order_release);
+ if (p->isApplicable())
+ patches->push_back(p);
+ return patches;
+}
+
+/** Impose synchronization between the begin and end actions; paths is a list
+ * of paths, each of which represents a union of reads-from & sequence-before
+ * (rfUsb) edges. We then strengthen the current inference as necessary.
+ */
+bool SCFence::imposeSync(InferenceList *inferList,
+ paths_t *paths, const ModelAction *begin, const ModelAction *end) {
+ if (!inferList)
+ return false;
+ bool res = false;
+ for (paths_t::iterator i_paths = paths->begin(); i_paths != paths->end(); i_paths++) {
+		// Iterate over all the possible paths
+ path_t *path = *i_paths;
+ InferenceList *cands = new InferenceList;
+ // Impose synchronization by path
+ if (imposeSync(cands, path, begin, end))
+ res = true;
+ inferList->append(cands);
+ }
+ return res;
+}
+
+/** Impose synchronization between the begin and end actions along a single
+ * path, which represents a union of reads-from & sequence-before (rfUsb)
+ * edges. We then strengthen the current inference as necessary.
+ */
+bool SCFence::imposeSync(InferenceList *inferList,
+ path_t *path, const ModelAction *begin, const ModelAction *end) {
+
+ bool release_seq = isReleaseSequence(path);
+ SnapVector<Patch*> *patches;
+ if (release_seq) {
+ const ModelAction *relHead = path->front()->get_reads_from(),
+ *lastRead = path->back();
+ patches = getAcqRel(lastRead, end, relHead, begin);
+ if (patches->size() == 0)
+ return false;
+ inferList->applyPatch(getCurInference(), patches);
+ delete patches;
+		// Bail out now for the release sequence; the single rel/acq fix above already covers the whole path
+ return true;
+ }
+
+ for (path_t::iterator it = path->begin(); it != path->end(); it++) {
+ const ModelAction *read = *it,
+ *write = read->get_reads_from(),
+ *prevRead = NULL, *nextRead;
+
+ const ModelAction *readBound = NULL,
+ *writeBound = NULL;
+		it++;
+		nextRead = (it != path->end()) ? *it : NULL;
+		it--;
+ if (prevRead) {
+ readBound = prevRead->get_reads_from();
+ } else {
+ readBound = end;
+ }
+ if (nextRead) {
+ writeBound = nextRead;
+ } else {
+ writeBound = begin;
+ }
+
+ /* Extend to support rel/acq fences synchronization here */
+ patches = getAcqRel(read, readBound, write, writeBound);
+
+ if (patches->size() == 0) {
+ // We cannot strengthen the inference
+ return false;
+ }
+
+ inferList->applyPatch(getCurInference(), patches);
+ delete patches;
+ }
+ return true;
+}
+
+/** Impose SC orderings on related actions (including fences) so that we
+ * either establish mo between act1 & act2 (act1 -mo-> act2) when they cannot
+ * establish hb, or ensure that act2 cannot read from any action that is
+ * modification-ordered before act1. Both requirements are consistent with the
+ * following strengthening strategy (see the sketch below):
+ * 1. make both act1 and act2 SC;
+ * 2. if there are fences between act1 & act2, impose SC on a pair of fences
+ * or on a fence together with the corresponding action.
+ */
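+/* Illustrative sketch (assumed): for act1 -sb-> fence1 ... fence2 -sb-> act2,
+ * where both fences still carry wildcard parameters, strengthening fence1 and
+ * fence2 to memory_order_seq_cst can order act1 before act2 without making
+ * act1 and act2 themselves SC. */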
+bool SCFence::imposeSC(action_list_t * actions, InferenceList *inferList, const ModelAction *act1, const ModelAction *act2) {
+ if (!inferList) {
+ return false;
+ }
+ action_list_t::iterator act1Iter = std::find(actions->begin(),
+ actions->end(), act1);
+ action_list_t::iterator act2Iter = std::find(act1Iter,
+ actions->end(), act2);
+
+ action_list_t::iterator it = act1Iter;
+ it++;
+ action_list_t *fences = new action_list_t;
+ int size = 0;
+ while (it != act2Iter) {
+ ModelAction *fence = *it;
+ it++;
+ if (!fence->is_fence())
+ continue;
+ if (!is_wildcard(fence->get_original_mo()))
+ continue;
+ fences->push_back(fence);
+ size++;
+ }
+
+ Patch *p;
+ SnapVector<Patch*> *patches = new SnapVector<Patch*>;
+
+ bool twoFences = false;
+ // Impose SC on two fences
+ for (action_list_t::iterator fit1 = fences->begin(); fit1 != fences->end();
+ fit1++) {
+ ModelAction *fence1 = *fit1;
+ action_list_t::iterator fit2 = fit1;
+ fit2++;
+ for (; fit2 != fences->end(); fit2++) {
+ ModelAction *fence2 = *fit2;
+ p = new Patch(fence1, memory_order_seq_cst, fence2, memory_order_seq_cst);
+ if (p->isApplicable()) {
+ Inference *curInfer = getCurInference();
+ memory_order mo1 = (*curInfer)[get_wildcard_id(fence1->get_original_mo())];
+ memory_order mo2 = (*curInfer)[get_wildcard_id(fence2->get_original_mo())];
+ // We can avoid this by adding two fences
+ if (mo1 != memory_order_seq_cst || mo2 != memory_order_seq_cst)
+ twoFences = true;
+ patches->push_back(p);
+ }
+ }
+ }
+
+ // Just impose SC on one fence
+ if (!twoFences) {
+ for (action_list_t::iterator fit = fences->begin(); fit != fences->end();
+ fit++) {
+ ModelAction *fence = *fit;
+ model_print("one fence\n");
+ fence->print();
+ p = new Patch(act1, memory_order_seq_cst, fence, memory_order_seq_cst);
+ if (p->isApplicable()) {
+ // We can avoid this by adding two fences
+ patches->push_back(p);
+ }
+ p = new Patch(act2, memory_order_seq_cst, fence, memory_order_seq_cst);
+ if (p->isApplicable()) {
+ // We can avoid this by adding two fences
+ patches->push_back(p);
+ }
+ }
+
+ p = new Patch(act1, memory_order_seq_cst, act2, memory_order_seq_cst);
+ if (p->isApplicable()) {
+ patches->push_back(p);
+ }
+ }
+
+ if (patches->size() > 0) {
+ inferList->applyPatch(getCurInference(), patches);
+ return true;
+ }
+ return false;
+}
+
+/** A subroutine that calculates the potential fixes for the non-SC pattern
+ * (a), a.k.a. old-value reading. The problematic shape is:
+ * write -isc-> write2 && write2 -isc-> read && write -rf-> read.
+ * The fix can be two-step (see the illustrative sketch below):
+ * a. make write -mo-> write2; we can accomplish this by imposing hb between
+ * write and write2, and if that is not possible, make write -sc-> write2;
+ * b. make write2 -hb-> read, and if that is not possible, make write2 -sc-> read.
+ */
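+/* Illustrative interleaving for pattern (a) (assumed, not from a real trace):
+ *   T1: write(x, 1)    // write
+ *   T2: write(x, 2)    // write2, isc-ordered after write
+ *   T1: read(x) == 1   // reads the old value written by write
+ */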
+InferenceList* SCFence::getFixesFromPatternA(action_list_t *list, action_list_t::iterator readIter, action_list_t::iterator writeIter) {
+ ModelAction *read = *readIter,
+ *write = *writeIter;
+
+ InferenceList *candidates = new InferenceList;
+ paths_t *paths1, *paths2;
+
+ // Find all writes between the write1 and the read
+ action_list_t *write2s = new action_list_t();
+ ModelAction *write2;
+ action_list_t::iterator findIt = writeIter;
+ findIt++;
+ do {
+ write2 = *findIt;
+ if (write2->is_write() && write2->get_location() ==
+ write->get_location()) {
+ write2s->push_back(write2);
+ }
+ findIt++;
+ } while (findIt != readIter);
+
+ // Found all the possible write2s
+ FENCE_PRINT("write2s set size: %ld\n", write2s->size());
+ for (action_list_t::iterator itWrite2 = write2s->begin();
+ itWrite2 != write2s->end(); itWrite2++) {
+ InferenceList *tmpCandidates = new InferenceList;
+ write2 = *itWrite2;
+ // Whether the write and the write2 originally have modification order
+ bool isWritesMO = false;
+ FENCE_PRINT("write2:\n");
+ ACT_PRINT(write2);
+ // write1->write2 (write->write2)
+ // Whether write -mo-> write2
+ if (!mo_graph->checkReachable(write, write2)) {
+ paths1 = get_rf_sb_paths(write, write2);
+ if (paths1->size() > 0) {
+ FENCE_PRINT("From write1 to write2: \n");
+ print_rf_sb_paths(paths1, write, write2);
+ // FIXME: Need to make sure at least one path is feasible; what
+ // if we got empty candidates here, maybe should then impose SC,
+ // same in the write2->read
+ imposeSync(tmpCandidates, paths1, write, write2);
+ } else {
+ FENCE_PRINT("Have to impose sc on write1 & write2: \n");
+ ACT_PRINT(write);
+ ACT_PRINT(write2);
+ imposeSC(list, tmpCandidates, write, write2);
+ }
+ } else {
+ if (!write->happens_before(write2)) {
+ isWritesMO = true;
+ }
+ FENCE_PRINT("write1 mo before write2. \n");
+ }
+
+ // write2->read (write2->read)
+ // now we only need to make write2 -hb-> read
+ if (!write2->happens_before(read)) {
+ paths2 = get_rf_sb_paths(write2, read);
+ if (paths2->size() > 0) {
+ FENCE_PRINT("From write2 to read: \n");
+ print_rf_sb_paths(paths2, write2, read);
+ imposeSync(tmpCandidates, paths2, write2, read);
+ } else {
+ FENCE_PRINT("Have to impose sc on write2 & read: \n");
+ ACT_PRINT(write2);
+ ACT_PRINT(read);
+ imposeSC(list, tmpCandidates, write2, read);
+ if (isWritesMO) {
+ // Also need to make sure write -sc/hb-> write2
+ FENCE_PRINT("Also have to impose hb/sc on write & write2: \n");
+ ACT_PRINT(write);
+ ACT_PRINT(write2);
+ paths1 = get_rf_sb_paths(write, write2);
+ if (paths1->size() > 0) {
+ FENCE_PRINT("Impose hb, from write1 to write2: \n");
+ print_rf_sb_paths(paths1, write, write2);
+ imposeSync(tmpCandidates, paths1, write, write2);
+ } else {
+ FENCE_PRINT("Also have to impose sc on write1 & write2: \n");
+ ACT_PRINT(write);
+ ACT_PRINT(write2);
+ imposeSC(list, tmpCandidates, write, write2);
+ }
+ }
+ }
+ } else {
+ FENCE_PRINT("write2 hb before read. \n");
+ }
+ candidates->append(tmpCandidates);
+ }
+ // Return the complete list of candidates
+ return candidates;
+}
+
+/** To fix this pattern, we have two options (see the illustrative sketch
+ * below):
+ * 1. impose futureWrite -hb-> read so that the SC analysis orders the write
+ * before the read;
+ * 2. impose read -hb-> futureWrite so that the reads-from edge from
+ * futureWrite to the read is not allowed.
+ */
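+/* Illustrative interleaving for pattern (b) (assumed): a read observes a
+ * value that only appears later in the SC order, e.g.
+ *   T1: read(x) == 1   // read
+ *   T2: write(x, 1)    // futureWrite, isc-ordered after the read
+ */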
+InferenceList* SCFence::getFixesFromPatternB(action_list_t *list, action_list_t::iterator readIter, action_list_t::iterator writeIter) {
+ InferenceList *res = new InferenceList,
+ *candidates = new InferenceList;
+
+ ModelAction *read = *readIter,
+ *write = *writeIter;
+ // Fixing one direction (read -> futureWrite)
+ paths_t *paths1 = get_rf_sb_paths(read, write);
+ if (paths1->size() > 0) {
+ FENCE_PRINT("From read to future write: \n");
+ print_rf_sb_paths(paths1, read, write);
+ imposeSync(res, paths1, read, write);
+ }
+
+ // Fixing the other direction (futureWrite -> read) for one edge case
+ paths_t *paths2 = get_rf_sb_paths(write, read);
+ FENCE_PRINT("From future write to read (edge case): \n");
+ print_rf_sb_paths(paths2, write, read);
+ imposeSync(candidates, paths2, write, read);
+
+ // Append the candidates to the res list
+ res->append(candidates);
+ return res;
+}
+
+bool SCFence::addFixesNonSC(action_list_t *list) {
+ bool added = false;
+ for (action_list_t::iterator it = list->begin(); it != list->end(); it++) {
+ InferenceList *candidates = NULL;
+ ModelAction *act = *it;
+
+ // Save the iterator of the read and the write
+ action_list_t::iterator readIter = it, writeIter;
+ if (act->get_seq_number() > 0) {
+ // Annotated reads will be ignored
+ if (scgen->getBadrfset()->contains(act) &&
+ !scgen->getAnnotatedReadSet()->contains(act)) {
+ const ModelAction *write = act->get_reads_from();
+ // Check reading old or future value
+ writeIter = std::find(list->begin(),
+ list->end(), write);
+ action_list_t::iterator findIt = std::find(list->begin(),
+ readIter, write);
+				bool readOldVal = (findIt != readIter);
+
+ if (readOldVal) { // Pattern (a) read old value
+ FENCE_PRINT("Running through pattern (a)!\n");
+ candidates = getFixesFromPatternA(list, readIter, writeIter);
+ // Add candidates pattern (a)
+
+ added = addCandidates(candidates);
+ } else { // Pattern (b) read future value
+ // act->read, write->futureWrite
+ FENCE_PRINT("Running through pattern (b)!\n");
+ candidates = getFixesFromPatternB(list, readIter, writeIter);
+ // Add candidates pattern (b)
+ added = addCandidates(candidates);
+ }
+ // Just eliminate the first cycle we see in the execution
+ break;
+ }
+ }
+ }
+ return added;
+}
+
+
+/** Return false to indicate there are no fixes for this execution. This can
+ * happen for specific reasons, such as a user-specified assertion failure */
+bool SCFence::addFixesBuggyExecution(action_list_t *list) {
+ InferenceList *candidates = new InferenceList;
+ bool foundFix = false;
+ bool added = false;
+ for (action_list_t::reverse_iterator rit = list->rbegin(); rit !=
+ list->rend(); rit++) {
+ ModelAction *uninitRead = *rit;
+ if (uninitRead->get_seq_number() > 0) {
+ if (!uninitRead->is_read() ||
+ !uninitRead->get_reads_from()->is_uninitialized())
+ continue;
+ for (action_list_t::iterator it = list->begin(); it !=
+ list->end(); it++) {
+ ModelAction *write = *it;
+ if (write->same_var(uninitRead)) {
+ // Now we can try to impose sync write hb-> uninitRead
+ paths_t *paths1 = get_rf_sb_paths(write, uninitRead);
+ if (paths1->size() > 0) {
+						FENCE_PRINT("Running through pattern (b') (uninit read)!\n");
+ print_rf_sb_paths(paths1, write, uninitRead);
+ imposeSync(candidates, paths1, write, uninitRead);
+ added = addCandidates(candidates);
+ if (added) {
+ foundFix = true;
+ break;
+ }
+ }
+ }
+ }
+ }
+ if (foundFix)
+ break;
+ }
+
+ return added;
+}
+
+/** Return false to indicate there is no implied mo for this execution. The
+ * idea is to count the number of reads between write1 and write2; if that
+ * count exceeds the configured bound, the analysis concludes that the program
+ * is stuck in an infinite loop because write1 does not establish proper
+ * synchronization with write2, so the reads can read from write1 forever.
+ * See the illustrative sketch below. */
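+/* Illustrative scenario (assumed): a spin loop such as
+ *   T1: write(flag, 1); ...; write(flag, 2)
+ *   T2: while (read(flag) != 2) {}
+ * where T2 keeps reading the value of the first write. If the number of such
+ * reads between the two writes exceeds implicitMOReadBound, we infer that
+ * write1 should happen-before write2. */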
+bool SCFence::addFixesImplicitMO(action_list_t *list) {
+ bool added = false;
+ InferenceList *candidates = new InferenceList;
+ for (action_list_t::reverse_iterator rit2 = list->rbegin(); rit2 !=
+ list->rend(); rit2++) {
+ ModelAction *write2 = *rit2;
+ if (!write2->is_write())
+ continue;
+ action_list_t::reverse_iterator rit1 = rit2;
+ rit1++;
+ for (; rit1 != list->rend(); rit1++) {
+ ModelAction *write1 = *rit1;
+ if (!write1->is_write() || write1->get_location() !=
+ write2->get_location())
+ continue;
+ int readCnt = 0;
+ action_list_t::reverse_iterator ritRead = rit2;
+ ritRead++;
+ for (; ritRead != rit1; ritRead++) {
+ ModelAction *read = *ritRead;
+ if (!read->is_read() || read->get_location() !=
+ write1->get_location())
+ continue;
+ readCnt++;
+ }
+ if (readCnt > getImplicitMOReadBound()) {
+ // Found it, make write1 --hb-> write2
+ bool isMO = execution->get_mo_graph()->checkReachable(write1, write2);
+				if (isMO) // Only impose mo when mo has not already been imposed
+ break;
+ FENCE_PRINT("Running through pattern (c) -- implicit mo!\n");
+ FENCE_PRINT("Read count between the two writes: %d\n", readCnt);
+ FENCE_PRINT("implicitMOReadBound: %d\n",
+ getImplicitMOReadBound());
+ ACT_PRINT(write1);
+ ACT_PRINT(write2);
+ paths_t *paths1 = get_rf_sb_paths(write1, write2);
+ if (paths1->size() > 0) {
+ FENCE_PRINT("From write1 to write2: \n");
+ print_rf_sb_paths(paths1, write1, write2);
+ imposeSync(candidates, paths1, write1, write2);
+ // Add the candidates as potential results
+ added = addCandidates(candidates);
+ if (added)
+ return true;
+ } else {
+ FENCE_PRINT("Cannot establish hb between write1 & write2: \n");
+ ACT_PRINT(write1);
+ ACT_PRINT(write2);
+ }
+ }
+ break;
+ }
+ }
+ return false;
+}
+
+bool SCFence::checkDataRace(action_list_t *list, ModelAction **act1,
+ ModelAction **act2) {
+
+ SnapList<action_list_t*> *opList = new SnapList<action_list_t*>;
+ /** Collect the operations per location */
+ for (action_list_t::iterator iter = list->begin(); iter != list->end();
+ iter++) {
+ ModelAction *act = *iter;
+ if (act->get_original_mo() != memory_order_normal)
+ continue;
+ bool foundIt = false;
+ for (SnapList<action_list_t*>::iterator listIter = opList->begin();
+ listIter != opList->end(); listIter++) {
+ action_list_t *list = *listIter;
+ ModelAction *listAct = *(list->begin());
+ if (listAct->get_location() != act->get_location())
+ continue;
+ foundIt = true;
+ list->push_back(act);
+ }
+ if (!foundIt) {
+ action_list_t *newList = new action_list_t;
+ newList->push_back(act);
+ opList->push_back(newList);
+ }
+ }
+
+ if (opList->size() == 0)
+ return false;
+	/** Now check whether any two operations on the same location fail to establish hb */
+ for (SnapList<action_list_t*>::iterator listIter = opList->begin();
+ listIter != opList->end(); listIter++) {
+ action_list_t *list = *listIter;
+ action_list_t::iterator it1, it2;
+ for (it1 = list->begin(); it1 != list->end(); it1++) {
+ ModelAction *raceAct1 = *it1;
+ it2 = it1;
+ it2++;
+ for (; it2 != list->end(); it2++) {
+ ModelAction *raceAct2 = *it2;
+ if (!raceAct1->happens_before(raceAct2) &&
+ !raceAct2->happens_before(raceAct1)) {
+ *act1 = raceAct1;
+ *act2 = raceAct2;
+ return true;
+ }
+ }
+ }
+ }
+ return false;
+}
+
+ModelAction* SCFence::sbPrevAction(ModelAction *act) {
+ int idx = id_to_int(act->get_tid());
+ // Retrieves the thread list of the action
+ action_list_t *list = &(*scgen->getDupThreadLists())[idx];
+ action_list_t::iterator iter = std::find(list->begin(),
+ list->end(), act);
+ return *--iter;
+}
+
+ModelAction* SCFence::sbNextAction(ModelAction *act) {
+ int idx = id_to_int(act->get_tid());
+ // Retrieves the thread list of the action
+ action_list_t *list = &(*scgen->getDupThreadLists())[idx];
+ action_list_t::iterator iter = std::find(list->begin(),
+ list->end(), act);
+ return *++iter;
+}
+
+bool SCFence::addFixesDataRace(action_list_t *list) {
+ ModelAction *act1, *act2;
+ bool hasRace = checkDataRace(list, &act1, &act2);
+ if (hasRace) {
+ InferenceList *candidates1 = new InferenceList,
+ *candidates2 = new InferenceList;
+ paths_t *paths1, *paths2;
+ model_print("Fixing data race: \n");
+ paths1 = get_rf_sb_paths(sbNextAction(act1), sbPrevAction(act2));
+ paths2 = get_rf_sb_paths(sbNextAction(act2), sbPrevAction(act1));
+ bool added = false;
+ if (paths1->size() > 0) {
+ model_print("paths1: \n");
+ print_rf_sb_paths(paths1, act1, act2);
+ imposeSync(candidates1, paths1, act1, act2);
+			added = addCandidates(candidates1);
+ if (paths2->size() > 0) {
+ model_print("paths2: \n");
+ print_rf_sb_paths(paths2, act2, act1);
+ imposeSync(candidates2, paths2, act2, act1);
+ added |= addCandidates(candidates2);
+ }
+ }
+ return added;
+ }
+ return false;
+}
+
+bool SCFence::addFixes(action_list_t *list, fix_type_t type) {
+ // First check whether this is a later weakened inference
+ if (!getCurInference()->getShouldFix())
+ return false;
+ bool added = false;
+ switch(type) {
+ case BUGGY_EXECUTION:
+ added = addFixesBuggyExecution(list);
+ break;
+ case IMPLICIT_MO:
+ added = addFixesImplicitMO(list);
+ break;
+ case NON_SC:
+ added = addFixesNonSC(list);
+ break;
+ case DATA_RACE:
+ added = addFixesDataRace(list);
+ break;
+ default:
+ break;
+ }
+ if (added && isBuggy()) {
+		// If this is a buggy inference and we have found fixes for it, set
+		// the flag
+ setHasFixes(true);
+ }
+ return added;
+}
+
+
+bool SCFence::routineBacktrack(bool feasible) {
+ model_print("Backtrack routine:\n");
+
+ /******** commitCurInference ********/
+ Inference *curInfer = getCurInference();
+ commitInference(curInfer, feasible);
+ if (feasible) {
+ if (!isBuggy()) {
+ model_print("Found one result!\n");
+ } else if (!hasFixes()) { // Buggy and have no fixes
+ model_print("Found one buggy candidate!\n");
+ }
+ curInfer->print();
+ // Try to weaken this inference
+ if (weaken && !isBuggy()) {
+ getSet()->addWeakerInference(curInfer);
+ }
+
+ } else {
+ if (curInfer->getShouldFix()) {
+ model_print("Reach an infeasible inference!\n");
+ } else {
+ model_print("Get an unweakenable inference!\n");
+ curInfer->print(true);
+ }
+ }
+
+ /******** getNextInference ********/
+ Inference *next = getNextInference();
+
+ if (next) {
+ /******** setCurInference ********/
+ setCurInference(next);
+ /******** restartModelChecker ********/
+ restartModelChecker();
+ return true;
+ } else {
+ // Finish exploring the whole process
+ model_print("We are done with the whole process!\n");
+ model_print("The results are as the following:\n");
+ printResults();
+ printCandidates();
+
+ /******** exitModelChecker ********/
+ exitModelChecker();
+
+ return false;
+ }
+}
+
+void SCFence::routineAfterAddFixes() {
+ model_print("Add fixes routine begin:\n");
+
+ /******** getNextInference ********/
+ Inference *next = getNextInference();
+ //ASSERT (next);
+
+ if (next) {
+ /******** setCurInference ********/
+ setCurInference(next);
+ /******** restartModelChecker ********/
+ restartModelChecker();
+
+ model_print("Add fixes routine end:\n");
+		model_print("Restart checking with the following inference:\n");
+ getCurInference()->print();
+ model_print("Checking...\n");
+ } else {
+ routineBacktrack(false);
+ }
+}
+
+
+
+/** This function finds all the paths, each of which is a union of reads-from
+ * & sequence-before edges, between act1 & act2. See the illustrative sketch
+ * below. */
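+/* Illustrative example (assumed): with
+ *   T1: act1 = W(x) ...
+ *   T2: R(x) -sb-> W(y) ...
+ *   T3: R(y) -sb-> act2
+ * where R(x) reads from W(x) and R(y) reads from W(y), the list
+ * [R(x), R(y)] is one rfUsb path returned for (act1, act2). */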
+paths_t * SCFence::get_rf_sb_paths(const ModelAction *act1, const ModelAction *act2) {
+ int idx1 = id_to_int(act1->get_tid()),
+ idx2 = id_to_int(act2->get_tid());
+ // Retrieves the two lists of actions of thread1 & thread2
+ action_list_t *list1 = &(*dup_threadlists)[idx1],
+ *list2 = &(*dup_threadlists)[idx2];
+ if (list1->size() == 0 || list2->size() == 0) {
+ return new paths_t();
+ }
+
+ // The container for all possible results
+ paths_t *paths = new paths_t();
+ // A stack that records all current possible paths
+ paths_t *stack = new paths_t();
+ path_t *path;
+ // Initialize the stack with loads sb-ordered before act2
+ for (action_list_t::iterator it2 = list2->begin(); it2 != list2->end(); it2++) {
+ ModelAction *act = *it2;
+		// Add read actions that are not sb-after act2 (including act2)
+ if (act->get_seq_number() > act2->get_seq_number())
+ break;
+ if (!act->is_read())
+ continue;
+ // Each start with a possible path
+ path = new path_t();
+ path->push_front(act);
+ stack->push_back(path);
+ }
+ while (stack->size() > 0) {
+ path = stack->back();
+ stack->pop_back();
+ // The latest read in the temporary path
+ ModelAction *read = path->front();
+ const ModelAction *write = read->get_reads_from();
+ // If the read is uninitialized, don't do it
+		// If the read is from an uninitialized write, skip this path
+ delete path;
+ continue;
+ }
+		/** In case of a cyclic rfUsb chain, make sure the write appears in a
+		 * new thread or in an existing thread that is sequenced before the
+		 * already added read actions
+		 */
+ bool loop = false;
+ for (path_t::iterator p_it = path->begin(); p_it != path->end();
+ p_it++) {
+ ModelAction *addedRead = *p_it;
+ if (id_to_int(write->get_tid()) == id_to_int(addedRead->get_tid())) {
+ // In the same thread
+ if (write->get_seq_number() >= addedRead->get_seq_number()) {
+ // Have a loop
+ delete path;
+ loop = true;
+ break;
+ }
+ }
+ }
+ // Not a useful rfUsb path (loop)
+ if (loop) {
+ continue;
+ }
+
+ unsigned write_seqnum = write->get_seq_number();
+ // We then check if we got a valid path
+ if (id_to_int(write->get_tid()) == idx1 &&
+ write_seqnum >= act1->get_seq_number()) {
+ // Find a path
+ paths->push_back(path);
+ continue;
+ }
+ // Extend the path with the latest read
+ int idx = id_to_int(write->get_tid());
+ action_list_t *list = &(*dup_threadlists)[idx];
+ for (action_list_t::iterator it = list->begin(); it != list->end(); it++) {
+ ModelAction *act = *it;
+ if (act->get_seq_number() > write_seqnum) // Could be RMW
+ break;
+ if (!act->is_read())
+ continue;
+ path_t *new_path = new path_t(*path);
+ new_path->push_front(act);
+ stack->push_back(new_path);
+ }
+ delete path;
+ }
+ return paths;
+}
+
+void SCFence::print_rf_sb_paths(paths_t *paths, const ModelAction *start, const ModelAction *end) {
+ FENCE_PRINT("Starting from:\n");
+ ACT_PRINT(start);
+ FENCE_PRINT("\n");
+ for (paths_t::iterator paths_i = paths->begin(); paths_i !=
+ paths->end(); paths_i++) {
+ path_t *path = *paths_i;
+ FENCE_PRINT("Path %ld, size (%ld):\n", distance(paths->begin(), paths_i),
+ path->size());
+ path_t::iterator it = path->begin(), i_next;
+ for (; it != path->end(); it++) {
+ i_next = it;
+ i_next++;
+ const ModelAction *read = *it,
+ *write = read->get_reads_from(),
+ *next_read = (i_next != path->end()) ? *i_next : NULL;
+ ACT_PRINT(write);
+ if (next_read == NULL || next_read->get_reads_from() != read) {
+ // Not the same RMW, also print the read operation
+ ACT_PRINT(read);
+ }
+ }
+ // Output a linebreak at the end of the path
+ FENCE_PRINT("\n");
+ }
+ FENCE_PRINT("Ending with:\n");
+ ACT_PRINT(end);
+}
+
+/******************** SCFence-related Functions (End) ********************/
--- /dev/null
+#ifndef _SCFENCE_H
+#define _SCFENCE_H
+#include "traceanalysis.h"
+#include "scanalysis.h"
+#include "hashtable.h"
+#include "memoryorder.h"
+#include "action.h"
+
+#include "wildcard.h"
+#include "patch.h"
+#include "inference.h"
+#include "inferlist.h"
+#include "inferset.h"
+
+#include "model.h"
+#include "cyclegraph.h"
+#include "scgen.h"
+
+#include <sys/time.h>
+
+#ifdef __cplusplus
+using std::memory_order;
+#endif
+
+#define DEFAULT_REPETITIVE_READ_BOUND 20
+
+#define FENCE_OUTPUT
+
+#ifdef FENCE_OUTPUT
+
+#define FENCE_PRINT model_print
+
+#define ACT_PRINT(x) (x)->print()
+
+#define CV_PRINT(x) (x)->print()
+
+#define WILDCARD_ACT_PRINT(x)\
+ FENCE_PRINT("Wildcard: %d\n", get_wildcard_id_zero((x)->get_original_mo()));\
+ ACT_PRINT(x);
+
+#else
+
+#define FENCE_PRINT
+
+#define ACT_PRINT(x)
+
+#define CV_PRINT(x)
+
+#define WILDCARD_ACT_PRINT(x)
+
+#endif
+
+/* Forward declaration */
+class SCFence;
+class Inference;
+class InferenceList;
+class PatchUnit;
+class Patch;
+class SCGenerator;
+
+extern SCFence *scfence;
+
+typedef action_list_t path_t;
+/** A list of load operations can represent the union of reads-from &
+ * sequence-before edges, and a list of such lists represents all the possible
+ * rfUsb paths between two nodes, defined as sync_paths_t here.
+ */
+typedef SnapList<path_t *> sync_paths_t;
+typedef sync_paths_t paths_t;
+
+typedef struct scfence_priv {
+ scfence_priv() {
+ inferenceSet = new InferenceSet();
+ curInference = new Inference();
+ candidateFile = NULL;
+ inferImplicitMO = false;
+ hasRestarted = false;
+ implicitMOReadBound = DEFAULT_REPETITIVE_READ_BOUND;
+ timeout = 0;
+ gettimeofday(&lastRecordedTime, NULL);
+ }
+
+ /** The set of the InferenceNode we maintain for exploring */
+ InferenceSet *inferenceSet;
+
+ /** The current inference */
+ Inference *curInference;
+
+	/** The file which provides a list of candidate wildcard inferences */
+ char *candidateFile;
+
+ /** Whether we consider the repetitive read to infer mo (_m) */
+ bool inferImplicitMO;
+
+	/** The bound of repetitive reads above which we think the write should be the last write (_b) */
+ int implicitMOReadBound;
+
+ /** Whether we have restarted the model checker */
+ bool hasRestarted;
+
+	/** The amount of time (in seconds) set to force the analysis to backtrack */
+ int timeout;
+
+ /** The time we recorded last time */
+ struct timeval lastRecordedTime;
+
+ MEMALLOC
+} scfence_priv;
+
+typedef enum fix_type {
+ BUGGY_EXECUTION,
+ IMPLICIT_MO,
+ NON_SC,
+ DATA_RACE
+} fix_type_t;
+
+
+class SCFence : public TraceAnalysis {
+ public:
+ SCFence();
+ ~SCFence();
+ virtual void setExecution(ModelExecution * execution);
+ virtual void analyze(action_list_t *);
+ virtual const char * name();
+ virtual bool option(char *);
+ virtual void finish();
+
+ virtual void inspectModelAction(ModelAction *ac);
+ virtual void actionAtInstallation();
+ virtual void actionAtModelCheckingFinish();
+
+ SNAPSHOTALLOC
+
+ private:
+ /** The SC list generator */
+ SCGenerator *scgen;
+
+ struct sc_statistics *stats;
+
+ bool time;
+
+ /** Should we weaken the inferences later */
+ bool weaken;
+
+ /** Modification order graph */
+ const CycleGraph *mo_graph;
+
+	/** A duplicate of the thread lists */
+ SnapVector<action_list_t> *dup_threadlists;
+ ModelExecution *execution;
+
+ /** A set of actions that should be ignored in the partially SC analysis */
+ HashTable<const ModelAction*, const ModelAction*, uintptr_t, 4> ignoredActions;
+ int ignoredActionSize;
+
+ /** The non-snapshotting private compound data structure that has the
+ * necessary stuff for the scfence analysis */
+ static scfence_priv *priv;
+
+ /** For the SC analysis */
+ void update_stats();
+
+ /** Get the custom input number for implicit bound */
+ int getImplicitMOBound(char *opt);
+
+ /** Get the input file for initial parameter assignments */
+ char* getInputFileName(char *opt);
+
+ /** The function to parse the SCFence plugin options */
+ bool parseOption(char *opt);
+
+ /** Helper function for option parsing */
+ char* parseOptionHelper(char *opt, int *optIdx);
+
+ /** Initialize the search with a file with a list of potential candidates */
+ void initializeByFile();
+
+ /** A pass through the original actions to extract the ignored actions
+ * (partially SC); it must be called after the threadlist has been built */
+ void extractIgnoredActions();
+
+	/** Functions that infer the parameters by imposing synchronization */
+ paths_t *get_rf_sb_paths(const ModelAction *act1, const ModelAction *act2);
+
+	/** Printing function for the paths used to impose happens-before; only
+	 * for debugging purposes */
+ void print_rf_sb_paths(paths_t *paths, const ModelAction *start, const ModelAction *end);
+
+ /** Whether there's an edge between from and to actions */
+ bool isSCEdge(const ModelAction *from, const ModelAction *to) {
+ return from->is_seqcst() && to->is_seqcst();
+ }
+
+ bool isConflicting(const ModelAction *act1, const ModelAction *act2) {
+ return act1->get_location() == act2->get_location() ? (act1->is_write()
+ || act2->is_write()) : false;
+ }
+
+ /** The two important routine when we find or cannot find a fix for the
+ * current inference */
+ void routineAfterAddFixes();
+
+ bool routineBacktrack(bool feasible);
+
+ /** A subroutine to find candidates for pattern (a) */
+ InferenceList* getFixesFromPatternA(action_list_t *list, action_list_t::iterator readIter, action_list_t::iterator writeIter);
+
+ /** A subroutine to find candidates for pattern (b) */
+ InferenceList* getFixesFromPatternB(action_list_t *list, action_list_t::iterator readIter, action_list_t::iterator writeIter);
+
+ /** Check if there exists data races, if so, overwrite act1 & act2 to be the
+ * two */
+ bool checkDataRace(action_list_t *list, ModelAction **act1,
+ ModelAction **act2);
+
+ /** Check if there exists data races, if so, generate the fixes */
+ bool addFixesDataRace(action_list_t *list);
+
+ /** The previous action in sb */
+ ModelAction* sbPrevAction(ModelAction *act);
+ /** The next action in sb */
+ ModelAction* sbNextAction(ModelAction *act);
+
+ /** When getting a non-SC execution, find potential fixes and add it to the
+ * set */
+ bool addFixesNonSC(action_list_t *list);
+
+ /** When getting a buggy execution (we only target the uninitialized loads
+ * here), find potential fixes and add it to the set */
+ bool addFixesBuggyExecution(action_list_t *list);
+
+ /** When getting an SC and bug-free execution, we check whether we should
+ * fix the implicit mo problems. If so, find potential fixes and add it to
+ * the set */
+ bool addFixesImplicitMO(action_list_t *list);
+
+ /** General fixes wrapper */
+ bool addFixes(action_list_t *list, fix_type_t type);
+
+ /** Check whether a chosen reads-from path is a release sequence */
+ bool isReleaseSequence(path_t *path);
+
+ /** Impose synchronization to the existing list of inferences (inferList)
+ * according to path, begin is the beginning operation, and end is the
+ * ending operation. */
+ bool imposeSync(InferenceList* inferList, paths_t *paths, const
+ ModelAction *begin, const ModelAction *end);
+
+ bool imposeSync(InferenceList* inferList, path_t *path, const
+ ModelAction *begin, const ModelAction *end);
+
+ /** For a specific pair of write and read actions, figure out the possible
+ * acq/rel fences that can impose hb plus the read & write sync pair */
+ SnapVector<Patch*>* getAcqRel(const ModelAction *read,
+ const ModelAction *readBound, const ModelAction *write,
+ const ModelAction *writeBound);
+
+ /** Impose SC to the existing list of inferences (inferList) by action1 &
+ * action2. */
+ bool imposeSC(action_list_t * actions, InferenceList *inferList, const ModelAction *act1,
+ const ModelAction *act2);
+
+	/** When we finish model checking or cannot further strengthen with the
+ * current inference, we commit the current inference (the node at the back
+ * of the set) to be explored, pop it out of the set; if it is feasible,
+ * we put it in the result list */
+ void commitInference(Inference *infer, bool feasible) {
+ getSet()->commitInference(infer, feasible);
+ }
+
+ /** Get the next available unexplored node; @Return NULL
+ * if we don't have next, meaning that we are done with exploring */
+ Inference* getNextInference() {
+ return getSet()->getNextInference();
+ }
+
+ /** Add one possible node that represents a fix for the current inference;
+ * @Return true if the node to add has not been explored yet
+ */
+ bool addInference(Inference *infer) {
+ return getSet()->addInference(infer);
+ }
+
+	/** Add the list of fixes to the inference set. We have to pass in the
+	 * current inference;
+ * @Return true if the node to add has not been explored yet
+ */
+ bool addCandidates(InferenceList *candidates) {
+ return getSet()->addCandidates(getCurInference(), candidates);
+ }
+
+ void addCurInference() {
+ getSet()->addCurInference(getCurInference());
+ }
+
+ /** Print the result of inferences */
+ void printResults() {
+ getSet()->printResults();
+ }
+
+ /** Print the candidates of inferences */
+ void printCandidates() {
+ getSet()->printCandidates();
+ }
+
+	/** The number of nodes in the set (including those parent nodes already
+	 * set as explored) */
+ int setSize() {
+ return getSet()->getCandidatesSize();
+ }
+
+ /** Set the restart flag of the model checker in order to restart the
+ * checking process */
+ void restartModelChecker();
+
+ /** Set the exit flag of the model checker in order to exit the whole
+ * process */
+ void exitModelChecker();
+
+ bool modelCheckerAtExitState();
+
+ const char* net_mo_str(memory_order order);
+
+ InferenceSet* getSet() {
+ return priv->inferenceSet;
+ }
+
+ void setHasFixes(bool val) {
+ getCurInference()->setHasFixes(val);
+ }
+
+ bool hasFixes() {
+ return getCurInference()->getHasFixes();
+ }
+
+ bool isBuggy() {
+ return getCurInference()->getBuggy();
+ }
+
+ void setBuggy(bool val) {
+ getCurInference()->setBuggy(val);
+ }
+
+ Inference* getCurInference() {
+ return priv->curInference;
+ }
+
+ void setCurInference(Inference* infer) {
+ priv->curInference = infer;
+ }
+
+ char* getCandidateFile() {
+ return priv->candidateFile;
+ }
+
+ void setCandidateFile(char* file) {
+ priv->candidateFile = file;
+ }
+
+ bool getInferImplicitMO() {
+ return priv->inferImplicitMO;
+ }
+
+ void setInferImplicitMO(bool val) {
+ priv->inferImplicitMO = val;
+ }
+
+ int getImplicitMOReadBound() {
+ return priv->implicitMOReadBound;
+ }
+
+ void setImplicitMOReadBound(int bound) {
+ priv->implicitMOReadBound = bound;
+ }
+
+ int getHasRestarted() {
+ return priv->hasRestarted;
+ }
+
+ void setHasRestarted(int val) {
+ priv->hasRestarted = val;
+ }
+
+ void setTimeout(int timeout) {
+ priv->timeout = timeout;
+ }
+
+ int getTimeout() {
+ return priv->timeout;
+ }
+
+ bool isTimeout() {
+ struct timeval now;
+ gettimeofday(&now, NULL);
+		// Check whether the timeout has expired
+ struct timeval *lastRecordedTime = &priv->lastRecordedTime;
+ unsigned long long elapsedTime = (now.tv_sec*1000000 + now.tv_usec) -
+ (lastRecordedTime->tv_sec*1000000 + lastRecordedTime->tv_usec);
+
+ // Update the lastRecordedTime
+ priv->lastRecordedTime = now;
+ if (elapsedTime / 1000000.0 > priv->timeout)
+ return true;
+ else
+ return false;
+ }
+
+ /********************** SCFence-related stuff (end) **********************/
+};
+#endif
--- /dev/null
+#include "scgen.h"
+
+SCGenerator::SCGenerator() :
+ execution(NULL),
+ actions(NULL),
+ cvmap(),
+ cyclic(false),
+ badrfset(),
+ lastwrmap(),
+ threadlists(1),
+ dup_threadlists(1),
+ print_always(false),
+ print_buggy(false),
+ print_nonsc(false),
+ stats(new struct sc_statistics),
+ annotationMode(false) {
+}
+
+SCGenerator::~SCGenerator() {
+}
+
+bool SCGenerator::getCyclic() {
+ return cyclic || hasBadRF;
+}
+
+SnapVector<action_list_t>* SCGenerator::getDupThreadLists() {
+ return &dup_threadlists;
+}
+
+struct sc_statistics* SCGenerator::getStats() {
+ return stats;
+}
+
+void SCGenerator::setExecution(ModelExecution *execution) {
+ this->execution = execution;
+}
+
+void SCGenerator::setActions(action_list_t *actions) {
+ this->actions = actions;
+}
+
+void SCGenerator::setPrintAlways(bool val) {
+ this->print_always = val;
+}
+
+bool SCGenerator::getPrintAlways() {
+ return this->print_always;
+}
+
+bool SCGenerator::getHasBadRF() {
+ return this->hasBadRF;
+}
+
+void SCGenerator::setPrintBuggy(bool val) {
+ this->print_buggy = val;
+}
+
+void SCGenerator::setPrintNonSC(bool val) {
+ this->print_nonsc = val;
+}
+
+void SCGenerator::setAnnotationMode(bool val) {
+ this->annotationMode = val;
+}
+
+action_list_t * SCGenerator::getSCList() {
+ struct timeval start;
+ struct timeval finish;
+ gettimeofday(&start, NULL);
+
+ /* Build up the thread lists for general purpose */
+ int thrdNum;
+ buildVectors(&dup_threadlists, &thrdNum, actions);
+
+ fastVersion = true;
+ action_list_t *list = generateSC(actions);
+ if (cyclic) {
+ reset(actions);
+ delete list;
+ fastVersion = false;
+ list = generateSC(actions);
+ }
+ check_rf(list);
+ gettimeofday(&finish, NULL);
+ stats->elapsedtime+=((finish.tv_sec*1000000+finish.tv_usec)-(start.tv_sec*1000000+start.tv_usec));
+ update_stats();
+ return list;
+}
+
+HashTable<const ModelAction *, const ModelAction *, uintptr_t, 4> * SCGenerator::getBadrfset() {
+ return &badrfset;
+}
+
+HashTable<const ModelAction *, const ModelAction *, uintptr_t, 4 > * SCGenerator::getAnnotatedReadSet() {
+ return &annotatedReadSet;
+}
+
+void SCGenerator::print_list(action_list_t *list) {
+ model_print("---------------------------------------------------------------------\n");
+ if (cyclic || hasBadRF)
+ model_print("Not SC\n");
+ unsigned int hash = 0;
+
+ for (action_list_t::iterator it = list->begin(); it != list->end(); it++) {
+ const ModelAction *act = *it;
+ if (act->get_seq_number() > 0) {
+ if (badrfset.contains(act))
+ model_print("BRF ");
+ act->print();
+ if (badrfset.contains(act)) {
+ model_print("Desired Rf: %u \n", badrfset.get(act)->get_seq_number());
+ }
+ }
+ hash = hash ^ (hash << 3) ^ ((*it)->hash());
+ }
+ model_print("HASH %u\n", hash);
+ model_print("---------------------------------------------------------------------\n");
+}
+
+
+action_list_t * SCGenerator::generateSC(action_list_t *list) {
+ int numactions=buildVectors(&threadlists, &maxthreads, list);
+ stats->actions+=numactions;
+
+ // Analyze which actions we should ignore in the partially SC analysis
+ if (annotationMode) {
+ collectAnnotatedReads();
+ if (annotationError) {
+ model_print("Annotation error!\n");
+ return NULL;
+ }
+ }
+
+ computeCV(list);
+
+ action_list_t *sclist = new action_list_t();
+ ModelAction **array = (ModelAction **)model_calloc(1, (maxthreads + 1) * sizeof(ModelAction *));
+ int * choices = (int *) model_calloc(1, sizeof(int)*numactions);
+ int endchoice = 0;
+ int currchoice = 0;
+ int lastchoice = -1;
+ while (true) {
+ int numActions = getNextActions(array);
+ if (numActions == 0)
+ break;
+ ModelAction * act=pruneArray(array, numActions);
+ if (act == NULL) {
+ if (currchoice < endchoice) {
+ act = array[choices[currchoice]];
+ //check whether there is still another option
+ if ((choices[currchoice]+1)<numActions)
+ lastchoice=currchoice;
+ currchoice++;
+ } else {
+ act = array[0];
+ choices[currchoice]=0;
+ if (numActions>1)
+ lastchoice=currchoice;
+ currchoice++;
+ }
+ }
+ thread_id_t tid = act->get_tid();
+ //remove action
+ threadlists[id_to_int(tid)].pop_front();
+ //add ordering constraints from this choice
+ if (updateConstraints(act)) {
+ //propagate changes if we have them
+ bool prevc=cyclic;
+ computeCV(list);
+ if (!prevc && cyclic) {
+ model_print("ROLLBACK in SC\n");
+ //check whether we have another choice
+ if (lastchoice != -1) {
+ //have to reset everything
+ choices[lastchoice]++;
+ endchoice=lastchoice+1;
+ currchoice=0;
+ lastchoice=-1;
+
+ reset(list);
+ buildVectors(&threadlists, &maxthreads, list);
+ computeCV(list);
+ sclist->clear();
+ continue;
+
+ }
+ }
+ }
+ //add action to end
+ sclist->push_back(act);
+ }
+ model_free(array);
+ return sclist;
+}
+
+void SCGenerator::update_stats() {
+ if (cyclic) {
+ stats->nonsccount++;
+ } else {
+ stats->sccount++;
+ }
+}
+
+int SCGenerator::buildVectors(SnapVector<action_list_t> *threadlist, int *maxthread,
+ action_list_t *list) {
+ *maxthread = 0;
+ int numactions = 0;
+ for (action_list_t::iterator it = list->begin(); it != list->end(); it++) {
+ ModelAction *act = *it;
+ numactions++;
+ int threadid = id_to_int(act->get_tid());
+ if (threadid > *maxthread) {
+ threadlist->resize(threadid + 1);
+ *maxthread = threadid;
+ }
+ (*threadlist)[threadid].push_back(act);
+ }
+ return numactions;
+}
+
+
+bool SCGenerator::updateConstraints(ModelAction *act) {
+ bool changed = false;
+ for (int i = 0; i <= maxthreads; i++) {
+ thread_id_t tid = int_to_id(i);
+ if (tid == act->get_tid())
+ continue;
+
+ action_list_t *list = &threadlists[id_to_int(tid)];
+ for (action_list_t::iterator rit = list->begin(); rit != list->end(); rit++) {
+ ModelAction *write = *rit;
+ if (!write->is_write())
+ continue;
+ ClockVector *writecv = cvmap.get(write);
+ if (writecv->synchronized_since(act))
+ break;
+ if (write->get_location() == act->get_location()) {
+ //write is sc after act
+ merge(writecv, write, act);
+ changed = true;
+ break;
+ }
+ }
+ }
+ return changed;
+}
+
+void SCGenerator::computeCV(action_list_t *list) {
+ bool changed = true;
+ bool firsttime = true;
+ ModelAction **last_act = (ModelAction **)model_calloc(1, (maxthreads + 1) * sizeof(ModelAction *));
+
+ while (changed) {
+ changed = changed&firsttime;
+ firsttime = false;
+ bool updateFuture = false;
+
+ for (action_list_t::iterator it = list->begin(); it != list->end(); it++) {
+ ModelAction *act = *it;
+ ModelAction *lastact = last_act[id_to_int(act->get_tid())];
+ if (act->is_thread_start())
+ lastact = execution->get_thread(act)->get_creation();
+ last_act[id_to_int(act->get_tid())] = act;
+ ClockVector *cv = cvmap.get(act);
+ if (cv == NULL) {
+ cv = new ClockVector(act->get_cv(), act);
+ cvmap.put(act, cv);
+ }
+
+ if (lastact != NULL) {
+ merge(cv, act, lastact);
+ }
+ if (act->is_thread_join()) {
+ Thread *joinedthr = act->get_thread_operand();
+ ModelAction *finish = execution->get_last_action(joinedthr->get_id());
+ changed |= merge(cv, act, finish);
+ }
+ if (act->is_read()) {
+ if (fastVersion) {
+ changed |= processReadFast(act, cv);
+ } else if (annotatedReadSet.contains(act)) {
+ changed |= processAnnotatedReadSlow(act, cv, &updateFuture);
+ } else {
+ changed |= processReadSlow(act, cv, &updateFuture);
+ }
+ }
+ }
+ /* Reset the last action array */
+ if (changed) {
+ bzero(last_act, (maxthreads + 1) * sizeof(ModelAction *));
+ } else {
+ if (!fastVersion) {
+ if (!allowNonSC) {
+ allowNonSC = true;
+ changed = true;
+ } else {
+ break;
+ }
+ }
+ }
+ }
+ model_free(last_act);
+}
+
+bool SCGenerator::processReadFast(ModelAction *read, ClockVector *cv) {
+ bool changed = false;
+
+ /* Merge in the clock vector from the write */
+ const ModelAction *write = read->get_reads_from();
+ if (!write) { // The case where the write is a promise
+ return false;
+ }
+ ClockVector *writecv = cvmap.get(write);
+ changed |= merge(cv, read, write) && (*read < *write);
+
+ for (int i = 0; i <= maxthreads; i++) {
+ thread_id_t tid = int_to_id(i);
+ action_list_t *list = execution->get_actions_on_obj(read->get_location(), tid);
+ if (list == NULL)
+ continue;
+ for (action_list_t::reverse_iterator rit = list->rbegin(); rit != list->rend(); rit++) {
+ ModelAction *write2 = *rit;
+ if (!write2->is_write())
+ continue;
+ if (write2 == write)
+ continue;
+ if (write2 == read) // If read is a RMW
+ continue;
+
+ ClockVector *write2cv = cvmap.get(write2);
+ if (write2cv == NULL)
+ continue;
+ /* write -sc-> write2 &&
+ write -rf-> R =>
+ R -sc-> write2 */
+ if (write2cv->synchronized_since(write)) {
+ changed |= merge(write2cv, write2, read);
+
+ }
+
+ //looking for earliest write2 in iteration to satisfy this
+ /* write2 -sc-> R &&
+ write -rf-> R =>
+ write2 -sc-> write */
+ if (cv->synchronized_since(write2)) {
+ changed |= writecv == NULL || merge(writecv, write, write2);
+ break;
+ }
+ }
+ }
+ return changed;
+}
+
+bool SCGenerator::processReadSlow(ModelAction *read, ClockVector *cv, bool *updateFuture) {
+ bool changed = false;
+
+ /* Merge in the clock vector from the write */
+ const ModelAction *write = read->get_reads_from();
+ ClockVector *writecv = cvmap.get(write);
+ if ((*write < *read) || ! *updateFuture) {
+ bool status = merge(cv, read, write) && (*read < *write);
+ changed |= status;
+ *updateFuture = status;
+ }
+
+ for (int i = 0; i <= maxthreads; i++) {
+ thread_id_t tid = int_to_id(i);
+ action_list_t *list = execution->get_actions_on_obj(read->get_location(), tid);
+ if (list == NULL)
+ continue;
+ for (action_list_t::reverse_iterator rit = list->rbegin(); rit != list->rend(); rit++) {
+ ModelAction *write2 = *rit;
+ if (!write2->is_write())
+ continue;
+ if (write2 == write)
+ continue;
+ if (write2 == read) // If read is a RMW
+ continue;
+
+ ClockVector *write2cv = cvmap.get(write2);
+ if (write2cv == NULL)
+ continue;
+
+ /* write -sc-> write2 &&
+ write -rf-> R =>
+ R -sc-> write2 */
+ if (write2cv->synchronized_since(write)) {
+ if ((*read < *write2) || ! *updateFuture) {
+ bool status = merge(write2cv, write2, read);
+ changed |= status;
+ *updateFuture |= status && (*write2 < *read);
+ }
+ }
+
+ //looking for earliest write2 in iteration to satisfy this
+ /* write2 -sc-> R &&
+ write -rf-> R =>
+ write2 -sc-> write */
+ if (cv->synchronized_since(write2)) {
+ if ((*write2 < *write) || ! *updateFuture) {
+ bool status = writecv == NULL || merge(writecv, write, write2);
+ changed |= status;
+ *updateFuture |= status && (*write < *write2);
+ }
+ break;
+ }
+ }
+ }
+ return changed;
+}
+
+bool SCGenerator::processAnnotatedReadSlow(ModelAction *read, ClockVector *cv, bool *updateFuture) {
+ bool changed = false;
+
+ /* Merge in the clock vector from the write */
+ const ModelAction *write = read->get_reads_from();
+ if ((*write < *read) || ! *updateFuture) {
+ bool status = merge(cv, read, write) && (*read < *write);
+ changed |= status;
+ *updateFuture = status;
+ }
+ return changed;
+}
+
+int SCGenerator::getNextActions(ModelAction **array) {
+ int count=0;
+
+ for (int t = 0; t <= maxthreads; t++) {
+ action_list_t *tlt = &threadlists[t];
+ if (tlt->empty())
+ continue;
+ ModelAction *act = tlt->front();
+ ClockVector *cv = cvmap.get(act);
+
+ /* Find the earliest in SC ordering */
+ for (int i = 0; i <= maxthreads; i++) {
+ if ( i == t )
+ continue;
+ action_list_t *threadlist = &threadlists[i];
+ if (threadlist->empty())
+ continue;
+ ModelAction *first = threadlist->front();
+ if (cv->synchronized_since(first)) {
+ act = NULL;
+ break;
+ }
+ }
+ if (act != NULL) {
+ array[count++]=act;
+ }
+ }
+ if (count != 0)
+ return count;
+ for (int t = 0; t <= maxthreads; t++) {
+ action_list_t *tlt = &threadlists[t];
+ if (tlt->empty())
+ continue;
+ ModelAction *act = tlt->front();
+ ClockVector *cv = act->get_cv();
+
+ /* Find the earliest in SC ordering */
+ for (int i = 0; i <= maxthreads; i++) {
+ if ( i == t )
+ continue;
+ action_list_t *threadlist = &threadlists[i];
+ if (threadlist->empty())
+ continue;
+ ModelAction *first = threadlist->front();
+ if (cv->synchronized_since(first)) {
+ act = NULL;
+ break;
+ }
+ }
+ if (act != NULL) {
+ array[count++]=act;
+ }
+ }
+
+ ASSERT(count==0 || cyclic);
+
+ return count;
+}
+
+bool SCGenerator::merge(ClockVector *cv, const ModelAction *act, const ModelAction *act2) {
+ ClockVector *cv2 = cvmap.get(act2);
+ if (cv2 == NULL)
+ return true;
+
+ if (cv2->getClock(act->get_tid()) >= act->get_seq_number() && act->get_seq_number() != 0) {
+ cyclic = true;
+ //refuse to introduce cycles into clock vectors
+ return false;
+ }
+ if (fastVersion) {
+ bool status = cv->merge(cv2);
+ return status;
+ } else {
+ bool merged;
+ if (allowNonSC) {
+ merged = cv->merge(cv2);
+ if (merged)
+ allowNonSC = false;
+ return merged;
+ } else {
+ if (act2->happens_before(act) ||
+ (act->is_seqcst() && act2->is_seqcst() && *act2 < *act)) {
+ return cv->merge(cv2);
+ } else {
+ return false;
+ }
+ }
+ }
+
+}
+
+void SCGenerator::check_rf1(action_list_t *list) {
+ bool hasBadRF1 = false;
+ HashTable<const ModelAction *, const ModelAction *, uintptr_t, 4 > badrfset1;
+ HashTable<void *, const ModelAction *, uintptr_t, 4 > lastwrmap1;
+ for (action_list_t::iterator it = list->begin(); it != list->end(); it++) {
+ const ModelAction *act = *it;
+ if (act->is_read()) {
+ if (act->get_reads_from() != lastwrmap1.get(act->get_location())) {
+ badrfset1.put(act, lastwrmap1.get(act->get_location()));
+ hasBadRF1 = true;
+ }
+ }
+ if (act->is_write())
+ lastwrmap1.put(act->get_location(), act);
+ }
+ if (cyclic != hasBadRF1 && !annotationMode) {
+ if (cyclic)
+ model_print("Assert failure & non-SC\n");
+ else
+ model_print("Assert failure & SC\n");
+ if (fastVersion) {
+ model_print("Fast\n");
+ } else {
+ model_print("Slow\n");
+ }
+ print_list(list);
+ }
+ if (!annotationMode) {
+ ASSERT (cyclic == hasBadRF1);
+ }
+}
+
+void SCGenerator::check_rf(action_list_t *list) {
+ hasBadRF = false;
+ for (action_list_t::iterator it = list->begin(); it != list->end(); it++) {
+ const ModelAction *act = *it;
+ if (act->is_read()) {
+ const ModelAction *write = act->get_reads_from();
+ if (write && write != lastwrmap.get(act->get_location())) {
+ badrfset.put(act, lastwrmap.get(act->get_location()));
+ hasBadRF = true;
+ }
+ }
+ if (act->is_write())
+ lastwrmap.put(act->get_location(), act);
+ }
+ if (cyclic != hasBadRF && !annotationMode) {
+ if (cyclic)
+ model_print("Assert failure & non-SC\n");
+ else
+ model_print("Assert failure & SC\n");
+ if (fastVersion) {
+ model_print("Fast\n");
+ } else {
+ model_print("Slow\n");
+ }
+ print_list(list);
+ }
+ if (!annotationMode) {
+ ASSERT (cyclic == hasBadRF);
+ }
+}
+
+void SCGenerator::reset(action_list_t *list) {
+ for (int t = 0; t <= maxthreads; t++) {
+ action_list_t *tlt = &threadlists[t];
+ tlt->clear();
+ }
+ for (action_list_t::iterator it = list->begin(); it != list->end(); it++) {
+ ModelAction *act = *it;
+ delete cvmap.get(act);
+ cvmap.put(act, NULL);
+ }
+
+ cyclic=false;
+}
+
+ModelAction* SCGenerator::pruneArray(ModelAction **array, int count) {
+ /* No choice */
+ if (count == 1)
+ return array[0];
+
+ /* Choose first non-write action */
+ ModelAction *nonwrite=NULL;
+ for(int i=0;i<count;i++) {
+ if (!array[i]->is_write())
+ if (nonwrite==NULL || nonwrite->get_seq_number() > array[i]->get_seq_number())
+ nonwrite = array[i];
+ }
+ if (nonwrite != NULL)
+ return nonwrite;
+
+ /* Look for non-conflicting action */
+ ModelAction *nonconflict=NULL;
+ for(int a=0;a<count;a++) {
+ ModelAction *act=array[a];
+ for (int i = 0; i <= maxthreads && act != NULL; i++) {
+ thread_id_t tid = int_to_id(i);
+ if (tid == act->get_tid())
+ continue;
+
+ action_list_t *list = &threadlists[id_to_int(tid)];
+ for (action_list_t::iterator rit = list->begin(); rit != list->end(); rit++) {
+ ModelAction *write = *rit;
+ if (!write->is_write())
+ continue;
+ ClockVector *writecv = cvmap.get(write);
+ if (writecv->synchronized_since(act))
+ break;
+ if (write->get_location() == act->get_location()) {
+				// unordered write to the same location; act conflicts with it
+ act = NULL;
+ break;
+ }
+ }
+ }
+ if (act != NULL) {
+ if (nonconflict == NULL || nonconflict->get_seq_number() > act->get_seq_number())
+ nonconflict=act;
+ }
+ }
+ return nonconflict;
+}
+
+/** This routine operates on the previously built threadlists */
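+/*
+ * Expected layout of an annotation block within a thread (a sketch in
+ * terms of the IS_ANNO_* predicates from sc_annotation.h):
+ *
+ *   BEGIN, act_1 [, KEEP], act_2 [, KEEP], ..., END
+ *
+ * Each act_i that is not followed by a KEEP annotation is collected as
+ * an annotated read.
+ */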
+void SCGenerator::collectAnnotatedReads() {
+ for (unsigned i = 1; i < threadlists.size(); i++) {
+ action_list_t *list = &threadlists.at(i);
+ for (action_list_t::iterator it = list->begin(); it != list->end(); it++) {
+ ModelAction *act = *it;
+ if (!IS_SC_ANNO(act))
+ continue;
+ if (!IS_ANNO_BEGIN(act)) {
+ model_print("SC annotation should begin with a BEGIN annotation\n");
+ annotationError = true;
+ return;
+ }
+			act = *++it;
+			while (!IS_ANNO_END(act) && it != list->end()) {
+				// An action inside a BEGIN/END block is an annotated
+				// read unless the following action is a KEEP annotation
+				ModelAction *nextAct = *++it;
+				if (IS_ANNO_KEEP(nextAct)) {
+					// act is explicitly kept; skip over the KEEP
+					act = *++it;
+				} else { // Annotated reads
+					annotatedReadSet.put(act, act);
+					annotatedReadSetSize++;
+					if (IS_ANNO_END(nextAct))
+						break;
+					act = nextAct;
+				}
+			}
+ if (it == list->end()) {
+				model_print("SC annotation should end with an END annotation\n");
+ annotationError = true;
+ return;
+ }
+ }
+ }
+}
--- /dev/null
+#ifndef _SCGEN_H
+#define _SCGEN_H
+
+#include "hashtable.h"
+#include "memoryorder.h"
+#include "action.h"
+#include "scanalysis.h"
+#include "model.h"
+#include "execution.h"
+#include "threads-model.h"
+#include "clockvector.h"
+#include "sc_annotation.h"
+
+#include <sys/time.h>
+
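+/** Option flags for SCGenerator: what to print and whether annotation
+ * mode is enabled. */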
+typedef struct SCGeneratorOption {
+ bool print_always;
+ bool print_buggy;
+ bool print_nonsc;
+ bool annotationMode;
+} SCGeneratorOption;
+
+
+class SCGenerator {
+public:
+ SCGenerator();
+ ~SCGenerator();
+
+ bool getCyclic();
+ SnapVector<action_list_t>* getDupThreadLists();
+
+ struct sc_statistics* getStats();
+
+ void setExecution(ModelExecution *execution);
+ void setActions(action_list_t *actions);
+ void setPrintAlways(bool val);
+ bool getPrintAlways();
+ bool getHasBadRF();
+
+ void setAnnotationMode(bool val);
+
+ void setPrintBuggy(bool val);
+
+ void setPrintNonSC(bool val);
+
+ action_list_t * getSCList();
+
+ HashTable<const ModelAction *, const ModelAction *, uintptr_t, 4> *getBadrfset();
+
+ HashTable<const ModelAction *, const ModelAction *, uintptr_t, 4 > *getAnnotatedReadSet();
+
+ void print_list(action_list_t *list);
+
+ SNAPSHOTALLOC
+private:
+ /********************** SC-related stuff (beginning) **********************/
+ ModelExecution *execution;
+
+ action_list_t *actions;
+
+ bool fastVersion;
+ bool allowNonSC;
+
+ action_list_t * generateSC(action_list_t *list);
+
+ void update_stats();
+
+ int buildVectors(SnapVector<action_list_t> *threadlist, int *maxthread,
+ action_list_t *list);
+
+ bool updateConstraints(ModelAction *act);
+
+ void computeCV(action_list_t *list);
+
+ bool processReadFast(ModelAction *read, ClockVector *cv);
+
+ bool processReadSlow(ModelAction *read, ClockVector *cv, bool *updateFuture);
+
+ bool processAnnotatedReadSlow(ModelAction *read, ClockVector *cv, bool *updateFuture);
+
+ int getNextActions(ModelAction **array);
+
+ bool merge(ClockVector *cv, const ModelAction *act, const ModelAction *act2);
+
+ void check_rf(action_list_t *list);
+ void check_rf1(action_list_t *list);
+
+ void reset(action_list_t *list);
+
+ ModelAction* pruneArray(ModelAction **array, int count);
+
+	/** This routine operates on the previously built threadlists */
+ void collectAnnotatedReads();
+
+	int maxthreads;
+	/** Clock vectors of the SC order being generated, keyed by action */
+	HashTable<const ModelAction *, ClockVector *, uintptr_t, 4 > cvmap;
+	/** Whether the constraints of the current execution form a cycle */
+	bool cyclic;
+	/** Bad reads mapped to the last write to the same location */
+	HashTable<const ModelAction *, const ModelAction *, uintptr_t, 4 > badrfset;
+	/** The last write to each memory location in the generated order */
+	HashTable<void *, const ModelAction *, uintptr_t, 4 > lastwrmap;
+	/** Per-thread action lists (working copy and a duplicate) */
+	SnapVector<action_list_t> threadlists;
+	SnapVector<action_list_t> dup_threadlists;
+ bool print_always;
+ bool print_buggy;
+ bool print_nonsc;
+ bool hasBadRF;
+
+ struct sc_statistics *stats;
+
+	/** The set of read actions that carry SC annotations and therefore
+	 * receive special treatment */
+ HashTable<const ModelAction *, const ModelAction *, uintptr_t, 4 > annotatedReadSet;
+ int annotatedReadSetSize;
+ bool annotationMode;
+ bool annotationError;
+
+	/** A set of actions to be ignored in the partial-SC analysis */
+ HashTable<const ModelAction*, const ModelAction*, uintptr_t, 4> ignoredActions;
+};
+#endif
#define TRACE_ANALYSIS_H
#include "model.h"
+
class TraceAnalysis {
public:
/** setExecution is called once after installation with a reference to
virtual void finish() = 0;
+	/** This method allows a plugin to inspect (and possibly rewrite) a
+	 * model action as the model checker processes it. */
+ virtual void inspectModelAction(ModelAction *act) {}
+
+	/** This method will be called when the plugin is installed by the
+	 * model checker. */
+ virtual void actionAtInstallation() {}
+
+	/** This method will be called when the model checker finishes all
+	 * executions; the plugin can update its own state here and then
+	 * decide whether or not to restart the model checker. */
+ virtual void actionAtModelCheckingFinish() {}
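+	/* A minimal sketch (hypothetical subclass; the required pure virtual
+	 * methods are omitted) of a plugin wiring up these hooks:
+	 *
+	 *   class MyAnalysis : public TraceAnalysis {
+	 *     void inspectModelAction(ModelAction *act) {
+	 *       // e.g. rewrite a wildcard memory order on act
+	 *     }
+	 *     void actionAtModelCheckingFinish() {
+	 *       // e.g. refine inference state, then optionally restart
+	 *     }
+	 *   };
+	 */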
+
SNAPSHOTALLOC
};
#endif