/doc/docs
/benchmarks
/README.html
+/cdsspec-compiler/classes
+/cdsspec-compiler/src/edu/uci/eecs/utilParser
#include "config.h"
extern int model_out;
+extern int switch_alloc;
-#define model_print(fmt, ...) do { dprintf(model_out, fmt, ##__VA_ARGS__); } while (0)
+#define model_print(fmt, ...) do { switch_alloc = 1; dprintf(model_out, fmt, ##__VA_ARGS__); switch_alloc = 0; } while (0)
#ifdef CONFIG_DEBUG
#define DEBUG(fmt, ...) do { model_print("*** %15s:%-4d %25s() *** " fmt, __FILE__, __LINE__, __func__, ##__VA_ARGS__); } while (0)
too_many_reads(false),
no_valid_reads(false),
bad_synchronization(false),
+ bad_sc_read(false),
asserted(false)
{ }
bool no_valid_reads;
/** @brief Incorrectly-ordered synchronization was made */
bool bad_synchronization;
+ bool bad_sc_read;
bool asserted;
SNAPSHOTALLOC
priv->bad_synchronization = true;
}
+/** @brief Alert the model-checker that a seq_cst load performed an
+ * invalid read (its reads-from happens before the last seq_cst
+ * write to the same location) */
+void ModelExecution::set_bad_sc_read()
+{
+ priv->bad_sc_read = true;
+}
+
bool ModelExecution::assert_bug(const char *msg)
{
priv->bugs.push_back(new bug_message(msg));
if (rf) {
if (r_modification_order(act, rf))
updated = true;
+ if (act->is_seqcst()) {
+ ModelAction *last_sc_write = get_last_seq_cst_write(act);
+ if (last_sc_write != NULL && rf->happens_before(last_sc_write)) {
+ set_bad_sc_read();
+ }
+ }
} else if (promise) {
if (r_modification_order(act, promise))
updated = true;
ptr += sprintf(ptr, "[no valid reads-from]");
if (priv->bad_synchronization)
ptr += sprintf(ptr, "[bad sw ordering]");
+ if (priv->bad_sc_read)
+ ptr += sprintf(ptr, "[bad sc read]");
if (promises_expired())
ptr += sprintf(ptr, "[promise expired]");
if (promises.size() != 0)
priv->no_valid_reads ||
priv->too_many_reads ||
priv->bad_synchronization ||
+ priv->bad_sc_read ||
priv->hard_failed_promise ||
promises_expired();
}
action_list_t::reverse_iterator rit;
for (rit = list->rbegin(); rit != list->rend(); rit++) {
ModelAction *act = *rit;
- // FIXME: The read from promise might read from an annotation
- if (act->get_type() == ATOMIC_ANNOTATION)
- continue;
/* Skip curr */
if (act == curr)
}
}
- /* C++, Section 29.3 statement 3 (second subpoint) */
- if (curr->is_seqcst() && last_sc_write && act == last_sc_write) {
- added = mo_graph->addEdge(act, rf) || added;
- break;
- }
-
/*
* Include at most one act per-thread that "happens
* before" curr
added = mo_graph->addEdge(act, curr) || added;
else if (act->is_read()) {
//if previous read accessed a null, just keep going
- if (act->get_reads_from() == NULL)
- continue;
- added = mo_graph->addEdge(act->get_reads_from(), curr) || added;
+ if (act->get_reads_from() == NULL) {
+ added = mo_graph->addEdge(act->get_reads_from_promise(), curr) || added;
+ } else
+ added = mo_graph->addEdge(act->get_reads_from(), curr) || added;
}
break;
} else if (act->is_read() && !act->could_synchronize_with(curr) &&
action_list_t * get_action_trace() { return &action_trace; }
CycleGraph * const get_mo_graph() { return mo_graph; }
-
+
int get_execution_number() const;
SNAPSHOTALLOC
bool mo_may_allow(const ModelAction *writer, const ModelAction *reader);
bool promises_may_allow(const ModelAction *writer, const ModelAction *reader) const;
void set_bad_synchronization();
+ void set_bad_sc_read();
bool promises_expired() const;
bool should_wake_up(const ModelAction *curr, const Thread *thread) const;
void wake_up_sleeping_actions(ModelAction *curr);
void atomic_flag_clear( volatile atomic_flag* __a__ )
{ atomic_flag_clear_explicit( __a__, memory_order_seq_cst ); }
-void __atomic_flag_wait__( volatile atomic_flag* __a__ )
-{ while ( atomic_flag_test_and_set( __a__ ) ); }
+void __atomic_flag_wait__( volatile atomic_flag* __a__ ) {
+ while ( atomic_flag_test_and_set( __a__ ) )
+ ;
+}
void __atomic_flag_wait_explicit__( volatile atomic_flag* __a__,
- memory_order __x__ )
-{ while ( atomic_flag_test_and_set_explicit( __a__, __x__ ) ); }
+ memory_order __x__ ) {
+ while ( atomic_flag_test_and_set_explicit( __a__, __x__ ))
+ ;
+}
}
#define CDS_ANNOTATE_H
#include <stdint.h>
-//#ifdef __cplusplus
-//extern "C" {
-//#endif
void cdsannotate(uint64_t analysistype, void *annotation);
-//#ifdef __cplusplus
-//}
-//#endif
-
#endif
inline void* atomic_fetch_add_explicit
( volatile atomic_address* __a__, ptrdiff_t __m__, memory_order __x__ )
{
- void* volatile* __p__ = &((__a__)->__f__);
- void* __r__ = (void *) model_rmwr_action((void *)__p__, __x__);
- model_rmw_action((void *)__p__, __x__, (uint64_t) ((char*)(*__p__) + __m__));
- return __r__; }
+ volatile __typeof__((__a__)->__f__)* __p__ = & ((__a__)->__f__);
+ __typeof__((__a__)->__f__) __old__=(__typeof__((__a__)->__f__)) model_rmwr_action((void *)__p__, __x__);
+ __typeof__((__a__)->__f__) __copy__= __old__;
+ __copy__ = (void *) (((char *)__copy__) + __m__);
+ model_rmw_action((void *)__p__, __x__, (uint64_t) __copy__);
+ return __old__;
+}
-inline void* atomic_fetch_add
+ inline void* atomic_fetch_add
( volatile atomic_address* __a__, ptrdiff_t __m__ )
{ return atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); }
inline void* atomic_fetch_sub_explicit
( volatile atomic_address* __a__, ptrdiff_t __m__, memory_order __x__ )
-{
- void* volatile* __p__ = &((__a__)->__f__);
- void* __r__ = (void *) model_rmwr_action((void *)__p__, __x__);
- model_rmw_action((void *)__p__, __x__, (uint64_t)((char*)(*__p__) - __m__));
- return __r__; }
+{ volatile __typeof__((__a__)->__f__)* __p__ = & ((__a__)->__f__);
+ __typeof__((__a__)->__f__) __old__=(__typeof__((__a__)->__f__)) model_rmwr_action((void *)__p__, __x__);
+ __typeof__((__a__)->__f__) __copy__= __old__;
+ __copy__ = (void *) (((char *)__copy__) - __m__);
+ model_rmw_action((void *)__p__, __x__, (uint64_t) __copy__);
+ return __old__;
+}
inline void* atomic_fetch_sub
( volatile atomic_address* __a__, ptrdiff_t __m__ )
{"enabled", required_argument, NULL, 'e'},
{"bound", required_argument, NULL, 'b'},
{"verbose", optional_argument, NULL, 'v'},
- {"uninitialized", optional_argument, NULL, 'u'},
- {"analysis", optional_argument, NULL, 't'},
- {"options", optional_argument, NULL, 'o'},
+ {"uninitialized", required_argument, NULL, 'u'},
+ {"analysis", required_argument, NULL, 't'},
+ {"options", required_argument, NULL, 'o'},
{"maxexecutions", required_argument, NULL, 'x'},
{0, 0, 0, 0} /* Terminator */
};
TraceAnalysis * ta=(*installedanalysis)[i];
ta->setExecution(execution);
model->add_trace_analysis(ta);
+ /** Call the installation event for each installed plugin */
+ ta->actionAtInstallation();
}
}
main_argc = argc;
main_argv = argv;
+ /*
+ * If this printf statement is removed, CDSChecker will fail on an
+ * assert on some versions of glibc. The first time printf is
+ * called, it allocates internal buffers. We can't easily snapshot
+ * libc since we also use it.
+ */
+
+ printf("CDSChecker\n"
+ "Copyright (c) 2013 Regents of the University of California. All rights reserved.\n"
+ "Distributed under the GPLv2\n"
+ "Written by Brian Norris and Brian Demsky\n\n");
+
/* Configure output redirection for the model-checker */
redirect_output();
#include <algorithm>
#include <new>
#include <stdarg.h>
+#include <string.h>
#include "model.h"
#include "action.h"
ModelChecker::ModelChecker(struct model_params params) :
/* Initialize default scheduler */
params(params),
+ restart_flag(false),
+ exit_flag(false),
scheduler(new Scheduler()),
node_stack(new NodeStack()),
execution(new ModelExecution(this, &this->params, scheduler, node_stack)),
execution_number(1),
diverge(NULL),
earliest_diverge(NULL),
- trace_analyses()
+ trace_analyses(),
+ inspect_plugin(NULL)
{
+ memset(&stats,0,sizeof(struct execution_stats));
}
/** @brief Destructor */
checkDataRaces();
run_trace_analyses();
+ } else if (inspect_plugin && !execution->is_complete_execution() &&
+ (execution->too_many_steps())) {
+ inspect_plugin->analyze(execution->get_action_trace());
}
record_stats();
if (complete)
earliest_diverge = NULL;
+ if (restart_flag) {
+ do_restart();
+ return true;
+ }
+
+ if (exit_flag)
+ return false;
+
if ((diverge = execution->get_next_backtrack()) == NULL)
return false;
/** @brief Run trace analyses on complete trace */
void ModelChecker::run_trace_analyses() {
- IN_TRACE_ANALYSIS = true;
for (unsigned int i = 0; i < trace_analyses.size(); i++)
trace_analyses[i]->analyze(execution->get_action_trace());
- IN_TRACE_ANALYSIS = false;
}
/**
Thread *old = thread_current();
scheduler->set_current_thread(NULL);
ASSERT(!old->get_pending());
+ if (inspect_plugin != NULL) {
+ inspect_plugin->inspectModelAction(act);
+ }
old->set_pending(act);
if (Thread::swap(old, &system_context) < 0) {
perror("swap threads");
return false;
}
+/** @brief Exit ModelChecker upon returning to the run loop of the
+ * model checker. */
+void ModelChecker::exit_model_checker()
+{
+ exit_flag = true;
+}
+
+/** @brief Restart ModelChecker upon returning to the run loop of the
+ * model checker. */
+void ModelChecker::restart()
+{
+ restart_flag = true;
+}
+
+void ModelChecker::do_restart()
+{
+ restart_flag = false;
+ diverge = NULL;
+ earliest_diverge = NULL;
+ reset_to_initial_state();
+ node_stack->full_reset();
+ memset(&stats,0,sizeof(struct execution_stats));
+ execution_number = 1;
+}
+
/** @brief Run ModelChecker for the user program */
void ModelChecker::run()
{
+ bool has_next;
do {
thrd_t user_thread;
Thread *t = new Thread(execution->get_next_id(), &user_thread, &user_main_wrapper, NULL, NULL);
t = execution->take_step(curr);
} while (!should_terminate_execution());
- } while (next_execution());
+ has_next = next_execution();
+ if (inspect_plugin != NULL && !has_next) {
+ inspect_plugin->actionAtModelCheckingFinish();
+ // Check if the inspect plugin set the restart flag
+ if (restart_flag) {
+ model_print("******* Model-checking RESTART: *******\n");
+ has_next = true;
+ do_restart();
+ }
+ }
+ } while (has_next);
execution->fixup_release_sequences();
void run();
+ /** Restart the model checker, intended for plugins. */
+ void restart();
+
+ /** Exit the model checker, intended for plugins. */
+ void exit_model_checker();
+
+ /** Check the exit_flag. */
+ bool get_exit_flag() const { return exit_flag; }
+
/** @returns the context for the main model-checking system thread */
ucontext_t * get_system_context() { return &system_context; }
void assert_user_bug(const char *msg);
const model_params params;
- void add_trace_analysis(TraceAnalysis *a) {
- trace_analyses.push_back(a);
- }
-
+ void add_trace_analysis(TraceAnalysis *a) { trace_analyses.push_back(a); }
+ void set_inspect_plugin(TraceAnalysis *a) { inspect_plugin=a; }
MEMALLOC
private:
+ /** Flag indicates whether to restart the model checker. */
+ bool restart_flag;
+ /** Flag indicates whether to exit the model checker. */
+ bool exit_flag;
+
/** The scheduler to use: tracks the running/ready Threads */
Scheduler * const scheduler;
NodeStack * const node_stack;
ModelVector<TraceAnalysis *> trace_analyses;
+ /** @brief Implement restart. */
+ void do_restart();
+ /** @brief Plugin that can inspect new actions. */
+ TraceAnalysis *inspect_plugin;
/** @brief The cumulative execution stats */
struct execution_stats stats;
void record_stats();
#define REQUESTS_BEFORE_ALLOC 1024
-bool IN_TRACE_ANALYSIS = false;
-
size_t allocatedReqs[REQUESTS_BEFORE_ALLOC] = { 0 };
int nextRequest = 0;
int howManyFreed = 0;
+int switch_alloc = 0;
#if !USE_MPROTECT_SNAPSHOT
static mspace sStaticSpace = NULL;
#endif
/** Bootstrap allocation. Problem is that the dynamic linker calls require
* calloc to work and calloc requires the dynamic linker to work. */
-#define BOOTSTRAPBYTES 4096
+#define BOOTSTRAPBYTES 131072
char bootstrapmemory[BOOTSTRAPBYTES];
size_t offset = 0;
sz = (sz + 7) & ~7;
if (sz > (BOOTSTRAPBYTES-offset)) {
- model_print("OUT OF BOOTSTRAP MEMORY\n");
+ model_print("OUT OF BOOTSTRAP MEMORY. Increase the size of BOOTSTRAPBYTES in mymemory.cc\n");
exit(EXIT_FAILURE);
}
void *malloc(size_t size)
{
if (user_snapshot_space) {
+ if (switch_alloc) {
+ return model_malloc(size);
+ }
/* Only perform user allocations from user context */
- ASSERT(!model || thread_current() || IN_TRACE_ANALYSIS);
+ ASSERT(!model || thread_current());
return user_malloc(size);
} else
return HandleEarlyAllocationRequest(size);
/** @brief Snapshotting free implementation for user programs */
void free(void * ptr)
{
- if (!DontFree(ptr))
+ if (!DontFree(ptr)) {
+ if (switch_alloc) {
+ return model_free(ptr);
+ }
mspace_free(user_snapshot_space, ptr);
+ }
}
/** @brief Snapshotting realloc implementation for user programs */
return p; \
}
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-extern bool IN_TRACE_ANALYSIS;
-
void *model_malloc(size_t size);
void *model_calloc(size_t count, size_t size);
void model_free(void *ptr);
-#ifdef __cplusplus
-} /* extern "C" */
-#endif
-
-#ifdef __cplusplus
-extern "C" {
-#endif
void * snapshot_malloc(size_t size);
void * snapshot_calloc(size_t count, size_t size);
void * snapshot_realloc(void *ptr, size_t size);
void snapshot_free(void *ptr);
-#ifdef __cplusplus
-} /* extern "C" */
-#endif
void * Thread_malloc(size_t size);
void Thread_free(void *ptr);
node_list.back()->clear_backtracking();
}
+/** Reset the node stack. */
+void NodeStack::full_reset()
+{
+ for (unsigned int i = 0; i < node_list.size(); i++)
+ delete node_list[i];
+ node_list.clear();
+ reset_execution();
+ total_nodes = 1;
+}
+
Node * NodeStack::get_head() const
{
if (node_list.empty() || head_idx < 0)
Node * get_next() const;
void reset_execution();
void pop_restofstack(int numAhead);
+ void full_reset();
int get_total_nodes() { return total_nodes; }
void print() const;
int val;
TagInt(unsigned int tag, int val) : tag(tag), val(val) { }
-
+
TagInt(int val) : tag(0), val(val) { }
}TagInt;
typedef SnapSet<int> IntSet;
template<typename Key, typename Value>
-class Map: public unordered_map<Key, Value> {
+class Map: public unordered_map<Key, Value, std::hash<Key>,
+ std::equal_to<Value>, SnapshotAlloc<std::pair<const Key, Value>>> {
public:
- typedef unordered_map<Key, Value> map;
-
- Map() : map() { }
-
Value get(Key key) {
return (*this)[key];
}
(*this)[key] = value;
}
- SNAPSHOTALLOC
+ SNAPSHOTALLOC
};
-class IntMap : public unordered_map<int, int> {
- public:
- typedef unordered_map<int, int> map;
-
- IntMap() : map() { }
-
- int get(int key) {
- return (*this)[key];
- }
-
- void put(int key, int value) {
- (*this)[key] = value;
- }
-
- SNAPSHOTALLOC
-};
+typedef Map<int, int> IntMap;
typedef SnapVector<double> DoubleVector;
typedef SnapList<double> DoubleList;
-#!/bin/bash
-#
+#!/bin/bash -e
-BENCHS=(ms-queue linuxrwlocks mcs-lock chase-lev-deque-bugfix treiber-stack
- ticket-lock seqlock read-copy-update concurrent-hashmap spsc-bugfix mpmc-queue)
+BENCHS=(
+ 'ms-queue'\
+ 'linuxrwlocks' \
+ 'mcs-lock' \
+ 'chase-lev-deque-bugfix' \
+ 'ticket-lock' \
+ 'seqlock' \
+ 'read-copy-update' \
+ 'spsc-bugfix' \
+ 'mpmc-queue'
+ 'concurrent-hashmap' \
+)
-echo ${BENCHS[*]}
-for i in ${BENCHS[*]}; do
- echo "---------- Start to run $i ----------"
- tests=$(ls $i/testcase[1-9])
- if [ -e "$i/main" ]; then
- tests=($tests "$i/main")
- fi
- if [ -e "$i/testcase" ]; then
- tests=($tests "$i/testcase")
- fi
+for bench in ${BENCHS[@]}; do
+ echo "---------- Start to run ${bench} ----------"
+ # Clear the test cases.
+ tests=()
+ tests=$(ls ${bench}/testcase[1-9] 2>/dev/null || true)
+
+ [ -e "${bench}/main" ] && tests+=("${bench}/main")
+ [ -e "${bench}/testcase" ] && tests+=("${bench}/testcase")
for j in ${tests[*]}; do
./run.sh $j -m2 -y -u3 -tSPEC
/** Allocate a stack for a new thread. */
static void * stack_allocate(size_t size)
{
- return snapshot_malloc(size);
+ return Thread_malloc(size);
}
/** Free a stack for a terminated thread. */
static void stack_free(void *stack)
{
- snapshot_free(stack);
+ Thread_free(stack);
}
/**
#define TRACE_ANALYSIS_H
#include "model.h"
+
class TraceAnalysis {
public:
/** setExecution is called once after installation with a reference to
virtual void finish() = 0;
+ /** This method is used to inspect the normal/abnormal model
+ * action. */
+ virtual void inspectModelAction(ModelAction *act) {}
+
+ /** This method will be called when a plugin is installed by the
+ * model checker. */
+ virtual void actionAtInstallation() {}
+
+ /** This method will be called when the model checker finishes
+ * the executions; with this method, the model checker can alter
+ * the state of the plugin, and the plugin can choose whether or
+ * not to restart the model checker. */
+ virtual void actionAtModelCheckingFinish() {}
+
SNAPSHOTALLOC
};
#endif