static void *memory_top;
static RaceSet * raceset;
+#ifdef COLLECT_STAT
+/* Counters for plain (non-atomic) loads/stores, bucketed by access width
+ * in bits.  Compiled in only when COLLECT_STAT is defined; dumped by
+ * print_normal_accesses() at the bottom of this file. */
+static unsigned int store8_count = 0;
+static unsigned int store16_count = 0;
+static unsigned int store32_count = 0;
+static unsigned int store64_count = 0;
+
+static unsigned int load8_count = 0;
+static unsigned int load16_count = 0;
+static unsigned int load32_count = 0;
+static unsigned int load64_count = 0;
+#endif
+
static const ModelExecution * get_execution()
{
return model->get_execution();
ASSERT(readThread >= 0);
record->thread[0] = readThread;
record->readClock[0] = readClock;
+ } else {
+ record->thread = NULL;
}
if (shadowval & ATOMICMASK)
record->isAtomic = 1;
return hash;
}
-
bool race_equals(struct DataRace *r1, struct DataRace *r2) {
if (r1->numframes != r2->numframes)
return false;
/** This function is called when we detect a data race.*/
+/* Builds a heap-allocated DataRace record describing the race between the
+ * prior access (oldthread/oldclock/isoldwrite) and the new access
+ * (newaction/isnewwrite) at `address`.  Caller owns the returned record
+ * (freed with model_free() or retained by raceset->add() at the Exit
+ * blocks below).  When REPORT_DATA_RACES is not defined, reporting is
+ * compiled out and NULL is returned, which the Exit blocks treat as
+ * "no race to record".
+ * NOTE(review): diff context appears elided here -- the full function
+ * presumably also stores isoldwrite/newaction into the record; confirm
+ * against the complete source. */
static struct DataRace * reportDataRace(thread_id_t oldthread, modelclock_t oldclock, bool isoldwrite, ModelAction *newaction, bool isnewwrite, const void *address)
{
+#ifdef REPORT_DATA_RACES
struct DataRace *race = (struct DataRace *)model_malloc(sizeof(struct DataRace));
race->oldthread = oldthread;
race->oldclock = oldclock;
race->isnewwrite = isnewwrite;
race->address = address;
return race;
+#else
+	return NULL;
+#endif
}
/**
}
/* Check for datarace against last write. */
-
{
modelclock_t writeClock = record->writeClock;
thread_id_t writeThread = record->writeThread;
goto Exit;
}
-
-
{
/* Check for datarace against last read. */
modelclock_t readClock = READVECTOR(shadowval);
{
/* Check for datarace against last write. */
-
modelclock_t writeClock = WRITEVECTOR(shadowval);
thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
Exit:
if (race) {
+#ifdef REPORT_DATA_RACES
race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
if (raceset->add(race))
assert_race(race);
else model_free(race);
+#else
+ model_free(race);
+#endif
}
}
-
/** This function does race detection for a write on an expanded record. */
struct DataRace * atomfullRaceCheckWrite(thread_id_t thread, const void *location, uint64_t *shadow, ClockVector *currClock)
{
{
/* Check for datarace against last read. */
-
modelclock_t readClock = READVECTOR(shadowval);
thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
{
/* Check for datarace against last write. */
-
modelclock_t writeClock = WRITEVECTOR(shadowval);
thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
Exit:
if (race) {
+#ifdef REPORT_DATA_RACES
race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
if (raceset->add(race))
assert_race(race);
else model_free(race);
+#else
+ model_free(race);
+#endif
}
}
}
}
-
-
/** This function does race detection on a read for an expanded record. */
struct DataRace * fullRaceCheckRead(thread_id_t thread, const void *location, uint64_t *shadow, ClockVector *currClock)
{
}
if (__builtin_popcount(copytoindex) <= 1) {
- if (copytoindex == 0) {
+ if (copytoindex == 0 && record->thread == NULL) {
int newCapacity = INITCAPACITY;
record->thread = (thread_id_t *)snapshot_malloc(sizeof(thread_id_t) * newCapacity);
record->readClock = (modelclock_t *)snapshot_malloc(sizeof(modelclock_t) * newCapacity);
}
Exit:
if (race) {
+#ifdef REPORT_DATA_RACES
race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
if (raceset->add(race))
assert_race(race);
else model_free(race);
+#else
+ model_free(race);
+#endif
}
}
{
/* Check for datarace against last write. */
-
modelclock_t writeClock = WRITEVECTOR(shadowval);
thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
}
Exit:
if (race) {
+#ifdef REPORT_DATA_RACES
race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
if (raceset->add(race))
assert_race(race);
else model_free(race);
+#else
+ model_free(race);
+#endif
}
}
}
/* Check for datarace against last write. */
-
modelclock_t writeClock = WRITEVECTOR(shadowval);
thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
}
Exit:
if (race) {
+#ifdef REPORT_DATA_RACES
race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
if (raceset->add(race))
assert_race(race);
else model_free(race);
+#else
+ model_free(race);
+#endif
}
return shadow;
}
Exit:
if (race) {
+#ifdef REPORT_DATA_RACES
race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
if (raceset->add(race))
assert_race(race);
else model_free(race);
+#else
+ model_free(race);
+#endif
}
}
-
-
void raceCheckRead64(thread_id_t thread, const void *location)
{
uint64_t old_shadowval, new_shadowval;
old_shadowval = new_shadowval = INVALIDSHADOWVAL;
-
-
+#ifdef COLLECT_STAT
+ load64_count++;
+#endif
uint64_t * shadow = raceCheckRead_firstIt(thread, location, &old_shadowval, &new_shadowval);
if (CHECKBOUNDARY(location, 7)) {
if (shadow[1]==old_shadowval)
{
uint64_t old_shadowval, new_shadowval;
old_shadowval = new_shadowval = INVALIDSHADOWVAL;
-
-
+#ifdef COLLECT_STAT
+ load32_count++;
+#endif
uint64_t * shadow = raceCheckRead_firstIt(thread, location, &old_shadowval, &new_shadowval);
if (CHECKBOUNDARY(location, 3)) {
if (shadow[1]==old_shadowval)
{
uint64_t old_shadowval, new_shadowval;
old_shadowval = new_shadowval = INVALIDSHADOWVAL;
-
-
+#ifdef COLLECT_STAT
+ load16_count++;
+#endif
uint64_t * shadow = raceCheckRead_firstIt(thread, location, &old_shadowval, &new_shadowval);
if (CHECKBOUNDARY(location, 1)) {
if (shadow[1]==old_shadowval) {
{
uint64_t old_shadowval, new_shadowval;
old_shadowval = new_shadowval = INVALIDSHADOWVAL;
-
-
+#ifdef COLLECT_STAT
+ load8_count++;
+#endif
raceCheckRead_firstIt(thread, location, &old_shadowval, &new_shadowval);
}
Exit:
if (race) {
+#ifdef REPORT_DATA_RACES
race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
if (raceset->add(race))
assert_race(race);
else model_free(race);
+#else
+ model_free(race);
+#endif
}
return shadow;
Exit:
if (race) {
+#ifdef REPORT_DATA_RACES
race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
if (raceset->add(race))
assert_race(race);
else model_free(race);
+#else
+ model_free(race);
+#endif
}
}
{
uint64_t old_shadowval, new_shadowval;
old_shadowval = new_shadowval = INVALIDSHADOWVAL;
-
-
+#ifdef COLLECT_STAT
+ store64_count++;
+#endif
uint64_t * shadow = raceCheckWrite_firstIt(thread, location, &old_shadowval, &new_shadowval);
if (CHECKBOUNDARY(location, 7)) {
if (shadow[1]==old_shadowval)
{
uint64_t old_shadowval, new_shadowval;
old_shadowval = new_shadowval = INVALIDSHADOWVAL;
-
-
+#ifdef COLLECT_STAT
+ store32_count++;
+#endif
uint64_t * shadow = raceCheckWrite_firstIt(thread, location, &old_shadowval, &new_shadowval);
if (CHECKBOUNDARY(location, 3)) {
if (shadow[1]==old_shadowval)
{
uint64_t old_shadowval, new_shadowval;
old_shadowval = new_shadowval = INVALIDSHADOWVAL;
-
+#ifdef COLLECT_STAT
+ store16_count++;
+#endif
uint64_t * shadow = raceCheckWrite_firstIt(thread, location, &old_shadowval, &new_shadowval);
if (CHECKBOUNDARY(location, 1)) {
{
uint64_t old_shadowval, new_shadowval;
old_shadowval = new_shadowval = INVALIDSHADOWVAL;
-
-
+#ifdef COLLECT_STAT
+ store8_count++;
+#endif
raceCheckWrite_firstIt(thread, location, &old_shadowval, &new_shadowval);
}
+
+#ifdef COLLECT_STAT
+/* Dump the per-width counts of plain (non-atomic) stores and loads
+ * accumulated by the raceCheckWrite*/raceCheckRead* fast paths above.
+ * Only compiled when COLLECT_STAT is defined (see counter declarations
+ * at the top of this file). */
+void print_normal_accesses()
+{
+	model_print("store 8 count: %u\n", store8_count);
+	model_print("store 16 count: %u\n", store16_count);
+	model_print("store 32 count: %u\n", store32_count);
+	model_print("store 64 count: %u\n", store64_count);
+
+	model_print("load 8 count: %u\n", load8_count);
+	model_print("load 16 count: %u\n", load16_count);
+	model_print("load 32 count: %u\n", load32_count);
+	model_print("load 64 count: %u\n", load64_count);
+}
+#endif