static void *memory_top;
static RaceSet * raceset;
+#ifdef COLLECT_STAT
+/* Counters for instrumented plain (non-atomic) memory accesses, grouped by
+ * access width in bits.  Incremented in the raceCheckRead*/raceCheckWrite*
+ * entry points below and dumped by print_normal_accesses(). */
+static unsigned int store8_count = 0;
+static unsigned int store16_count = 0;
+static unsigned int store32_count = 0;
+static unsigned int store64_count = 0;
+
+static unsigned int load8_count = 0;
+static unsigned int load16_count = 0;
+static unsigned int load32_count = 0;
+static unsigned int load64_count = 0;
+#endif
+
static const ModelExecution * get_execution()
{
return model->get_execution();
/** This function looks up the entry in the shadow table corresponding to a
* given address.*/
-static uint64_t * lookupAddressEntry(const void *address)
+static inline uint64_t * lookupAddressEntry(const void *address)
{
struct ShadowTable *currtable = root;
#if BIT48
ASSERT(readThread >= 0);
record->thread[0] = readThread;
record->readClock[0] = readClock;
+ } else {
+ record->thread = NULL;
}
if (shadowval & ATOMICMASK)
record->isAtomic = 1;
return hash;
}
-
bool race_equals(struct DataRace *r1, struct DataRace *r2) {
if (r1->numframes != r2->numframes)
return false;
/** This function is called when we detect a data race.*/
static struct DataRace * reportDataRace(thread_id_t oldthread, modelclock_t oldclock, bool isoldwrite, ModelAction *newaction, bool isnewwrite, const void *address)
{
+#ifdef REPORT_DATA_RACES
struct DataRace *race = (struct DataRace *)model_malloc(sizeof(struct DataRace));
race->oldthread = oldthread;
race->oldclock = oldclock;
race->isnewwrite = isnewwrite;
race->address = address;
return race;
+#else
+ return NULL;
+#endif
}
/**
}
/* Check for datarace against last write. */
-
{
modelclock_t writeClock = record->writeClock;
thread_id_t writeThread = record->writeThread;
goto Exit;
}
-
-
{
/* Check for datarace against last read. */
modelclock_t readClock = READVECTOR(shadowval);
{
/* Check for datarace against last write. */
-
modelclock_t writeClock = WRITEVECTOR(shadowval);
thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
Exit:
if (race) {
+#ifdef REPORT_DATA_RACES
race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
if (raceset->add(race))
assert_race(race);
else model_free(race);
+#else
+ model_free(race);
+#endif
}
}
-
/** This function does race detection for a write on an expanded record. */
struct DataRace * atomfullRaceCheckWrite(thread_id_t thread, const void *location, uint64_t *shadow, ClockVector *currClock)
{
{
/* Check for datarace against last read. */
-
modelclock_t readClock = READVECTOR(shadowval);
thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
{
/* Check for datarace against last write. */
-
modelclock_t writeClock = WRITEVECTOR(shadowval);
thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
Exit:
if (race) {
+#ifdef REPORT_DATA_RACES
race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
if (raceset->add(race))
assert_race(race);
else model_free(race);
+#else
+ model_free(race);
+#endif
}
}
/** This function just updates metadata on atomic write. */
void recordCalloc(void *location, size_t size) {
- thread_id_t thread = thread_current()->get_id();
+ thread_id_t thread = thread_current_id();
for(;size != 0;size--) {
uint64_t *shadow = lookupAddressEntry(location);
uint64_t shadowval = *shadow;
}
}
-
-
/** This function does race detection on a read for an expanded record. */
struct DataRace * fullRaceCheckRead(thread_id_t thread, const void *location, uint64_t *shadow, ClockVector *currClock)
{
if (clock_may_race(currClock, thread, readClock, readThread)) {
/* Still need this read in vector */
if (copytoindex != i) {
- ASSERT(record->thread[i] >= 0);
- record->readClock[copytoindex] = record->readClock[i];
- record->thread[copytoindex] = record->thread[i];
+ ASSERT(readThread >= 0);
+ record->readClock[copytoindex] = readClock;
+ record->thread[copytoindex] = readThread;
}
copytoindex++;
}
}
if (__builtin_popcount(copytoindex) <= 1) {
- if (copytoindex == 0) {
+ if (copytoindex == 0 && record->thread == NULL) {
int newCapacity = INITCAPACITY;
record->thread = (thread_id_t *)snapshot_malloc(sizeof(thread_id_t) * newCapacity);
record->readClock = (modelclock_t *)snapshot_malloc(sizeof(modelclock_t) * newCapacity);
int newCapacity = copytoindex * 2;
thread_id_t *newthread = (thread_id_t *)snapshot_malloc(sizeof(thread_id_t) * newCapacity);
modelclock_t *newreadClock = (modelclock_t *)snapshot_malloc(sizeof(modelclock_t) * newCapacity);
- std::memcpy(newthread, record->thread, copytoindex * sizeof(thread_id_t));
- std::memcpy(newreadClock, record->readClock, copytoindex * sizeof(modelclock_t));
+ real_memcpy(newthread, record->thread, copytoindex * sizeof(thread_id_t));
+ real_memcpy(newreadClock, record->readClock, copytoindex * sizeof(modelclock_t));
snapshot_free(record->readClock);
snapshot_free(record->thread);
record->readClock = newreadClock;
}
Exit:
if (race) {
+#ifdef REPORT_DATA_RACES
race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
if (raceset->add(race))
assert_race(race);
else model_free(race);
+#else
+ model_free(race);
+#endif
}
}
{
/* Check for datarace against last write. */
-
modelclock_t writeClock = WRITEVECTOR(shadowval);
thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
}
Exit:
if (race) {
+#ifdef REPORT_DATA_RACES
race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
if (raceset->add(race))
assert_race(race);
else model_free(race);
+#else
+ model_free(race);
+#endif
}
}
}
/* Check for datarace against last write. */
-
modelclock_t writeClock = WRITEVECTOR(shadowval);
thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
}
Exit:
if (race) {
+#ifdef REPORT_DATA_RACES
race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
if (raceset->add(race))
assert_race(race);
else model_free(race);
+#else
+ model_free(race);
+#endif
}
return shadow;
}
Exit:
if (race) {
+#ifdef REPORT_DATA_RACES
race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
if (raceset->add(race))
assert_race(race);
else model_free(race);
+#else
+ model_free(race);
+#endif
}
}
-
-
void raceCheckRead64(thread_id_t thread, const void *location)
{
+ int old_flag = GET_MODEL_FLAG;
+ ENTER_MODEL_FLAG;
+
uint64_t old_shadowval, new_shadowval;
old_shadowval = new_shadowval = INVALIDSHADOWVAL;
-
-
+#ifdef COLLECT_STAT
+ load64_count++;
+#endif
uint64_t * shadow = raceCheckRead_firstIt(thread, location, &old_shadowval, &new_shadowval);
if (CHECKBOUNDARY(location, 7)) {
if (shadow[1]==old_shadowval)
if (shadow[7]==old_shadowval)
shadow[7] = new_shadowval;
else goto L7;
+ RESTORE_MODEL_FLAG(old_flag);
return;
}
raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 6));
L7:
raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 7));
+ RESTORE_MODEL_FLAG(old_flag);
}
void raceCheckRead32(thread_id_t thread, const void *location)
{
+ int old_flag = GET_MODEL_FLAG;
+ ENTER_MODEL_FLAG;
+
uint64_t old_shadowval, new_shadowval;
old_shadowval = new_shadowval = INVALIDSHADOWVAL;
-
-
+#ifdef COLLECT_STAT
+ load32_count++;
+#endif
uint64_t * shadow = raceCheckRead_firstIt(thread, location, &old_shadowval, &new_shadowval);
if (CHECKBOUNDARY(location, 3)) {
if (shadow[1]==old_shadowval)
if (shadow[3]==old_shadowval)
shadow[3] = new_shadowval;
else goto L3;
+ RESTORE_MODEL_FLAG(old_flag);
return;
}
raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 2));
L3:
raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 3));
+ RESTORE_MODEL_FLAG(old_flag);
}
/** Race-detection entry point for a 2-byte load at `location` by `thread`.
 *  Byte 0 is checked by raceCheckRead_firstIt(), which returns a pointer
 *  into the shadow table and reports the shadow value it observed
 *  (old_shadowval) and the value it stored (new_shadowval).  If byte 1's
 *  shadow entry equals that observed value, it can be updated in place as
 *  a fast path; otherwise byte 1 falls back to raceCheckRead_otherIt(). */
void raceCheckRead16(thread_id_t thread, const void *location)
{
+ /* NOTE(review): the flag save/set/restore presumably marks "inside the
+  * model checker" so this instrumented access is not itself re-instrumented
+  * recursively — confirm GET/ENTER/RESTORE_MODEL_FLAG macro semantics. */
+ int old_flag = GET_MODEL_FLAG;
+ ENTER_MODEL_FLAG;
+
uint64_t old_shadowval, new_shadowval;
old_shadowval = new_shadowval = INVALIDSHADOWVAL;
-
-
+#ifdef COLLECT_STAT
+ load16_count++;
+#endif
uint64_t * shadow = raceCheckRead_firstIt(thread, location, &old_shadowval, &new_shadowval);
/* CHECKBOUNDARY(location, 1): presumably true when location+1 lies in the
 * same shadow-table region so shadow[1] is a valid entry — TODO confirm. */
if (CHECKBOUNDARY(location, 1)) {
if (shadow[1]==old_shadowval) {
/* Byte 1 had identical shadow state; reuse byte 0's computed result. */
shadow[1] = new_shadowval;
+ RESTORE_MODEL_FLAG(old_flag);
return;
}
}
/* Slow path: run the full per-byte check on the second byte. */
raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 1));
+ RESTORE_MODEL_FLAG(old_flag);
}
/** Race-detection entry point for a 1-byte load at `location` by `thread`.
 *  A single byte needs only the first-iteration check; the shadow pointer
 *  and old/new shadow values returned through the out-parameters are not
 *  needed here. */
void raceCheckRead8(thread_id_t thread, const void *location)
{
+ /* NOTE(review): presumably prevents recursive instrumentation while in
+  * model-checker code — confirm GET/ENTER/RESTORE_MODEL_FLAG semantics. */
+ int old_flag = GET_MODEL_FLAG;
+ ENTER_MODEL_FLAG;
+
uint64_t old_shadowval, new_shadowval;
old_shadowval = new_shadowval = INVALIDSHADOWVAL;
-
-
+#ifdef COLLECT_STAT
+ load8_count++;
+#endif
raceCheckRead_firstIt(thread, location, &old_shadowval, &new_shadowval);
+ RESTORE_MODEL_FLAG(old_flag);
}
static inline uint64_t * raceCheckWrite_firstIt(thread_id_t thread, const void * location, uint64_t *old_val, uint64_t *new_val)
Exit:
if (race) {
+#ifdef REPORT_DATA_RACES
race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
if (raceset->add(race))
assert_race(race);
else model_free(race);
+#else
+ model_free(race);
+#endif
}
return shadow;
Exit:
if (race) {
+#ifdef REPORT_DATA_RACES
race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
if (raceset->add(race))
assert_race(race);
else model_free(race);
+#else
+ model_free(race);
+#endif
}
}
void raceCheckWrite64(thread_id_t thread, const void *location)
{
+ int old_flag = GET_MODEL_FLAG;
+ ENTER_MODEL_FLAG;
uint64_t old_shadowval, new_shadowval;
old_shadowval = new_shadowval = INVALIDSHADOWVAL;
-
-
+#ifdef COLLECT_STAT
+ store64_count++;
+#endif
uint64_t * shadow = raceCheckWrite_firstIt(thread, location, &old_shadowval, &new_shadowval);
if (CHECKBOUNDARY(location, 7)) {
if (shadow[1]==old_shadowval)
if (shadow[7]==old_shadowval)
shadow[7] = new_shadowval;
else goto L7;
+ RESTORE_MODEL_FLAG(old_flag);
return;
}
raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 6));
L7:
raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 7));
+ RESTORE_MODEL_FLAG(old_flag);
}
void raceCheckWrite32(thread_id_t thread, const void *location)
{
+ int old_flag = GET_MODEL_FLAG;
+ ENTER_MODEL_FLAG;
+
uint64_t old_shadowval, new_shadowval;
old_shadowval = new_shadowval = INVALIDSHADOWVAL;
-
-
+#ifdef COLLECT_STAT
+ store32_count++;
+#endif
uint64_t * shadow = raceCheckWrite_firstIt(thread, location, &old_shadowval, &new_shadowval);
if (CHECKBOUNDARY(location, 3)) {
if (shadow[1]==old_shadowval)
if (shadow[3]==old_shadowval)
shadow[3] = new_shadowval;
else goto L3;
+ RESTORE_MODEL_FLAG(old_flag);
return;
}
raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 2));
L3:
raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 3));
+ RESTORE_MODEL_FLAG(old_flag);
}
/** Race-detection entry point for a 2-byte store at `location` by `thread`.
 *  Mirrors raceCheckRead16: byte 0 goes through raceCheckWrite_firstIt(),
 *  which reports the shadow value it observed (old_shadowval) and stored
 *  (new_shadowval); if byte 1's shadow entry matches the observed value it
 *  is updated in place, otherwise byte 1 is checked individually via
 *  raceCheckWrite_otherIt(). */
void raceCheckWrite16(thread_id_t thread, const void *location)
{
+ /* NOTE(review): flag save/set/restore presumably guards against
+  * re-entrant instrumentation inside the model checker — confirm the
+  * GET/ENTER/RESTORE_MODEL_FLAG macro semantics. */
+ int old_flag = GET_MODEL_FLAG;
+ ENTER_MODEL_FLAG;
+
uint64_t old_shadowval, new_shadowval;
old_shadowval = new_shadowval = INVALIDSHADOWVAL;
-
+#ifdef COLLECT_STAT
+ store16_count++;
+#endif
uint64_t * shadow = raceCheckWrite_firstIt(thread, location, &old_shadowval, &new_shadowval);
/* CHECKBOUNDARY(location, 1): presumably true when location+1 lies in the
 * same shadow-table region so shadow[1] is a valid entry — TODO confirm. */
if (CHECKBOUNDARY(location, 1)) {
if (shadow[1]==old_shadowval) {
/* Byte 1 had identical shadow state; reuse byte 0's computed result. */
shadow[1] = new_shadowval;
+ RESTORE_MODEL_FLAG(old_flag);
return;
}
}
/* Slow path: run the full per-byte check on the second byte. */
raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 1));
+ RESTORE_MODEL_FLAG(old_flag);
}
/** Race-detection entry point for a 1-byte store at `location` by `thread`.
 *  A single byte needs only the first-iteration check; the returned shadow
 *  pointer and old/new shadow values are not needed here. */
void raceCheckWrite8(thread_id_t thread, const void *location)
{
+ /* NOTE(review): presumably prevents recursive instrumentation while in
+  * model-checker code — confirm GET/ENTER/RESTORE_MODEL_FLAG semantics. */
+ int old_flag = GET_MODEL_FLAG;
+ ENTER_MODEL_FLAG;
+
uint64_t old_shadowval, new_shadowval;
old_shadowval = new_shadowval = INVALIDSHADOWVAL;
-
-
+#ifdef COLLECT_STAT
+ store8_count++;
+#endif
raceCheckWrite_firstIt(thread, location, &old_shadowval, &new_shadowval);
+ RESTORE_MODEL_FLAG(old_flag);
}
+
+#ifdef COLLECT_STAT
+/** Prints the per-width counters of instrumented plain (non-atomic) loads
+ *  and stores accumulated since startup.  Compiled only when statistics
+ *  collection is enabled. */
+void print_normal_accesses()
+{
+ model_print("store 8 count: %u\n", store8_count);
+ model_print("store 16 count: %u\n", store16_count);
+ model_print("store 32 count: %u\n", store32_count);
+ model_print("store 64 count: %u\n", store64_count);
+
+ model_print("load 8 count: %u\n", load8_count);
+ model_print("load 16 count: %u\n", load16_count);
+ model_print("load 32 count: %u\n", load32_count);
+ model_print("load 64 count: %u\n", load64_count);
+}
+#endif