#include <execinfo.h>	/* backtrace(), backtrace_symbols_fd() */
#include <cstring>	/* std::memcpy */

#include "threads-model.h"
#include "clockvector.h"
#include "execution.h"
#include "stl-model.h"
14 static struct ShadowTable *root;
15 static void *memory_base;
16 static void *memory_top;
17 static RaceSet * raceset;
19 static const ModelExecution * get_execution()
21 return model->get_execution();
/** This function initializes the data race detector. */
25 void initRaceDetector()
27 root = (struct ShadowTable *)snapshot_calloc(sizeof(struct ShadowTable), 1);
28 memory_base = snapshot_calloc(sizeof(struct ShadowBaseTable) * SHADOWBASETABLES, 1);
29 memory_top = ((char *)memory_base) + sizeof(struct ShadowBaseTable) * SHADOWBASETABLES;
30 raceset = new RaceSet();
33 void * table_calloc(size_t size)
35 if ((((char *)memory_base) + size) > memory_top) {
36 return snapshot_calloc(size, 1);
38 void *tmp = memory_base;
39 memory_base = ((char *)memory_base) + size;
/** This function looks up the entry in the shadow table corresponding to a
 * given address. */
46 static uint64_t * lookupAddressEntry(const void *address)
48 struct ShadowTable *currtable = root;
50 currtable = (struct ShadowTable *) currtable->array[(((uintptr_t)address) >> 32) & MASK16BIT];
51 if (currtable == NULL) {
52 currtable = (struct ShadowTable *)(root->array[(((uintptr_t)address) >> 32) & MASK16BIT] = table_calloc(sizeof(struct ShadowTable)));
56 struct ShadowBaseTable *basetable = (struct ShadowBaseTable *)currtable->array[(((uintptr_t)address) >> 16) & MASK16BIT];
57 if (basetable == NULL) {
58 basetable = (struct ShadowBaseTable *)(currtable->array[(((uintptr_t)address) >> 16) & MASK16BIT] = table_calloc(sizeof(struct ShadowBaseTable)));
60 return &basetable->array[((uintptr_t)address) & MASK16BIT];
64 bool hasNonAtomicStore(const void *address) {
65 uint64_t * shadow = lookupAddressEntry(address);
66 uint64_t shadowval = *shadow;
67 if (ISSHORTRECORD(shadowval)) {
68 //Do we have a non atomic write with a non-zero clock
69 return ((WRITEVECTOR(shadowval) != 0) && !(ATOMICMASK & shadowval));
71 struct RaceRecord *record = (struct RaceRecord *)shadowval;
72 return !record->isAtomic && record->writeClock != 0;
76 void setAtomicStoreFlag(const void *address) {
77 uint64_t * shadow = lookupAddressEntry(address);
78 uint64_t shadowval = *shadow;
79 if (ISSHORTRECORD(shadowval)) {
80 *shadow = shadowval | ATOMICMASK;
82 struct RaceRecord *record = (struct RaceRecord *)shadowval;
87 void getStoreThreadAndClock(const void *address, thread_id_t * thread, modelclock_t * clock) {
88 uint64_t * shadow = lookupAddressEntry(address);
89 uint64_t shadowval = *shadow;
90 if (ISSHORTRECORD(shadowval)) {
91 //Do we have a non atomic write with a non-zero clock
92 *thread = WRTHREADID(shadowval);
93 *clock = WRITEVECTOR(shadowval);
95 struct RaceRecord *record = (struct RaceRecord *)shadowval;
96 *thread = record->writeThread;
97 *clock = record->writeClock;
102 * Compares a current clock-vector/thread-ID pair with a clock/thread-ID pair
103 * to check the potential for a data race.
104 * @param clock1 The current clock vector
105 * @param tid1 The current thread; paired with clock1
106 * @param clock2 The clock value for the potentially-racing action
107 * @param tid2 The thread ID for the potentially-racing action
108 * @return true if the current clock allows a race with the event at clock2/tid2
110 static bool clock_may_race(ClockVector *clock1, thread_id_t tid1,
111 modelclock_t clock2, thread_id_t tid2)
113 return tid1 != tid2 && clock2 != 0 && clock1->getClock(tid2) <= clock2;
117 * Expands a record from the compact form to the full form. This is
118 * necessary for multiple readers or for very large thread ids or time
120 static void expandRecord(uint64_t *shadow)
122 uint64_t shadowval = *shadow;
124 modelclock_t readClock = READVECTOR(shadowval);
125 thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
126 modelclock_t writeClock = WRITEVECTOR(shadowval);
127 thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
129 struct RaceRecord *record = (struct RaceRecord *)snapshot_calloc(1, sizeof(struct RaceRecord));
130 record->writeThread = writeThread;
131 record->writeClock = writeClock;
133 if (readClock != 0) {
134 record->thread = (thread_id_t *)snapshot_malloc(sizeof(thread_id_t) * INITCAPACITY);
135 record->readClock = (modelclock_t *)snapshot_malloc(sizeof(modelclock_t) * INITCAPACITY);
136 record->numReads = 1;
137 ASSERT(readThread >= 0);
138 record->thread[0] = readThread;
139 record->readClock[0] = readClock;
141 if (shadowval & ATOMICMASK)
142 record->isAtomic = 1;
143 *shadow = (uint64_t) record;
146 #define FIRST_STACK_FRAME 2
148 unsigned int race_hash(struct DataRace *race) {
149 unsigned int hash = 0;
150 for(int i=FIRST_STACK_FRAME;i < race->numframes;i++) {
151 hash ^= ((uintptr_t)race->backtrace[i]);
152 hash = (hash >> 3) | (hash << 29);
158 bool race_equals(struct DataRace *r1, struct DataRace *r2) {
159 if (r1->numframes != r2->numframes)
161 for(int i=FIRST_STACK_FRAME;i < r1->numframes;i++) {
162 if (r1->backtrace[i] != r2->backtrace[i])
168 /** This function is called when we detect a data race.*/
169 static struct DataRace * reportDataRace(thread_id_t oldthread, modelclock_t oldclock, bool isoldwrite, ModelAction *newaction, bool isnewwrite, const void *address)
171 struct DataRace *race = (struct DataRace *)model_malloc(sizeof(struct DataRace));
172 race->oldthread = oldthread;
173 race->oldclock = oldclock;
174 race->isoldwrite = isoldwrite;
175 race->newaction = newaction;
176 race->isnewwrite = isnewwrite;
177 race->address = address;
182 * @brief Assert a data race
184 * Asserts a data race which is currently realized, causing the execution to
185 * end and stashing a message in the model-checker's bug list
187 * @param race The race to report
189 void assert_race(struct DataRace *race)
191 model_print("Race detected at location: \n");
192 backtrace_symbols_fd(race->backtrace, race->numframes, model_out);
193 model_print("\nData race detected @ address %p:\n"
194 " Access 1: %5s in thread %2d @ clock %3u\n"
195 " Access 2: %5s in thread %2d @ clock %3u\n\n",
197 race->isoldwrite ? "write" : "read",
198 id_to_int(race->oldthread),
200 race->isnewwrite ? "write" : "read",
201 id_to_int(race->newaction->get_tid()),
202 race->newaction->get_seq_number()
206 /** This function does race detection for a write on an expanded record. */
207 struct DataRace * fullRaceCheckWrite(thread_id_t thread, void *location, uint64_t *shadow, ClockVector *currClock)
209 struct RaceRecord *record = (struct RaceRecord *)(*shadow);
210 struct DataRace * race = NULL;
212 /* Check for datarace against last read. */
214 for (int i = 0;i < record->numReads;i++) {
215 modelclock_t readClock = record->readClock[i];
216 thread_id_t readThread = record->thread[i];
218 /* Note that readClock can't actuall be zero here, so it could be
221 if (clock_may_race(currClock, thread, readClock, readThread)) {
222 /* We have a datarace */
223 race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
228 /* Check for datarace against last write. */
231 modelclock_t writeClock = record->writeClock;
232 thread_id_t writeThread = record->writeThread;
234 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
235 /* We have a datarace */
236 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
241 record->numReads = 0;
242 record->writeThread = thread;
243 record->isAtomic = 0;
244 modelclock_t ourClock = currClock->getClock(thread);
245 record->writeClock = ourClock;
249 /** This function does race detection on a write. */
250 void raceCheckWrite(thread_id_t thread, void *location)
252 uint64_t *shadow = lookupAddressEntry(location);
253 uint64_t shadowval = *shadow;
254 ClockVector *currClock = get_execution()->get_cv(thread);
255 struct DataRace * race = NULL;
257 if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
258 race = fullRaceCheckWrite(thread, location, shadow, currClock);
263 int threadid = id_to_int(thread);
264 modelclock_t ourClock = currClock->getClock(thread);
266 /* Thread ID is too large or clock is too large. */
267 if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
268 expandRecord(shadow);
269 race = fullRaceCheckWrite(thread, location, shadow, currClock);
276 /* Check for datarace against last read. */
278 modelclock_t readClock = READVECTOR(shadowval);
279 thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
281 if (clock_may_race(currClock, thread, readClock, readThread)) {
282 /* We have a datarace */
283 race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
289 /* Check for datarace against last write. */
291 modelclock_t writeClock = WRITEVECTOR(shadowval);
292 thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
294 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
295 /* We have a datarace */
296 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
302 *shadow = ENCODEOP(0, 0, threadid, ourClock);
307 race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
308 if (raceset->add(race))
310 else model_free(race);
/** This function updates the write metadata on an expanded record for an
 * atomic write (no race checking). */
315 void fullRecordWrite(thread_id_t thread, void *location, uint64_t *shadow, ClockVector *currClock) {
316 struct RaceRecord *record = (struct RaceRecord *)(*shadow);
317 record->numReads = 0;
318 record->writeThread = thread;
319 modelclock_t ourClock = currClock->getClock(thread);
320 record->writeClock = ourClock;
321 record->isAtomic = 1;
324 /** This function just updates metadata on atomic write. */
325 void recordWrite(thread_id_t thread, void *location) {
326 uint64_t *shadow = lookupAddressEntry(location);
327 uint64_t shadowval = *shadow;
328 ClockVector *currClock = get_execution()->get_cv(thread);
330 if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
331 fullRecordWrite(thread, location, shadow, currClock);
335 int threadid = id_to_int(thread);
336 modelclock_t ourClock = currClock->getClock(thread);
338 /* Thread ID is too large or clock is too large. */
339 if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
340 expandRecord(shadow);
341 fullRecordWrite(thread, location, shadow, currClock);
345 *shadow = ENCODEOP(0, 0, threadid, ourClock) | ATOMICMASK;
350 /** This function does race detection on a read for an expanded record. */
351 struct DataRace * fullRaceCheckRead(thread_id_t thread, const void *location, uint64_t *shadow, ClockVector *currClock)
353 struct RaceRecord *record = (struct RaceRecord *) (*shadow);
354 struct DataRace * race = NULL;
355 /* Check for datarace against last write. */
357 modelclock_t writeClock = record->writeClock;
358 thread_id_t writeThread = record->writeThread;
360 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
361 /* We have a datarace */
362 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
365 /* Shorten vector when possible */
369 for (int i = 0;i < record->numReads;i++) {
370 modelclock_t readClock = record->readClock[i];
371 thread_id_t readThread = record->thread[i];
373 /* Note that is not really a datarace check as reads cannot
374 actually race. It is just determining that this read subsumes
375 another in the sense that either this read races or neither
376 read races. Note that readClock can't actually be zero, so it
377 could be optimized. */
379 if (clock_may_race(currClock, thread, readClock, readThread)) {
380 /* Still need this read in vector */
381 if (copytoindex != i) {
382 ASSERT(record->thread[i] >= 0);
383 record->readClock[copytoindex] = record->readClock[i];
384 record->thread[copytoindex] = record->thread[i];
390 if (__builtin_popcount(copytoindex) <= 1) {
391 if (copytoindex == 0) {
392 int newCapacity = INITCAPACITY;
393 record->thread = (thread_id_t *)snapshot_malloc(sizeof(thread_id_t) * newCapacity);
394 record->readClock = (modelclock_t *)snapshot_malloc(sizeof(modelclock_t) * newCapacity);
395 } else if (copytoindex>=INITCAPACITY) {
396 int newCapacity = copytoindex * 2;
397 thread_id_t *newthread = (thread_id_t *)snapshot_malloc(sizeof(thread_id_t) * newCapacity);
398 modelclock_t *newreadClock = (modelclock_t *)snapshot_malloc(sizeof(modelclock_t) * newCapacity);
399 std::memcpy(newthread, record->thread, copytoindex * sizeof(thread_id_t));
400 std::memcpy(newreadClock, record->readClock, copytoindex * sizeof(modelclock_t));
401 snapshot_free(record->readClock);
402 snapshot_free(record->thread);
403 record->readClock = newreadClock;
404 record->thread = newthread;
408 modelclock_t ourClock = currClock->getClock(thread);
411 record->thread[copytoindex] = thread;
412 record->readClock[copytoindex] = ourClock;
413 record->numReads = copytoindex + 1;
417 /** This function does race detection on a read. */
418 void raceCheckRead(thread_id_t thread, const void *location)
420 uint64_t *shadow = lookupAddressEntry(location);
421 uint64_t shadowval = *shadow;
422 ClockVector *currClock = get_execution()->get_cv(thread);
423 struct DataRace * race = NULL;
426 if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
427 race = fullRaceCheckRead(thread, location, shadow, currClock);
432 int threadid = id_to_int(thread);
433 modelclock_t ourClock = currClock->getClock(thread);
435 /* Thread ID is too large or clock is too large. */
436 if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
437 expandRecord(shadow);
438 race = fullRaceCheckRead(thread, location, shadow, currClock);
442 /* Check for datarace against last write. */
444 modelclock_t writeClock = WRITEVECTOR(shadowval);
445 thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
447 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
448 /* We have a datarace */
449 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
455 modelclock_t readClock = READVECTOR(shadowval);
456 thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
458 if (clock_may_race(currClock, thread, readClock, readThread)) {
459 /* We don't subsume this read... Have to expand record. */
460 expandRecord(shadow);
461 fullRaceCheckRead(thread, location, shadow, currClock);
466 *shadow = ENCODEOP(threadid, ourClock, id_to_int(writeThread), writeClock) | (shadowval & ATOMICMASK);
470 race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
471 if (raceset->add(race))
473 else model_free(race);