3 #include "threads-model.h"
7 #include "clockvector.h"
10 #include "execution.h"
11 #include "stl-model.h"
/* File-scope state for the data-race detector's shadow memory. */
static struct ShadowTable *root;	// top-level shadow lookup table (indexed by address bits)
static void *memory_base;	// bump-allocator cursor for shadow base tables (see table_calloc)
static void *memory_top;	// end of the preallocated bump-allocator region
static RaceSet * raceset;	// set of already-reported races, used to deduplicate reports
19 static const ModelExecution * get_execution()
21 return model->get_execution();
24 /** This function initialized the data race detector. */
25 void initRaceDetector()
27 root = (struct ShadowTable *)snapshot_calloc(sizeof(struct ShadowTable), 1);
28 memory_base = snapshot_calloc(sizeof(struct ShadowBaseTable) * SHADOWBASETABLES, 1);
29 memory_top = ((char *)memory_base) + sizeof(struct ShadowBaseTable) * SHADOWBASETABLES;
30 raceset = new RaceSet();
33 void * table_calloc(size_t size)
35 if ((((char *)memory_base) + size) > memory_top) {
36 return snapshot_calloc(size, 1);
38 void *tmp = memory_base;
39 memory_base = ((char *)memory_base) + size;
44 /** This function looks up the entry in the shadow table corresponding to a
46 static uint64_t * lookupAddressEntry(const void *address)
48 struct ShadowTable *currtable = root;
50 currtable = (struct ShadowTable *) currtable->array[(((uintptr_t)address) >> 32) & MASK16BIT];
51 if (currtable == NULL) {
52 currtable = (struct ShadowTable *)(root->array[(((uintptr_t)address) >> 32) & MASK16BIT] = table_calloc(sizeof(struct ShadowTable)));
56 struct ShadowBaseTable *basetable = (struct ShadowBaseTable *)currtable->array[(((uintptr_t)address) >> 16) & MASK16BIT];
57 if (basetable == NULL) {
58 basetable = (struct ShadowBaseTable *)(currtable->array[(((uintptr_t)address) >> 16) & MASK16BIT] = table_calloc(sizeof(struct ShadowBaseTable)));
60 return &basetable->array[((uintptr_t)address) & MASK16BIT];
64 bool hasNonAtomicStore(const void *address) {
65 uint64_t * shadow = lookupAddressEntry(address);
66 uint64_t shadowval = *shadow;
67 if (ISSHORTRECORD(shadowval)) {
68 //Do we have a non atomic write with a non-zero clock
69 return !(ATOMICMASK & shadowval);
73 struct RaceRecord *record = (struct RaceRecord *)shadowval;
74 return !record->isAtomic;
78 void setAtomicStoreFlag(const void *address) {
79 uint64_t * shadow = lookupAddressEntry(address);
80 uint64_t shadowval = *shadow;
81 if (ISSHORTRECORD(shadowval)) {
82 *shadow = shadowval | ATOMICMASK;
85 *shadow = ATOMICMASK | ENCODEOP(0, 0, 0, 0);
88 struct RaceRecord *record = (struct RaceRecord *)shadowval;
93 void getStoreThreadAndClock(const void *address, thread_id_t * thread, modelclock_t * clock) {
94 uint64_t * shadow = lookupAddressEntry(address);
95 uint64_t shadowval = *shadow;
96 if (ISSHORTRECORD(shadowval) || shadowval == 0) {
97 //Do we have a non atomic write with a non-zero clock
98 *thread = WRTHREADID(shadowval);
99 *clock = WRITEVECTOR(shadowval);
101 struct RaceRecord *record = (struct RaceRecord *)shadowval;
102 *thread = record->writeThread;
103 *clock = record->writeClock;
108 * Compares a current clock-vector/thread-ID pair with a clock/thread-ID pair
109 * to check the potential for a data race.
110 * @param clock1 The current clock vector
111 * @param tid1 The current thread; paired with clock1
112 * @param clock2 The clock value for the potentially-racing action
113 * @param tid2 The thread ID for the potentially-racing action
114 * @return true if the current clock allows a race with the event at clock2/tid2
116 static bool clock_may_race(ClockVector *clock1, thread_id_t tid1,
117 modelclock_t clock2, thread_id_t tid2)
119 return tid1 != tid2 && clock2 != 0 && clock1->getClock(tid2) <= clock2;
123 * Expands a record from the compact form to the full form. This is
124 * necessary for multiple readers or for very large thread ids or time
126 static void expandRecord(uint64_t *shadow)
128 uint64_t shadowval = *shadow;
130 modelclock_t readClock = READVECTOR(shadowval);
131 thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
132 modelclock_t writeClock = WRITEVECTOR(shadowval);
133 thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
135 struct RaceRecord *record = (struct RaceRecord *)snapshot_calloc(1, sizeof(struct RaceRecord));
136 record->writeThread = writeThread;
137 record->writeClock = writeClock;
139 if (readClock != 0) {
140 record->thread = (thread_id_t *)snapshot_malloc(sizeof(thread_id_t) * INITCAPACITY);
141 record->readClock = (modelclock_t *)snapshot_malloc(sizeof(modelclock_t) * INITCAPACITY);
142 record->numReads = 1;
143 ASSERT(readThread >= 0);
144 record->thread[0] = readThread;
145 record->readClock[0] = readClock;
147 if (shadowval & ATOMICMASK)
148 record->isAtomic = 1;
149 *shadow = (uint64_t) record;
152 #define FIRST_STACK_FRAME 2
154 unsigned int race_hash(struct DataRace *race) {
155 unsigned int hash = 0;
156 for(int i=FIRST_STACK_FRAME;i < race->numframes;i++) {
157 hash ^= ((uintptr_t)race->backtrace[i]);
158 hash = (hash >> 3) | (hash << 29);
163 bool race_equals(struct DataRace *r1, struct DataRace *r2) {
164 if (r1->numframes != r2->numframes)
166 for(int i=FIRST_STACK_FRAME;i < r1->numframes;i++) {
167 if (r1->backtrace[i] != r2->backtrace[i])
173 /** This function is called when we detect a data race.*/
174 static struct DataRace * reportDataRace(thread_id_t oldthread, modelclock_t oldclock, bool isoldwrite, ModelAction *newaction, bool isnewwrite, const void *address)
176 struct DataRace *race = (struct DataRace *)model_malloc(sizeof(struct DataRace));
177 race->oldthread = oldthread;
178 race->oldclock = oldclock;
179 race->isoldwrite = isoldwrite;
180 race->newaction = newaction;
181 race->isnewwrite = isnewwrite;
182 race->address = address;
187 * @brief Assert a data race
189 * Asserts a data race which is currently realized, causing the execution to
190 * end and stashing a message in the model-checker's bug list
192 * @param race The race to report
194 void assert_race(struct DataRace *race)
196 model_print("Race detected at location: \n");
197 backtrace_symbols_fd(race->backtrace, race->numframes, model_out);
198 model_print("\nData race detected @ address %p:\n"
199 " Access 1: %5s in thread %2d @ clock %3u\n"
200 " Access 2: %5s in thread %2d @ clock %3u\n\n",
202 race->isoldwrite ? "write" : "read",
203 id_to_int(race->oldthread),
205 race->isnewwrite ? "write" : "read",
206 id_to_int(race->newaction->get_tid()),
207 race->newaction->get_seq_number()
211 /** This function does race detection for a write on an expanded record. */
212 struct DataRace * fullRaceCheckWrite(thread_id_t thread, const void *location, uint64_t *shadow, ClockVector *currClock)
214 struct RaceRecord *record = (struct RaceRecord *)(*shadow);
215 struct DataRace * race = NULL;
217 /* Check for datarace against last read. */
219 for (int i = 0;i < record->numReads;i++) {
220 modelclock_t readClock = record->readClock[i];
221 thread_id_t readThread = record->thread[i];
223 /* Note that readClock can't actuall be zero here, so it could be
226 if (clock_may_race(currClock, thread, readClock, readThread)) {
227 /* We have a datarace */
228 race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
233 /* Check for datarace against last write. */
235 modelclock_t writeClock = record->writeClock;
236 thread_id_t writeThread = record->writeThread;
238 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
239 /* We have a datarace */
240 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
245 record->numReads = 0;
246 record->writeThread = thread;
247 record->isAtomic = 0;
248 modelclock_t ourClock = currClock->getClock(thread);
249 record->writeClock = ourClock;
253 /** This function does race detection on a write. */
254 void raceCheckWrite(thread_id_t thread, void *location)
256 uint64_t *shadow = lookupAddressEntry(location);
257 uint64_t shadowval = *shadow;
258 ClockVector *currClock = get_execution()->get_cv(thread);
259 if (currClock == NULL)
262 struct DataRace * race = NULL;
264 if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
265 race = fullRaceCheckWrite(thread, location, shadow, currClock);
270 int threadid = id_to_int(thread);
271 modelclock_t ourClock = currClock->getClock(thread);
273 /* Thread ID is too large or clock is too large. */
274 if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
275 expandRecord(shadow);
276 race = fullRaceCheckWrite(thread, location, shadow, currClock);
281 /* Check for datarace against last read. */
282 modelclock_t readClock = READVECTOR(shadowval);
283 thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
285 if (clock_may_race(currClock, thread, readClock, readThread)) {
286 /* We have a datarace */
287 race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
293 /* Check for datarace against last write. */
294 modelclock_t writeClock = WRITEVECTOR(shadowval);
295 thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
297 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
298 /* We have a datarace */
299 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
305 *shadow = ENCODEOP(0, 0, threadid, ourClock);
310 race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
311 if (raceset->add(race))
313 else model_free(race);
317 /** This function does race detection for a write on an expanded record. */
318 struct DataRace * atomfullRaceCheckWrite(thread_id_t thread, const void *location, uint64_t *shadow, ClockVector *currClock)
320 struct RaceRecord *record = (struct RaceRecord *)(*shadow);
321 struct DataRace * race = NULL;
323 if (record->isAtomic)
326 /* Check for datarace against last read. */
328 for (int i = 0;i < record->numReads;i++) {
329 modelclock_t readClock = record->readClock[i];
330 thread_id_t readThread = record->thread[i];
332 /* Note that readClock can't actuall be zero here, so it could be
335 if (clock_may_race(currClock, thread, readClock, readThread)) {
336 /* We have a datarace */
337 race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
342 /* Check for datarace against last write. */
345 modelclock_t writeClock = record->writeClock;
346 thread_id_t writeThread = record->writeThread;
348 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
349 /* We have a datarace */
350 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
355 record->numReads = 0;
356 record->writeThread = thread;
357 record->isAtomic = 1;
358 modelclock_t ourClock = currClock->getClock(thread);
359 record->writeClock = ourClock;
363 /** This function does race detection on a write. */
364 void atomraceCheckWrite(thread_id_t thread, void *location)
366 uint64_t *shadow = lookupAddressEntry(location);
367 uint64_t shadowval = *shadow;
368 ClockVector *currClock = get_execution()->get_cv(thread);
369 if (currClock == NULL)
372 struct DataRace * race = NULL;
374 if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
375 race = atomfullRaceCheckWrite(thread, location, shadow, currClock);
380 int threadid = id_to_int(thread);
381 modelclock_t ourClock = currClock->getClock(thread);
383 /* Thread ID is too large or clock is too large. */
384 if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
385 expandRecord(shadow);
386 race = atomfullRaceCheckWrite(thread, location, shadow, currClock);
390 /* Can't race with atomic */
391 if (shadowval & ATOMICMASK)
395 /* Check for datarace against last read. */
396 modelclock_t readClock = READVECTOR(shadowval);
397 thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
399 if (clock_may_race(currClock, thread, readClock, readThread)) {
400 /* We have a datarace */
401 race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
407 /* Check for datarace against last write. */
408 modelclock_t writeClock = WRITEVECTOR(shadowval);
409 thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
411 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
412 /* We have a datarace */
413 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
419 *shadow = ENCODEOP(0, 0, threadid, ourClock) | ATOMICMASK;
424 race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
425 if (raceset->add(race))
427 else model_free(race);
431 /** This function does race detection for a write on an expanded record. */
432 void fullRecordWrite(thread_id_t thread, void *location, uint64_t *shadow, ClockVector *currClock) {
433 struct RaceRecord *record = (struct RaceRecord *)(*shadow);
434 record->numReads = 0;
435 record->writeThread = thread;
436 modelclock_t ourClock = currClock->getClock(thread);
437 record->writeClock = ourClock;
438 record->isAtomic = 1;
441 /** This function does race detection for a write on an expanded record. */
442 void fullRecordWriteNonAtomic(thread_id_t thread, void *location, uint64_t *shadow, ClockVector *currClock) {
443 struct RaceRecord *record = (struct RaceRecord *)(*shadow);
444 record->numReads = 0;
445 record->writeThread = thread;
446 modelclock_t ourClock = currClock->getClock(thread);
447 record->writeClock = ourClock;
448 record->isAtomic = 0;
451 /** This function just updates metadata on atomic write. */
452 void recordWrite(thread_id_t thread, void *location) {
453 uint64_t *shadow = lookupAddressEntry(location);
454 uint64_t shadowval = *shadow;
455 ClockVector *currClock = get_execution()->get_cv(thread);
457 if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
458 fullRecordWrite(thread, location, shadow, currClock);
462 int threadid = id_to_int(thread);
463 modelclock_t ourClock = currClock->getClock(thread);
465 /* Thread ID is too large or clock is too large. */
466 if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
467 expandRecord(shadow);
468 fullRecordWrite(thread, location, shadow, currClock);
472 *shadow = ENCODEOP(0, 0, threadid, ourClock) | ATOMICMASK;
475 /** This function just updates metadata on atomic write. */
476 void recordCalloc(void *location, size_t size) {
477 thread_id_t thread = thread_current()->get_id();
478 for(;size != 0;size--) {
479 uint64_t *shadow = lookupAddressEntry(location);
480 uint64_t shadowval = *shadow;
481 ClockVector *currClock = get_execution()->get_cv(thread);
483 if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
484 fullRecordWriteNonAtomic(thread, location, shadow, currClock);
488 int threadid = id_to_int(thread);
489 modelclock_t ourClock = currClock->getClock(thread);
491 /* Thread ID is too large or clock is too large. */
492 if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
493 expandRecord(shadow);
494 fullRecordWriteNonAtomic(thread, location, shadow, currClock);
498 *shadow = ENCODEOP(0, 0, threadid, ourClock);
499 location = (void *)(((char *) location) + 1);
503 /** This function does race detection on a read for an expanded record. */
504 struct DataRace * fullRaceCheckRead(thread_id_t thread, const void *location, uint64_t *shadow, ClockVector *currClock)
506 struct RaceRecord *record = (struct RaceRecord *) (*shadow);
507 struct DataRace * race = NULL;
508 /* Check for datarace against last write. */
510 modelclock_t writeClock = record->writeClock;
511 thread_id_t writeThread = record->writeThread;
513 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
514 /* We have a datarace */
515 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
518 /* Shorten vector when possible */
522 for (int i = 0;i < record->numReads;i++) {
523 modelclock_t readClock = record->readClock[i];
524 thread_id_t readThread = record->thread[i];
526 /* Note that is not really a datarace check as reads cannot
527 actually race. It is just determining that this read subsumes
528 another in the sense that either this read races or neither
529 read races. Note that readClock can't actually be zero, so it
530 could be optimized. */
532 if (clock_may_race(currClock, thread, readClock, readThread)) {
533 /* Still need this read in vector */
534 if (copytoindex != i) {
535 ASSERT(record->thread[i] >= 0);
536 record->readClock[copytoindex] = record->readClock[i];
537 record->thread[copytoindex] = record->thread[i];
543 if (__builtin_popcount(copytoindex) <= 1) {
544 if (copytoindex == 0) {
545 int newCapacity = INITCAPACITY;
546 record->thread = (thread_id_t *)snapshot_malloc(sizeof(thread_id_t) * newCapacity);
547 record->readClock = (modelclock_t *)snapshot_malloc(sizeof(modelclock_t) * newCapacity);
548 } else if (copytoindex>=INITCAPACITY) {
549 int newCapacity = copytoindex * 2;
550 thread_id_t *newthread = (thread_id_t *)snapshot_malloc(sizeof(thread_id_t) * newCapacity);
551 modelclock_t *newreadClock = (modelclock_t *)snapshot_malloc(sizeof(modelclock_t) * newCapacity);
552 std::memcpy(newthread, record->thread, copytoindex * sizeof(thread_id_t));
553 std::memcpy(newreadClock, record->readClock, copytoindex * sizeof(modelclock_t));
554 snapshot_free(record->readClock);
555 snapshot_free(record->thread);
556 record->readClock = newreadClock;
557 record->thread = newthread;
561 modelclock_t ourClock = currClock->getClock(thread);
564 record->thread[copytoindex] = thread;
565 record->readClock[copytoindex] = ourClock;
566 record->numReads = copytoindex + 1;
570 /** This function does race detection on a read. */
571 void raceCheckRead(thread_id_t thread, const void *location)
573 uint64_t *shadow = lookupAddressEntry(location);
574 uint64_t shadowval = *shadow;
575 ClockVector *currClock = get_execution()->get_cv(thread);
576 if (currClock == NULL)
579 struct DataRace * race = NULL;
582 if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
583 race = fullRaceCheckRead(thread, location, shadow, currClock);
588 int threadid = id_to_int(thread);
589 modelclock_t ourClock = currClock->getClock(thread);
591 /* Thread ID is too large or clock is too large. */
592 if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
593 expandRecord(shadow);
594 race = fullRaceCheckRead(thread, location, shadow, currClock);
598 /* Check for datarace against last write. */
600 modelclock_t writeClock = WRITEVECTOR(shadowval);
601 thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
603 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
604 /* We have a datarace */
605 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
611 modelclock_t readClock = READVECTOR(shadowval);
612 thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
614 if (clock_may_race(currClock, thread, readClock, readThread)) {
615 /* We don't subsume this read... Have to expand record. */
616 expandRecord(shadow);
617 fullRaceCheckRead(thread, location, shadow, currClock);
622 *shadow = ENCODEOP(threadid, ourClock, id_to_int(writeThread), writeClock) | (shadowval & ATOMICMASK);
626 race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
627 if (raceset->add(race))
629 else model_free(race);
634 /** This function does race detection on a read for an expanded record. */
635 struct DataRace * atomfullRaceCheckRead(thread_id_t thread, const void *location, uint64_t *shadow, ClockVector *currClock)
637 struct RaceRecord *record = (struct RaceRecord *) (*shadow);
638 struct DataRace * race = NULL;
639 /* Check for datarace against last write. */
640 if (record->isAtomic)
643 modelclock_t writeClock = record->writeClock;
644 thread_id_t writeThread = record->writeThread;
646 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
647 /* We have a datarace */
648 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
653 /** This function does race detection on a read. */
654 void atomraceCheckRead(thread_id_t thread, const void *location)
656 uint64_t *shadow = lookupAddressEntry(location);
657 uint64_t shadowval = *shadow;
658 ClockVector *currClock = get_execution()->get_cv(thread);
659 if (currClock == NULL)
662 struct DataRace * race = NULL;
665 if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
666 race = atomfullRaceCheckRead(thread, location, shadow, currClock);
670 if (shadowval & ATOMICMASK)
674 /* Check for datarace against last write. */
675 modelclock_t writeClock = WRITEVECTOR(shadowval);
676 thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
678 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
679 /* We have a datarace */
680 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
688 race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
689 if (raceset->add(race))
691 else model_free(race);
695 static inline uint64_t * raceCheckRead_firstIt(thread_id_t thread, const void * location, uint64_t *old_val, uint64_t *new_val)
697 uint64_t *shadow = lookupAddressEntry(location);
698 uint64_t shadowval = *shadow;
700 ClockVector *currClock = get_execution()->get_cv(thread);
701 if (currClock == NULL)
704 struct DataRace * race = NULL;
707 if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
708 race = fullRaceCheckRead(thread, location, shadow, currClock);
713 int threadid = id_to_int(thread);
714 modelclock_t ourClock = currClock->getClock(thread);
716 /* Thread ID is too large or clock is too large. */
717 if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
718 expandRecord(shadow);
719 race = fullRaceCheckRead(thread, location, shadow, currClock);
723 /* Check for datarace against last write. */
724 modelclock_t writeClock = WRITEVECTOR(shadowval);
725 thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
727 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
728 /* We have a datarace */
729 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
732 modelclock_t readClock = READVECTOR(shadowval);
733 thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
735 if (clock_may_race(currClock, thread, readClock, readThread)) {
736 /* We don't subsume this read... Have to expand record. */
737 expandRecord(shadow);
738 struct RaceRecord *record = (struct RaceRecord *) (*shadow);
739 record->thread[1] = thread;
740 record->readClock[1] = ourClock;
746 *shadow = ENCODEOP(threadid, ourClock, id_to_int(writeThread), writeClock) | (shadowval & ATOMICMASK);
748 *old_val = shadowval;
753 race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
754 if (raceset->add(race))
756 else model_free(race);
762 static inline void raceCheckRead_otherIt(thread_id_t thread, const void * location) {
763 uint64_t *shadow = lookupAddressEntry(location);
765 uint64_t shadowval = *shadow;
767 ClockVector *currClock = get_execution()->get_cv(thread);
768 if (currClock == NULL)
771 struct DataRace * race = NULL;
774 if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
775 race = fullRaceCheckRead(thread, location, shadow, currClock);
780 int threadid = id_to_int(thread);
781 modelclock_t ourClock = currClock->getClock(thread);
783 /* Thread ID is too large or clock is too large. */
784 if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
785 expandRecord(shadow);
786 race = fullRaceCheckRead(thread, location, shadow, currClock);
790 /* Check for datarace against last write. */
791 modelclock_t writeClock = WRITEVECTOR(shadowval);
792 thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
794 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
795 /* We have a datarace */
796 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
799 modelclock_t readClock = READVECTOR(shadowval);
800 thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
802 if (clock_may_race(currClock, thread, readClock, readThread)) {
803 /* We don't subsume this read... Have to expand record. */
804 expandRecord(shadow);
805 struct RaceRecord *record = (struct RaceRecord *) (*shadow);
806 record->thread[1] = thread;
807 record->readClock[1] = ourClock;
813 *shadow = ENCODEOP(threadid, ourClock, id_to_int(writeThread), writeClock) | (shadowval & ATOMICMASK);
817 race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
818 if (raceset->add(race))
820 else model_free(race);
824 void raceCheckRead64(thread_id_t thread, const void *location)
826 uint64_t old_shadowval, new_shadowval;
827 old_shadowval = new_shadowval = INVALIDSHADOWVAL;
829 uint64_t * shadow = raceCheckRead_firstIt(thread, location, &old_shadowval, &new_shadowval);
830 if (CHECKBOUNDARY(location, 7)) {
831 if (shadow[1]==old_shadowval)
832 shadow[1] = new_shadowval;
834 if (shadow[2]==old_shadowval)
835 shadow[2] = new_shadowval;
837 if (shadow[3]==old_shadowval)
838 shadow[3] = new_shadowval;
840 if (shadow[4]==old_shadowval)
841 shadow[4] = new_shadowval;
843 if (shadow[5]==old_shadowval)
844 shadow[5] = new_shadowval;
846 if (shadow[6]==old_shadowval)
847 shadow[6] = new_shadowval;
849 if (shadow[7]==old_shadowval)
850 shadow[7] = new_shadowval;
856 raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 1));
858 raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 2));
860 raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 3));
862 raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 4));
864 raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 5));
866 raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 6));
868 raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 7));
871 void raceCheckRead32(thread_id_t thread, const void *location)
873 uint64_t old_shadowval, new_shadowval;
874 old_shadowval = new_shadowval = INVALIDSHADOWVAL;
876 uint64_t * shadow = raceCheckRead_firstIt(thread, location, &old_shadowval, &new_shadowval);
877 if (CHECKBOUNDARY(location, 3)) {
878 if (shadow[1]==old_shadowval)
879 shadow[1] = new_shadowval;
881 if (shadow[2]==old_shadowval)
882 shadow[2] = new_shadowval;
884 if (shadow[3]==old_shadowval)
885 shadow[3] = new_shadowval;
891 raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 1));
893 raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 2));
895 raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 3));
898 void raceCheckRead16(thread_id_t thread, const void *location)
900 uint64_t old_shadowval, new_shadowval;
901 old_shadowval = new_shadowval = INVALIDSHADOWVAL;
904 uint64_t * shadow = raceCheckRead_firstIt(thread, location, &old_shadowval, &new_shadowval);
905 if (CHECKBOUNDARY(location, 1)) {
906 if (shadow[1]==old_shadowval) {
907 shadow[1] = new_shadowval;
911 raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 1));
914 void raceCheckRead8(thread_id_t thread, const void *location)
916 uint64_t old_shadowval, new_shadowval;
917 old_shadowval = new_shadowval = INVALIDSHADOWVAL;
919 raceCheckRead_firstIt(thread, location, &old_shadowval, &new_shadowval);
922 static inline uint64_t * raceCheckWrite_firstIt(thread_id_t thread, const void * location, uint64_t *old_val, uint64_t *new_val)
924 uint64_t *shadow = lookupAddressEntry(location);
925 uint64_t shadowval = *shadow;
926 ClockVector *currClock = get_execution()->get_cv(thread);
927 if (currClock == NULL)
930 struct DataRace * race = NULL;
932 if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
933 race = fullRaceCheckWrite(thread, location, shadow, currClock);
938 int threadid = id_to_int(thread);
939 modelclock_t ourClock = currClock->getClock(thread);
941 /* Thread ID is too large or clock is too large. */
942 if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
943 expandRecord(shadow);
944 race = fullRaceCheckWrite(thread, location, shadow, currClock);
949 /* Check for datarace against last read. */
950 modelclock_t readClock = READVECTOR(shadowval);
951 thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
953 if (clock_may_race(currClock, thread, readClock, readThread)) {
954 /* We have a datarace */
955 race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
961 /* Check for datarace against last write. */
962 modelclock_t writeClock = WRITEVECTOR(shadowval);
963 thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
965 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
966 /* We have a datarace */
967 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
973 *shadow = ENCODEOP(0, 0, threadid, ourClock);
975 *old_val = shadowval;
981 race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
982 if (raceset->add(race))
984 else model_free(race);
990 static inline void raceCheckWrite_otherIt(thread_id_t thread, const void * location) {
991 uint64_t *shadow = lookupAddressEntry(location);
993 uint64_t shadowval = *shadow;
995 ClockVector *currClock = get_execution()->get_cv(thread);
996 if (currClock == NULL)
999 struct DataRace * race = NULL;
1000 /* Do full record */
1001 if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
1002 race = fullRaceCheckWrite(thread, location, shadow, currClock);
1007 int threadid = id_to_int(thread);
1008 modelclock_t ourClock = currClock->getClock(thread);
1010 /* Thread ID is too large or clock is too large. */
1011 if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
1012 expandRecord(shadow);
1013 race = fullRaceCheckWrite(thread, location, shadow, currClock);
1018 /* Check for datarace against last read. */
1019 modelclock_t readClock = READVECTOR(shadowval);
1020 thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
1022 if (clock_may_race(currClock, thread, readClock, readThread)) {
1023 /* We have a datarace */
1024 race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
1030 /* Check for datarace against last write. */
1031 modelclock_t writeClock = WRITEVECTOR(shadowval);
1032 thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
1034 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
1035 /* We have a datarace */
1036 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
1042 *shadow = ENCODEOP(0, 0, threadid, ourClock);
1047 race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
1048 if (raceset->add(race))
1050 else model_free(race);
1054 void raceCheckWrite64(thread_id_t thread, const void *location)
1056 uint64_t old_shadowval, new_shadowval;
1057 old_shadowval = new_shadowval = INVALIDSHADOWVAL;
1059 uint64_t * shadow = raceCheckWrite_firstIt(thread, location, &old_shadowval, &new_shadowval);
1060 if (CHECKBOUNDARY(location, 7)) {
1061 if (shadow[1]==old_shadowval)
1062 shadow[1] = new_shadowval;
1064 if (shadow[2]==old_shadowval)
1065 shadow[2] = new_shadowval;
1067 if (shadow[3]==old_shadowval)
1068 shadow[3] = new_shadowval;
1070 if (shadow[4]==old_shadowval)
1071 shadow[4] = new_shadowval;
1073 if (shadow[5]==old_shadowval)
1074 shadow[5] = new_shadowval;
1076 if (shadow[6]==old_shadowval)
1077 shadow[6] = new_shadowval;
1079 if (shadow[7]==old_shadowval)
1080 shadow[7] = new_shadowval;
1086 raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 1));
1088 raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 2));
1090 raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 3));
1092 raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 4));
1094 raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 5));
1096 raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 6));
1098 raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 7));
1101 void raceCheckWrite32(thread_id_t thread, const void *location)
1103 uint64_t old_shadowval, new_shadowval;
1104 old_shadowval = new_shadowval = INVALIDSHADOWVAL;
1106 uint64_t * shadow = raceCheckWrite_firstIt(thread, location, &old_shadowval, &new_shadowval);
1107 if (CHECKBOUNDARY(location, 3)) {
1108 if (shadow[1]==old_shadowval)
1109 shadow[1] = new_shadowval;
1111 if (shadow[2]==old_shadowval)
1112 shadow[2] = new_shadowval;
1114 if (shadow[3]==old_shadowval)
1115 shadow[3] = new_shadowval;
1121 raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 1));
1123 raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 2));
1125 raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 3));
1128 void raceCheckWrite16(thread_id_t thread, const void *location)
1130 uint64_t old_shadowval, new_shadowval;
1131 old_shadowval = new_shadowval = INVALIDSHADOWVAL;
1133 uint64_t * shadow = raceCheckWrite_firstIt(thread, location, &old_shadowval, &new_shadowval);
1134 if (CHECKBOUNDARY(location, 1)) {
1135 if (shadow[1]==old_shadowval) {
1136 shadow[1] = new_shadowval;
1140 raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 1));
1143 void raceCheckWrite8(thread_id_t thread, const void *location)
1145 uint64_t old_shadowval, new_shadowval;
1146 old_shadowval = new_shadowval = INVALIDSHADOWVAL;
1148 raceCheckWrite_firstIt(thread, location, &old_shadowval, &new_shadowval);