Add support for converting normal writes into ModelActions after the fact
[c11tester.git] / datarace.cc
1 #include "datarace.h"
2 #include "model.h"
3 #include "threads-model.h"
4 #include <stdio.h>
5 #include <cstring>
6 #include "mymemory.h"
7 #include "clockvector.h"
8 #include "config.h"
9 #include "action.h"
10 #include "execution.h"
11 #include "stl-model.h"
12 #include <execinfo.h>
13
/* Root of the multi-level shadow table mapping addresses to race metadata. */
static struct ShadowTable *root;
/* Bump-allocation pool carved up by table_calloc(); [memory_base, memory_top). */
static void *memory_base;
static void *memory_top;
/* Set of data races reported so far, deduplicated by backtrace. */
static RaceSet * raceset;
18
/** @brief Convenience accessor for the model checker's current execution. */
static const ModelExecution * get_execution()
{
	return model->get_execution();
}
23
/** This function initializes the data race detector. */
void initRaceDetector()
{
	root = (struct ShadowTable *)snapshot_calloc(sizeof(struct ShadowTable), 1);
	/* Pre-reserve a contiguous pool of base tables; table_calloc() hands out
	 * pieces of [memory_base, memory_top) before falling back to the heap. */
	memory_base = snapshot_calloc(sizeof(struct ShadowBaseTable) * SHADOWBASETABLES, 1);
	memory_top = ((char *)memory_base) + sizeof(struct ShadowBaseTable) * SHADOWBASETABLES;
	raceset = new RaceSet();
}
32
33 void * table_calloc(size_t size)
34 {
35         if ((((char *)memory_base) + size) > memory_top) {
36                 return snapshot_calloc(size, 1);
37         } else {
38                 void *tmp = memory_base;
39                 memory_base = ((char *)memory_base) + size;
40                 return tmp;
41         }
42 }
43
/** This function looks up the entry in the shadow table corresponding to a
 * given address.
 *
 * The shadow memory is organized as a radix tree keyed on 16-bit slices of
 * the address; interior nodes are allocated lazily through table_calloc().
 * @return Pointer to the 64-bit shadow word for the byte at @p address. */
static uint64_t * lookupAddressEntry(const void *address)
{
	struct ShadowTable *currtable = root;
#if BIT48
	/* 48-bit address spaces add one extra level keyed on bits 32..47. */
	currtable = (struct ShadowTable *) currtable->array[(((uintptr_t)address) >> 32) & MASK16BIT];
	if (currtable == NULL) {
		currtable = (struct ShadowTable *)(root->array[(((uintptr_t)address) >> 32) & MASK16BIT] = table_calloc(sizeof(struct ShadowTable)));
	}
#endif

	/* Bits 16..31 select the base table; the low 16 bits index into it. */
	struct ShadowBaseTable *basetable = (struct ShadowBaseTable *)currtable->array[(((uintptr_t)address) >> 16) & MASK16BIT];
	if (basetable == NULL) {
		basetable = (struct ShadowBaseTable *)(currtable->array[(((uintptr_t)address) >> 16) & MASK16BIT] = table_calloc(sizeof(struct ShadowBaseTable)));
	}
	return &basetable->array[((uintptr_t)address) & MASK16BIT];
}
62
63
64 bool hasNonAtomicStore(const void *address) {
65         uint64_t * shadow = lookupAddressEntry(address);
66         uint64_t shadowval = *shadow;
67         if (ISSHORTRECORD(shadowval)) {
68                 //Do we have a non atomic write with a non-zero clock
69                 return ((WRITEVECTOR(shadowval) != 0) && !(ATOMICMASK & shadowval));
70         } else {
71                 struct RaceRecord *record = (struct RaceRecord *)shadowval;
72                 return !record->isAtomic && record->writeClock != 0;
73         }
74 }
75
76 void setAtomicStoreFlag(const void *address) {
77         uint64_t * shadow = lookupAddressEntry(address);
78         uint64_t shadowval = *shadow;
79         if (ISSHORTRECORD(shadowval)) {
80                 *shadow = shadowval | ATOMICMASK;
81         } else {
82                 struct RaceRecord *record = (struct RaceRecord *)shadowval;
83                 record->isAtomic = 1;
84         }
85 }
86
87 void getStoreThreadAndClock(const void *address, thread_id_t * thread, modelclock_t * clock) {
88         uint64_t * shadow = lookupAddressEntry(address);
89         uint64_t shadowval = *shadow;
90         if (ISSHORTRECORD(shadowval)) {
91                 //Do we have a non atomic write with a non-zero clock
92                 *thread = WRTHREADID(shadowval);
93                 *clock = WRITEVECTOR(shadowval);
94         } else {
95                 struct RaceRecord *record = (struct RaceRecord *)shadowval;
96                 *thread = record->writeThread;
97                 *clock = record->writeClock;
98         }
99 }
100
101 /**
102  * Compares a current clock-vector/thread-ID pair with a clock/thread-ID pair
103  * to check the potential for a data race.
104  * @param clock1 The current clock vector
105  * @param tid1 The current thread; paired with clock1
106  * @param clock2 The clock value for the potentially-racing action
107  * @param tid2 The thread ID for the potentially-racing action
108  * @return true if the current clock allows a race with the event at clock2/tid2
109  */
110 static bool clock_may_race(ClockVector *clock1, thread_id_t tid1,
111                                                                                                          modelclock_t clock2, thread_id_t tid2)
112 {
113         return tid1 != tid2 && clock2 != 0 && clock1->getClock(tid2) <= clock2;
114 }
115
/**
 * Expands a record from the compact form to the full form.  This is
 * necessary for multiple readers or for very large thread ids or time
 * stamps. */
static void expandRecord(uint64_t *shadow)
{
	uint64_t shadowval = *shadow;

	/* Decode the packed read/write thread-clock pairs from the short form. */
	modelclock_t readClock = READVECTOR(shadowval);
	thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
	modelclock_t writeClock = WRITEVECTOR(shadowval);
	thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));

	struct RaceRecord *record = (struct RaceRecord *)snapshot_calloc(1, sizeof(struct RaceRecord));
	record->writeThread = writeThread;
	record->writeClock = writeClock;

	/* A zero read clock means no read was recorded; only materialize the
	 * read arrays when there is a read to carry over. */
	if (readClock != 0) {
		record->thread = (thread_id_t *)snapshot_malloc(sizeof(thread_id_t) * INITCAPACITY);
		record->readClock = (modelclock_t *)snapshot_malloc(sizeof(modelclock_t) * INITCAPACITY);
		record->numReads = 1;
		ASSERT(readThread >= 0);
		record->thread[0] = readThread;
		record->readClock[0] = readClock;
	}
	/* Preserve the atomic-access flag from the compact encoding. */
	if (shadowval & ATOMICMASK)
		record->isAtomic = 1;
	/* The shadow word now stores a pointer to the expanded record. */
	*shadow = (uint64_t) record;
}
145
146 #define FIRST_STACK_FRAME 2
147
148 unsigned int race_hash(struct DataRace *race) {
149         unsigned int hash = 0;
150         for(int i=FIRST_STACK_FRAME;i < race->numframes;i++) {
151                 hash ^= ((uintptr_t)race->backtrace[i]);
152                 hash = (hash >> 3) | (hash << 29);
153         }
154         return hash;
155 }
156
157
158 bool race_equals(struct DataRace *r1, struct DataRace *r2) {
159         if (r1->numframes != r2->numframes)
160                 return false;
161         for(int i=FIRST_STACK_FRAME;i < r1->numframes;i++) {
162                 if (r1->backtrace[i] != r2->backtrace[i])
163                         return false;
164         }
165         return true;
166 }
167
168 /** This function is called when we detect a data race.*/
169 static struct DataRace * reportDataRace(thread_id_t oldthread, modelclock_t oldclock, bool isoldwrite, ModelAction *newaction, bool isnewwrite, const void *address)
170 {
171         struct DataRace *race = (struct DataRace *)model_malloc(sizeof(struct DataRace));
172         race->oldthread = oldthread;
173         race->oldclock = oldclock;
174         race->isoldwrite = isoldwrite;
175         race->newaction = newaction;
176         race->isnewwrite = isnewwrite;
177         race->address = address;
178         return race;
179 }
180
/**
 * @brief Assert a data race
 *
 * Asserts a data race which is currently realized, causing the execution to
 * end and stashing a message in the model-checker's bug list
 *
 * @param race The race to report
 */
void assert_race(struct DataRace *race)
{
	model_print("Race detected at location: \n");
	/* Symbolize the captured backtrace directly to the model output fd. */
	backtrace_symbols_fd(race->backtrace, race->numframes, model_out);
	/* NOTE(review): access 2's "clock" column prints the action's sequence
	 * number rather than a per-thread clock -- confirm this is intended. */
	model_print("\nData race detected @ address %p:\n"
							"    Access 1: %5s in thread %2d @ clock %3u\n"
							"    Access 2: %5s in thread %2d @ clock %3u\n\n",
							race->address,
							race->isoldwrite ? "write" : "read",
							id_to_int(race->oldthread),
							race->oldclock,
							race->isnewwrite ? "write" : "read",
							id_to_int(race->newaction->get_tid()),
							race->newaction->get_seq_number()
							);
}
205
/** This function does race detection for a write on an expanded record.
 *
 * Checks the new write against every recorded read and against the last
 * write, then installs this write as the new last-writer (clearing the read
 * vector, since the write happens-after all checked accesses).
 * @return the first race found, or NULL (at most one race is reported). */
struct DataRace * fullRaceCheckWrite(thread_id_t thread, void *location, uint64_t *shadow, ClockVector *currClock)
{
	struct RaceRecord *record = (struct RaceRecord *)(*shadow);
	struct DataRace * race = NULL;

	/* Check for datarace against last read. */

	for (int i = 0;i < record->numReads;i++) {
		modelclock_t readClock = record->readClock[i];
		thread_id_t readThread = record->thread[i];

		/* Note that readClock can't actually be zero here, so it could be
			 optimized. */

		if (clock_may_race(currClock, thread, readClock, readThread)) {
			/* We have a datarace */
			race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
			goto Exit;
		}
	}

	/* Check for datarace against last write. */

	{
		modelclock_t writeClock = record->writeClock;
		thread_id_t writeThread = record->writeThread;

		if (clock_may_race(currClock, thread, writeClock, writeThread)) {
			/* We have a datarace */
			race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
			goto Exit;
		}
	}
Exit:
	/* Install this (non-atomic) write as the new last access. */
	record->numReads = 0;
	record->writeThread = thread;
	record->isAtomic = 0;
	modelclock_t ourClock = currClock->getClock(thread);
	record->writeClock = ourClock;
	return race;
}
248
/** This function does race detection on a write.
 *
 * Fast path operates on the compact (short) shadow encoding; the record is
 * expanded when the thread id or clock no longer fits.  Any detected race
 * is deduplicated by backtrace and reported at most once. */
void raceCheckWrite(thread_id_t thread, void *location)
{
	uint64_t *shadow = lookupAddressEntry(location);
	uint64_t shadowval = *shadow;
	ClockVector *currClock = get_execution()->get_cv(thread);
	struct DataRace * race = NULL;
	/* Do full record */
	if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
		race = fullRaceCheckWrite(thread, location, shadow, currClock);
		goto Exit;
	}

	{
		int threadid = id_to_int(thread);
		modelclock_t ourClock = currClock->getClock(thread);

		/* Thread ID is too large or clock is too large. */
		if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
			expandRecord(shadow);
			race = fullRaceCheckWrite(thread, location, shadow, currClock);
			goto Exit;
		}



		{
			/* Check for datarace against last read. */

			modelclock_t readClock = READVECTOR(shadowval);
			thread_id_t readThread = int_to_id(RDTHREADID(shadowval));

			if (clock_may_race(currClock, thread, readClock, readThread)) {
				/* We have a datarace */
				race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
				goto ShadowExit;
			}
		}

		{
			/* Check for datarace against last write. */

			modelclock_t writeClock = WRITEVECTOR(shadowval);
			thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));

			if (clock_may_race(currClock, thread, writeClock, writeThread)) {
				/* We have a datarace */
				race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
				goto ShadowExit;
			}
		}

ShadowExit:
		/* Clear the read fields and record this thread/clock as the last
		 * writer (no ATOMICMASK: this is a plain write). */
		*shadow = ENCODEOP(0, 0, threadid, ourClock);
	}

Exit:
	if (race) {
		/* Capture the current stack; report each distinct race only once. */
		race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
		if (raceset->add(race))
			assert_race(race);
		else model_free(race);
	}
}
313
314 /** This function does race detection for a write on an expanded record. */
315 void fullRecordWrite(thread_id_t thread, void *location, uint64_t *shadow, ClockVector *currClock) {
316         struct RaceRecord *record = (struct RaceRecord *)(*shadow);
317         record->numReads = 0;
318         record->writeThread = thread;
319         modelclock_t ourClock = currClock->getClock(thread);
320         record->writeClock = ourClock;
321         record->isAtomic = 1;
322 }
323
324 /** This function just updates metadata on atomic write. */
325 void recordWrite(thread_id_t thread, void *location) {
326         uint64_t *shadow = lookupAddressEntry(location);
327         uint64_t shadowval = *shadow;
328         ClockVector *currClock = get_execution()->get_cv(thread);
329         /* Do full record */
330         if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
331                 fullRecordWrite(thread, location, shadow, currClock);
332                 return;
333         }
334
335         int threadid = id_to_int(thread);
336         modelclock_t ourClock = currClock->getClock(thread);
337
338         /* Thread ID is too large or clock is too large. */
339         if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
340                 expandRecord(shadow);
341                 fullRecordWrite(thread, location, shadow, currClock);
342                 return;
343         }
344
345         *shadow = ENCODEOP(0, 0, threadid, ourClock) | ATOMICMASK;
346 }
347
348
349
/** This function does race detection on a read for an expanded record.
 *
 * Checks the read against the last write, compacts the read vector by
 * dropping reads subsumed by this one, grows the arrays when needed, and
 * appends this read.  @return the race against the last write, or NULL. */
struct DataRace * fullRaceCheckRead(thread_id_t thread, const void *location, uint64_t *shadow, ClockVector *currClock)
{
	struct RaceRecord *record = (struct RaceRecord *) (*shadow);
	struct DataRace * race = NULL;
	/* Check for datarace against last write. */

	modelclock_t writeClock = record->writeClock;
	thread_id_t writeThread = record->writeThread;

	if (clock_may_race(currClock, thread, writeClock, writeThread)) {
		/* We have a datarace */
		race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
	}

	/* Shorten vector when possible */

	int copytoindex = 0;

	for (int i = 0;i < record->numReads;i++) {
		modelclock_t readClock = record->readClock[i];
		thread_id_t readThread = record->thread[i];

		/*  Note that this is not really a datarace check as reads cannot
				actually race.  It is just determining that this read subsumes
				another in the sense that either this read races or neither
				read races. Note that readClock can't actually be zero, so it
				could be optimized.  */

		if (clock_may_race(currClock, thread, readClock, readThread)) {
			/* Still need this read in vector */
			if (copytoindex != i) {
				ASSERT(record->thread[i] >= 0);
				record->readClock[copytoindex] = record->readClock[i];
				record->thread[copytoindex] = record->thread[i];
			}
			copytoindex++;
		}
	}

	/* NOTE(review): popcount <= 1 means copytoindex is 0 or a power of two,
	 * so the arrays are (re)sized only at power-of-two boundaries -- this is
	 * sound only if INITCAPACITY is a power of two; confirm in datarace.h. */
	if (__builtin_popcount(copytoindex) <= 1) {
		if (copytoindex == 0) {
			/* All prior reads subsumed: start fresh arrays.
			 * NOTE(review): any previous arrays are not freed here. */
			int newCapacity = INITCAPACITY;
			record->thread = (thread_id_t *)snapshot_malloc(sizeof(thread_id_t) * newCapacity);
			record->readClock = (modelclock_t *)snapshot_malloc(sizeof(modelclock_t) * newCapacity);
		} else if (copytoindex>=INITCAPACITY) {
			/* Reached capacity at a power-of-two boundary: double arrays. */
			int newCapacity = copytoindex * 2;
			thread_id_t *newthread = (thread_id_t *)snapshot_malloc(sizeof(thread_id_t) * newCapacity);
			modelclock_t *newreadClock = (modelclock_t *)snapshot_malloc(sizeof(modelclock_t) * newCapacity);
			std::memcpy(newthread, record->thread, copytoindex * sizeof(thread_id_t));
			std::memcpy(newreadClock, record->readClock, copytoindex * sizeof(modelclock_t));
			snapshot_free(record->readClock);
			snapshot_free(record->thread);
			record->readClock = newreadClock;
			record->thread = newthread;
		}
	}

	/* Append this read to the (possibly compacted) read vector. */
	modelclock_t ourClock = currClock->getClock(thread);

	ASSERT(thread >= 0);
	record->thread[copytoindex] = thread;
	record->readClock[copytoindex] = ourClock;
	record->numReads = copytoindex + 1;
	return race;
}
416
/** This function does race detection on a read.
 *
 * Fast path uses the compact shadow encoding (one recorded read).  The
 * record is expanded when ids/clocks overflow the encoding or when the
 * previously recorded read is not subsumed by this one. */
void raceCheckRead(thread_id_t thread, const void *location)
{
	uint64_t *shadow = lookupAddressEntry(location);
	uint64_t shadowval = *shadow;
	ClockVector *currClock = get_execution()->get_cv(thread);
	struct DataRace * race = NULL;

	/* Do full record */
	if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
		race = fullRaceCheckRead(thread, location, shadow, currClock);
		goto Exit;
	}

	{
		int threadid = id_to_int(thread);
		modelclock_t ourClock = currClock->getClock(thread);

		/* Thread ID is too large or clock is too large. */
		if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
			expandRecord(shadow);
			race = fullRaceCheckRead(thread, location, shadow, currClock);
			goto Exit;
		}

		/* Check for datarace against last write. */

		modelclock_t writeClock = WRITEVECTOR(shadowval);
		thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));

		if (clock_may_race(currClock, thread, writeClock, writeThread)) {
			/* We have a datarace */
			race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
			goto ShadowExit;
		}

ShadowExit:
		{
			/* If the previously recorded read may still race with a future
			 * access, this read does not subsume it; both must be kept, so
			 * the record is expanded to the full form. */
			modelclock_t readClock = READVECTOR(shadowval);
			thread_id_t readThread = int_to_id(RDTHREADID(shadowval));

			if (clock_may_race(currClock, thread, readClock, readThread)) {
				/* We don't subsume this read... Have to expand record. */
				expandRecord(shadow);
				fullRaceCheckRead(thread, location, shadow, currClock);
				goto Exit;
			}
		}

		/* Replace the recorded read with this one, keeping the last write
		 * and the atomic-access flag. */
		*shadow = ENCODEOP(threadid, ourClock, id_to_int(writeThread), writeClock) | (shadowval & ATOMICMASK);
	}
Exit:
	if (race) {
		/* Capture the stack; report each distinct race only once. */
		race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
		if (raceset->add(race))
			assert_race(race);
		else model_free(race);
	}
}