#include <inttypes.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <ucontext.h>

#include "hashtable.h"
#include "snapshot.h"
#include "mymemory.h"
#include "common.h"
#define FAILURE(mesg) { model_print("failed in the API: %s with errno relative message: %s\n", mesg, strerror(errno)); exit(EXIT_FAILURE); }
#ifdef CONFIG_SSDEBUG
#define SSDEBUG		model_print
#else
#define SSDEBUG(...)	do { } while (0)
#endif

/* Size (in bytes) of the dedicated signal-handler stack installed by initSnapshotLibrary */
#define SIGSTACKSIZE 65536
#if USE_MPROTECT_SNAPSHOT
/* Each SnapShotRecord lists the first backing page that must be written back
 * to revert to that snapshot */
struct SnapShotRecord {
	unsigned int firstBackingPage;
};
/** @brief Backing store page */
typedef unsigned char snapshot_page_t[PAGESIZE];
/* Lists the base address of the corresponding page in the backing store so we
 * know where to copy it back to */
struct BackingPageRecord {
	void *basePtrOfPage;
};
/* Struct for each memory region */
struct MemoryRegion {
	void *basePtr; // base of memory region
	int sizeInPages; // size of memory region in pages
};
/* Primary struct for the mprotect-based snapshotting system */
struct SnapShot {
	struct MemoryRegion *regionsToSnapShot; //This pointer references an array of memory regions to snapshot
	snapshot_page_t *backingStore; //This pointer references an array of snapshot pages that form the backing store
	void *backingStoreBasePtr; //Unaligned base pointer of the backing store allocation (backingStore is its page-aligned view)
	struct BackingPageRecord *backingRecords; //This pointer references an array of backing page records (same number of elements as backingStore)
	struct SnapShotRecord *snapShots; //This pointer references the snapshot array

	unsigned int lastSnapShot; //Stores the next snapshot record we should use
	unsigned int lastBackingPage; //Stores the next backing page we should use
	unsigned int lastRegion; //Stores the next memory region to be used

	unsigned int maxRegions; //Stores the max number of memory regions we support
	unsigned int maxBackingPages; //Stores the total number of backing pages
	unsigned int maxSnapShots; //Stores the total number of snapshots we allow
};
#else

#define SHARED_MEMORY_DEFAULT	(100 * ((size_t)1 << 20))	// 100 MB for the shared memory region
#define STACK_SIZE_DEFAULT	(((size_t)1 << 20) * 20)	// 20 MB stack, mapped directly after the shared memory region
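/* Layout of the region mmap'ed by createSharedMemory() below (total size
 * SHARED_MEMORY_DEFAULT + STACK_SIZE_DEFAULT):
 *
 *   base                            -> struct SnapShot bookkeeping record
 *   base + sizeof(struct SnapShot)  -> shared mspace (mSharedMemoryBase)
 *   base + SHARED_MEMORY_DEFAULT    -> stack of STACK_SIZE_DEFAULT bytes (mStackBase)
 */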
/* Primary bookkeeping record for the fork-based snapshotting system */
struct SnapShot {
	void *mSharedMemoryBase;
	void *mStackBase;
	size_t mStackSize;
	volatile snapshot_id mIDToRollback;
	ucontext_t mContextToRollback;
	snapshot_id currSnapShotID;
};
#endif

static struct SnapShot *snapshotrecord = NULL;
/** PageAlignAddressUpward rounds the given address up to the next page
 *  boundary; an address that is already page aligned is returned unchanged.
 */
static void * PageAlignAddressUpward(void *addr)
{
	return (void *)((((uintptr_t)addr) + PAGESIZE - 1) & ~(PAGESIZE - 1));
}
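/* For example, assuming PAGESIZE == 4096: 0x10000 stays 0x10000, while
 * 0x10001 rounds up to 0x11000. */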
#if !USE_MPROTECT_SNAPSHOT
/**
 * These variables are necessary because the stack is a shared region and
 * there exists a race between all processes executing the same function.
 * To avoid that problem, these variables must be allocated in 'safe' regions.
 * The bug was actually observed with forkID; the variables below are
 * used to indicate the various contexts to switch to.
 *
 * @savedSnapshotContext: contains the point to which a takesnapshot() call should switch.
 * @savedUserSnapshotContext: contains the point to which the process whose snapshotid equals the rollback id should switch.
 * @snapshotid: a running counter of the snapshot ids of the forked processes; it is incremented and stored in a persistently shared record.
 */
static ucontext_t savedSnapshotContext;
static ucontext_t savedUserSnapshotContext;
static snapshot_id snapshotid = 0;

#else /* USE_MPROTECT_SNAPSHOT */
/** ReturnPageAlignedAddress rounds the given address down to the start of the
 *  page that contains it.
 */
static void * ReturnPageAlignedAddress(void *addr)
{
	return (void *)(((uintptr_t)addr) & ~(PAGESIZE - 1));
}
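/* For example, assuming PAGESIZE == 4096: 0x10fff rounds down to 0x10000,
 * while 0x11000 is already aligned and is returned unchanged. */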
/** The initSnapShotRecord method initializes the snapshotting data
 *  structures for the mprotect-based snapshot.
 */
static void initSnapShotRecord(unsigned int numbackingpages, unsigned int numsnapshots, unsigned int nummemoryregions)
{
	snapshotrecord = (struct SnapShot *)model_malloc(sizeof(struct SnapShot));
	snapshotrecord->regionsToSnapShot = (struct MemoryRegion *)model_malloc(sizeof(struct MemoryRegion) * nummemoryregions);
	snapshotrecord->backingStoreBasePtr = (void *)model_malloc(sizeof(snapshot_page_t) * (numbackingpages + 1));
	//Page align the backing store pages
	snapshotrecord->backingStore = (snapshot_page_t *)PageAlignAddressUpward(snapshotrecord->backingStoreBasePtr);
	snapshotrecord->backingRecords = (struct BackingPageRecord *)model_malloc(sizeof(struct BackingPageRecord) * numbackingpages);
	snapshotrecord->snapShots = (struct SnapShotRecord *)model_malloc(sizeof(struct SnapShotRecord) * numsnapshots);
	snapshotrecord->lastSnapShot = 0;
	snapshotrecord->lastBackingPage = 0;
	snapshotrecord->lastRegion = 0;
	snapshotrecord->maxRegions = nummemoryregions;
	snapshotrecord->maxBackingPages = numbackingpages;
	snapshotrecord->maxSnapShots = numsnapshots;
}
/** HandlePF is the page fault handler for the mprotect-based snapshotting
 *  algorithm.
 */
static void HandlePF(int sig, siginfo_t *si, void *unused)
{
	if (si->si_code == SEGV_MAPERR) {
		model_print("Real Fault at %p\n", si->si_addr);
		model_print("For debugging, place breakpoint at: %s:%d\n",
				__FILE__, __LINE__);
		exit(EXIT_FAILURE);
	}
	void* addr = ReturnPageAlignedAddress(si->si_addr);
	unsigned int backingpage = snapshotrecord->lastBackingPage++;	//Could run out of pages...
	if (backingpage == snapshotrecord->maxBackingPages) {
		model_print("Out of backing pages at %p\n", si->si_addr);
		exit(EXIT_FAILURE);
	}

	//copy page
	memcpy(&(snapshotrecord->backingStore[backingpage]), addr, sizeof(snapshot_page_t));
	//remember where to copy page back to
	snapshotrecord->backingRecords[backingpage].basePtrOfPage = addr;
	//set protection to read/write
	if (mprotect(addr, sizeof(snapshot_page_t), PROT_READ | PROT_WRITE)) {
		perror("mprotect");
		// Handle error by quitting?
	}
}
#endif /* USE_MPROTECT_SNAPSHOT */
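/* Copy-on-write protocol of the mprotect-based build: takeSnapshot() marks the
 * registered regions PROT_READ, so the first write to any page raises SIGSEGV;
 * HandlePF then saves the page's old contents into backingStore, records its
 * address in backingRecords, and restores PROT_READ | PROT_WRITE; rollBack()
 * later memcpy's the saved pages back over the live ones. */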
#if !USE_MPROTECT_SNAPSHOT
static void createSharedMemory()
{
	//step 1. create shared memory.
	void *memMapBase = mmap(0, SHARED_MEMORY_DEFAULT + STACK_SIZE_DEFAULT, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANON, -1, 0);
	if (MAP_FAILED == memMapBase)
		FAILURE("mmap");
	//Setup snapshot record at the base of the mapped region
	snapshotrecord = (struct SnapShot *)memMapBase;
	snapshotrecord->mSharedMemoryBase = (void *)((uintptr_t)memMapBase + sizeof(struct SnapShot));
	snapshotrecord->mStackBase = (void *)((uintptr_t)memMapBase + SHARED_MEMORY_DEFAULT);
	snapshotrecord->mStackSize = STACK_SIZE_DEFAULT;
	snapshotrecord->mIDToRollback = -1;
	snapshotrecord->currSnapShotID = 0;
}
/**
 * Create a new mspace pointer for the non-snapshotting (i.e., inter-process
 * shared) memory region. Only for fork-based snapshotting.
 *
 * @return The shared memory mspace
 */
mspace create_shared_mspace()
{
	if (!snapshotrecord)
		createSharedMemory();
	return create_mspace_with_base((void *)(snapshotrecord->mSharedMemoryBase), SHARED_MEMORY_DEFAULT - sizeof(struct SnapShot), 1);
}
#endif
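/* Usage sketch (illustrative, not a call site in this file; mspace_malloc is
 * the dlmalloc mspace allocation routine assumed to accompany
 * create_mspace_with_base):
 *
 *   mspace shared = create_shared_mspace();
 *   void *p = mspace_malloc(shared, 128);  // visible to parent and forked children
 */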
/** The initSnapshotLibrary function initializes the snapshot library.
 *  @param entryPoint the function that should run the program.
 */
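/* Hypothetical caller sketch (the page counts and the entry function are
 * illustrative values, not taken from this file):
 *
 *   static void run_model_checker();   // runs the user program under the checker
 *
 *   initSnapshotLibrary(400000, 100, 10, 40000, &run_model_checker);
 */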
#if USE_MPROTECT_SNAPSHOT
void initSnapshotLibrary(unsigned int numbackingpages,
		unsigned int numsnapshots, unsigned int nummemoryregions,
		unsigned int numheappages, VoidFuncPtr entryPoint)
{
	/* Setup a stack for our signal handler.... */
	stack_t ss;
	ss.ss_sp = PageAlignAddressUpward(model_malloc(SIGSTACKSIZE + PAGESIZE - 1));
	ss.ss_size = SIGSTACKSIZE;
	ss.ss_flags = 0;
	sigaltstack(&ss, NULL);
	struct sigaction sa;
	sa.sa_flags = SA_SIGINFO | SA_NODEFER | SA_RESTART | SA_ONSTACK;
	sigemptyset(&sa.sa_mask);
	sa.sa_sigaction = HandlePF;
#ifdef MAC
	if (sigaction(SIGBUS, &sa, NULL) == -1) {
		model_print("SIGACTION CANNOT BE INSTALLED\n");
		exit(EXIT_FAILURE);
	}
#endif
	if (sigaction(SIGSEGV, &sa, NULL) == -1) {
		model_print("SIGACTION CANNOT BE INSTALLED\n");
		exit(EXIT_FAILURE);
	}
	initSnapShotRecord(numbackingpages, numsnapshots, nummemoryregions);

	// EVIL HACK: We need to make sure that calls into the HandlePF method don't cause dynamic links
	// The problem is that we end up protecting state in the dynamic linker...
	// Solution is to call our signal handler before we start protecting stuff...
	siginfo_t si;
	memset(&si, 0, sizeof(si));
	si.si_addr = ss.ss_sp;
	HandlePF(SIGSEGV, &si, NULL);
	snapshotrecord->lastBackingPage--;	//remove the fake page we copied
	void *basemySpace = model_malloc((numheappages + 1) * PAGESIZE);
	void *pagealignedbase = PageAlignAddressUpward(basemySpace);
	user_snapshot_space = create_mspace_with_base(pagealignedbase, numheappages * PAGESIZE, 1);
	addMemoryRegionToSnapShot(pagealignedbase, numheappages);
	void *base_model_snapshot_space = model_malloc((numheappages + 1) * PAGESIZE);
	pagealignedbase = PageAlignAddressUpward(base_model_snapshot_space);
	model_snapshot_space = create_mspace_with_base(pagealignedbase, numheappages * PAGESIZE, 1);
	addMemoryRegionToSnapShot(pagealignedbase, numheappages);

	entryPoint();
}
#else

void initSnapshotLibrary(unsigned int numbackingpages,
		unsigned int numsnapshots, unsigned int nummemoryregions,
		unsigned int numheappages, VoidFuncPtr entryPoint)
{
	if (!snapshotrecord)
		createSharedMemory();

	void *base_model_snapshot_space = malloc((numheappages + 1) * PAGESIZE);
	void *pagealignedbase = PageAlignAddressUpward(base_model_snapshot_space);
	model_snapshot_space = create_mspace_with_base(pagealignedbase, numheappages * PAGESIZE, 1);
	//step 2. setup the stack context.
	ucontext_t newContext;
	getcontext(&newContext);
	newContext.uc_stack.ss_sp = snapshotrecord->mStackBase;
	newContext.uc_stack.ss_size = STACK_SIZE_DEFAULT;
	makecontext(&newContext, entryPoint, 0);
	/* switch to a new entryPoint context, on a new stack */
	swapcontext(&savedSnapshotContext, &newContext);
	/* switch back here when takesnapshot is called */
	pid_t forkedID = 0;
	snapshotid = snapshotrecord->currSnapShotID;
	/* This bool indicates that the current process's snapshotid is the same
	   as the id to which the rollback needs to occur */
	bool rollback = false;
	while (true) {
		snapshotrecord->currSnapShotID = snapshotid + 1;
		forkedID = fork();

		if (0 == forkedID) {
			/* If the rollback bool is set, switch to the context we need to
			   return to during a rollback. */
			if (rollback) {
				setcontext(&(snapshotrecord->mContextToRollback));
			} else {
				/* A child process forked as a result of a takesnapshot
				   call should switch back to the takesnapshot context. */
				setcontext(&savedUserSnapshotContext);
			}
		} else {
			int status;
			int retVal;

			SSDEBUG("The process id of child is %d and the process id of this process is %d and snapshot id is %d\n",
					forkedID, getpid(), snapshotid);

			do {
				retVal = waitpid(forkedID, &status, 0);
			} while (-1 == retVal && errno == EINTR);

			if (snapshotrecord->mIDToRollback != snapshotid)
				exit(EXIT_SUCCESS);
			rollback = true;
		}
	}
}
#endif
/** The addMemoryRegionToSnapShot function assumes that addr is page aligned. */
void addMemoryRegionToSnapShot(void *addr, unsigned int numPages)
{
#if USE_MPROTECT_SNAPSHOT
	unsigned int memoryregion = snapshotrecord->lastRegion++;
	if (memoryregion == snapshotrecord->maxRegions) {
		model_print("Exceeded supported number of memory regions!\n");
		exit(EXIT_FAILURE);
	}

	snapshotrecord->regionsToSnapShot[memoryregion].basePtr = addr;
	snapshotrecord->regionsToSnapShot[memoryregion].sizeInPages = numPages;
#endif	//NOT REQUIRED IN THE CASE OF FORK BASED SNAPSHOTS.
}
/** The takeSnapshot function takes a snapshot.
 *  @return The snapshot identifier.
 */
snapshot_id takeSnapshot()
{
#if USE_MPROTECT_SNAPSHOT
	for (unsigned int region = 0; region < snapshotrecord->lastRegion; region++) {
		if (mprotect(snapshotrecord->regionsToSnapShot[region].basePtr, snapshotrecord->regionsToSnapShot[region].sizeInPages * sizeof(snapshot_page_t), PROT_READ) == -1) {
			perror("mprotect");
			model_print("Failed to mprotect inside of takeSnapShot\n");
			exit(EXIT_FAILURE);
		}
	}
	unsigned int snapshot = snapshotrecord->lastSnapShot++;
	if (snapshot == snapshotrecord->maxSnapShots) {
		model_print("Out of snapshots\n");
		exit(EXIT_FAILURE);
	}
	snapshotrecord->snapShots[snapshot].firstBackingPage = snapshotrecord->lastBackingPage;

	return snapshot;
#else
	swapcontext(&savedUserSnapshotContext, &savedSnapshotContext);
	SSDEBUG("TAKESNAPSHOT RETURN\n");
	return snapshotid;
#endif
}
/** The rollBack function rolls back to the given snapshot identifier.
 *  @param theID is the snapshot identifier to rollback to.
 */
void rollBack(snapshot_id theID)
{
#if USE_MPROTECT_SNAPSHOT == 2
	/* Fast path: rolling back to the most recent snapshot needs no duplicate filtering */
	if (snapshotrecord->lastSnapShot == (theID + 1)) {
		for (unsigned int page = snapshotrecord->snapShots[theID].firstBackingPage; page < snapshotrecord->lastBackingPage; page++) {
			memcpy(snapshotrecord->backingRecords[page].basePtrOfPage, &snapshotrecord->backingStore[page], sizeof(snapshot_page_t));
		}
		return;
	}
#endif
#if USE_MPROTECT_SNAPSHOT
	HashTable< void *, bool, uintptr_t, 4, model_malloc, model_calloc, model_free> duplicateMap;
	for (unsigned int region = 0; region < snapshotrecord->lastRegion; region++) {
		if (mprotect(snapshotrecord->regionsToSnapShot[region].basePtr, snapshotrecord->regionsToSnapShot[region].sizeInPages * sizeof(snapshot_page_t), PROT_READ | PROT_WRITE) == -1) {
			perror("mprotect");
			model_print("Failed to mprotect inside of rollBack\n");
			exit(EXIT_FAILURE);
		}
	}
	for (unsigned int page = snapshotrecord->snapShots[theID].firstBackingPage; page < snapshotrecord->lastBackingPage; page++) {
		if (!duplicateMap.contains(snapshotrecord->backingRecords[page].basePtrOfPage)) {
			duplicateMap.put(snapshotrecord->backingRecords[page].basePtrOfPage, true);
			memcpy(snapshotrecord->backingRecords[page].basePtrOfPage, &snapshotrecord->backingStore[page], sizeof(snapshot_page_t));
		}
	}
	snapshotrecord->lastSnapShot = theID;
	snapshotrecord->lastBackingPage = snapshotrecord->snapShots[theID].firstBackingPage;
	takeSnapshot();	//Make sure the current snapshot is still good... All later ones are cleared
#else
	snapshotrecord->mIDToRollback = theID;
	volatile int sTemp = 0;
	getcontext(&snapshotrecord->mContextToRollback);
	/*
	 * This is used to quit the process on rollback, so that the process
	 * which needs to rollback can quit, allowing the process whose
	 * snapshotid matches the rollback id to switch to this context and
	 * continue....
	 */
	if (!sTemp) {
		sTemp = 1;
		SSDEBUG("Invoked rollback\n");
		exit(EXIT_SUCCESS);
	}
	/*
	 * This fix obviates the need for a finalize call, and hence reduces
	 * dependences for the model checker....
	 */
	snapshotrecord->mIDToRollback = -1;
#endif
}
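/* Usage sketch (illustrative, not a call site in this file): callers pair the
 * id returned by takeSnapshot() with a later rollBack() to that id.
 *
 *   snapshot_id id = takeSnapshot();   // protect regions / fork a child
 *   ...speculatively execute and mutate snapshotted memory...
 *   rollBack(id);                      // copy saved pages back / switch processes
 */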