#include "hashtable.h"
/** PageAlignAddressUpward returns the smallest page-aligned address
 *  greater than or equal to the given address. */
static void * PageAlignAddressUpward(void *addr)
{
	return (void *)((((uintptr_t)addr) + PAGESIZE - 1) & ~(PAGESIZE - 1));
}
#if USE_MPROTECT_SNAPSHOT
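
/* Overview of the mprotect-based scheme: taking a snapshot write-protects
 * every registered region. The first write to a protected page triggers a
 * fault; mprot_handle_pf copies the page into the backing store, records
 * where it came from, and re-enables writes. Rolling back copies the saved
 * pages from the backing store over their original locations. */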
/* Each SnapShotRecord lists the first backing page that must be written to
 * revert to that snapshot */
struct SnapShotRecord {
	unsigned int firstBackingPage;
};

/** @brief Backing store page */
typedef unsigned char snapshot_page_t[PAGESIZE];

/* Lists the base address of the corresponding page in the backing store so we
 * know where to copy it back to */
struct BackingPageRecord {
	void *basePtrOfPage;
};

/* Struct for each memory region */
struct MemoryRegion {
	void *basePtr;		// base of memory region
	int sizeInPages;	// size of memory region in pages
};
/** ReturnPageAlignedAddress returns the start of the page containing the
 *  given address (i.e., rounds the address down to a page boundary). */
static void * ReturnPageAlignedAddress(void *addr)
{
	return (void *)(((uintptr_t)addr) & ~(PAGESIZE - 1));
}
/* Primary struct for the snapshotting system */
struct mprot_snapshotter {
	mprot_snapshotter(unsigned int numbackingpages, unsigned int numsnapshots, unsigned int nummemoryregions);
	~mprot_snapshotter();

	struct MemoryRegion *regionsToSnapShot;	// array of memory regions to snapshot
	snapshot_page_t *backingStore;	// page-aligned array of pages that form the backing store
	void *backingStoreBasePtr;	// unaligned base of the backing-store allocation (kept so it can be freed)
	struct BackingPageRecord *backingRecords;	// array of backing-page records (same number of elements as backingStore)
	struct SnapShotRecord *snapShots;	// the snapshot array

	unsigned int lastSnapShot;	// the next snapshot record we should use
	unsigned int lastBackingPage;	// the next backing page we should use
	unsigned int lastRegion;	// the next memory region to be used

	unsigned int maxRegions;	// the max number of memory regions we support
	unsigned int maxBackingPages;	// the total number of backing pages
	unsigned int maxSnapShots;	// the total number of snapshots we allow

	MEMALLOC
};

static struct mprot_snapshotter *mprot_snap = NULL;
mprot_snapshotter::mprot_snapshotter(unsigned int backing_pages, unsigned int snapshots, unsigned int regions) :
	lastSnapShot(0),
	lastBackingPage(0),
	lastRegion(0),
	maxRegions(regions),
	maxBackingPages(backing_pages),
	maxSnapShots(snapshots)
{
	regionsToSnapShot = (struct MemoryRegion *)model_malloc(sizeof(struct MemoryRegion) * regions);
	/* Over-allocate by one page so the backing store can be page aligned */
	backingStoreBasePtr = (void *)model_malloc(sizeof(snapshot_page_t) * (backing_pages + 1));
	backingStore = (snapshot_page_t *)PageAlignAddressUpward(backingStoreBasePtr);
	backingRecords = (struct BackingPageRecord *)model_malloc(sizeof(struct BackingPageRecord) * backing_pages);
	snapShots = (struct SnapShotRecord *)model_malloc(sizeof(struct SnapShotRecord) * snapshots);
}
mprot_snapshotter::~mprot_snapshotter()
{
	model_free(regionsToSnapShot);
	model_free(backingStoreBasePtr);
	model_free(backingRecords);
	model_free(snapShots);
}
/** mprot_handle_pf is the page fault handler for the mprotect-based
 *  snapshotting algorithm. */
static void mprot_handle_pf(int sig, siginfo_t *si, void *unused)
{
	if (si->si_code == SEGV_MAPERR) {
		/* Fault on an unmapped address: a real segfault, not one of our
		 * write-protected pages, so we cannot recover */
		model_print("Segmentation fault at %p\n", si->si_addr);
		model_print("For debugging, place breakpoint at: %s:%d\n",
				__FILE__, __LINE__);
		// print_trace(); // Trace printing may cause dynamic memory allocation
		exit(EXIT_FAILURE);
	}
	void *addr = ReturnPageAlignedAddress(si->si_addr);

	unsigned int backingpage = mprot_snap->lastBackingPage++;	//Could run out of pages...
	if (backingpage == mprot_snap->maxBackingPages) {
		model_print("Out of backing pages at %p\n", si->si_addr);
		exit(EXIT_FAILURE);
	}

	//copy the faulting page into the backing store
	memcpy(&(mprot_snap->backingStore[backingpage]), addr, sizeof(snapshot_page_t));
	//remember where to copy the page back to
	mprot_snap->backingRecords[backingpage].basePtrOfPage = addr;
	//set protection to read/write so the faulting write can proceed
	if (mprotect(addr, sizeof(snapshot_page_t), PROT_READ | PROT_WRITE)) {
		perror("mprotect");
		// Handle error by quitting?
	}
}
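
/** @brief Initialize the mprotect-based snapshot system.
 *
 *  Sets up an alternate signal stack, installs mprot_handle_pf for SIGBUS
 *  and SIGSEGV, allocates the snapshotter bookkeeping, pre-faults once so
 *  the handler's code paths are resolved before pages are protected, creates
 *  the user and model-checker heaps, and finally transfers control to
 *  entryPoint. */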
static void mprot_snapshot_init(unsigned int numbackingpages,
		unsigned int numsnapshots, unsigned int nummemoryregions,
		unsigned int numheappages, VoidFuncPtr entryPoint)
{
	/* Set up a stack for our signal handler.... */
	stack_t ss;
	ss.ss_sp = PageAlignAddressUpward(model_malloc(SIGSTACKSIZE + PAGESIZE - 1));
	ss.ss_size = SIGSTACKSIZE;
	ss.ss_flags = 0;
	sigaltstack(&ss, NULL);

	struct sigaction sa;
	sa.sa_flags = SA_SIGINFO | SA_NODEFER | SA_RESTART | SA_ONSTACK;
	sigemptyset(&sa.sa_mask);
	sa.sa_sigaction = mprot_handle_pf;

	if (sigaction(SIGBUS, &sa, NULL) == -1) {
		perror("sigaction(SIGBUS)");
		exit(EXIT_FAILURE);
	}
	if (sigaction(SIGSEGV, &sa, NULL) == -1) {
		perror("sigaction(SIGSEGV)");
		exit(EXIT_FAILURE);
	}

	mprot_snap = new mprot_snapshotter(numbackingpages, numsnapshots, nummemoryregions);

	// EVIL HACK: We need to make sure that calls into mprot_handle_pf don't cause dynamic linking.
	// The problem is that we end up protecting state in the dynamic linker...
	// Solution is to call our signal handler once before we start protecting anything...
	siginfo_t si;
	memset(&si, 0, sizeof(si));
	si.si_addr = ss.ss_sp;
	mprot_handle_pf(SIGSEGV, &si, NULL);
	mprot_snap->lastBackingPage--;	//remove the fake page we copied

	/* Allocate an extra page so the heap base can be page aligned */
	void *basemySpace = model_malloc((numheappages + 1) * PAGESIZE);
	void *pagealignedbase = PageAlignAddressUpward(basemySpace);
	user_snapshot_space = create_mspace_with_base(pagealignedbase, numheappages * PAGESIZE, 1);
	snapshot_add_memory_region(pagealignedbase, numheappages);

	void *base_model_snapshot_space = model_malloc((numheappages + 1) * PAGESIZE);
	pagealignedbase = PageAlignAddressUpward(base_model_snapshot_space);
	model_snapshot_space = create_mspace_with_base(pagealignedbase, numheappages * PAGESIZE, 1);
	snapshot_add_memory_region(pagealignedbase, numheappages);

	entryPoint();
}
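
/** @brief Register a memory region (base address + page count) so that it is
 *  write-protected and tracked by subsequent snapshots. Assumes addr is page
 *  aligned. */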
static void mprot_add_to_snapshot(void *addr, unsigned int numPages)
{
	unsigned int memoryregion = mprot_snap->lastRegion++;
	if (memoryregion == mprot_snap->maxRegions) {
		model_print("Exceeded supported number of memory regions!\n");
		exit(EXIT_FAILURE);
	}

	mprot_snap->regionsToSnapShot[memoryregion].basePtr = addr;
	mprot_snap->regionsToSnapShot[memoryregion].sizeInPages = numPages;
}
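
/** @brief Take a snapshot: write-protect every registered region and record
 *  the current backing-page watermark so a later rollback knows which pages
 *  were dirtied after this point.
 *  @return The snapshot identifier. */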
static snapshot_id mprot_take_snapshot()
{
	for (unsigned int region = 0; region < mprot_snap->lastRegion; region++) {
		if (mprotect(mprot_snap->regionsToSnapShot[region].basePtr, mprot_snap->regionsToSnapShot[region].sizeInPages * sizeof(snapshot_page_t), PROT_READ) == -1) {
			perror("mprotect");
			model_print("Failed to mprotect inside of takeSnapShot\n");
			exit(EXIT_FAILURE);
		}
	}
	unsigned int snapshot = mprot_snap->lastSnapShot++;
	if (snapshot == mprot_snap->maxSnapShots) {
		model_print("Out of snapshots\n");
		exit(EXIT_FAILURE);
	}
	mprot_snap->snapShots[snapshot].firstBackingPage = mprot_snap->lastBackingPage;
	return snapshot;
}
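
/** @brief Roll memory back to a previous snapshot.
 *
 *  When rolling back to the most recent snapshot (and the incremental
 *  variant is enabled), the dirtied pages can simply be copied back. In the
 *  general case, all regions are made writable again and each dirtied page
 *  is restored from its oldest saved copy (its contents at the time of
 *  snapshot theID); duplicateMap ensures each page is written only once. */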
static void mprot_roll_back(snapshot_id theID)
{
#if USE_MPROTECT_SNAPSHOT == 2
	/* Fast path: rolling back to the most recent snapshot */
	if (mprot_snap->lastSnapShot == (theID + 1)) {
		for (unsigned int page = mprot_snap->snapShots[theID].firstBackingPage; page < mprot_snap->lastBackingPage; page++) {
			memcpy(mprot_snap->backingRecords[page].basePtrOfPage, &mprot_snap->backingStore[page], sizeof(snapshot_page_t));
		}
		return;
	}
#endif

	HashTable<void *, bool, uintptr_t, 4, model_malloc, model_calloc, model_free> duplicateMap;
	for (unsigned int region = 0; region < mprot_snap->lastRegion; region++) {
		if (mprotect(mprot_snap->regionsToSnapShot[region].basePtr, mprot_snap->regionsToSnapShot[region].sizeInPages * sizeof(snapshot_page_t), PROT_READ | PROT_WRITE) == -1) {
			perror("mprotect");
			model_print("Failed to mprotect inside of rollback\n");
			exit(EXIT_FAILURE);
		}
	}
	for (unsigned int page = mprot_snap->snapShots[theID].firstBackingPage; page < mprot_snap->lastBackingPage; page++) {
		if (!duplicateMap.contains(mprot_snap->backingRecords[page].basePtrOfPage)) {
			duplicateMap.put(mprot_snap->backingRecords[page].basePtrOfPage, true);
			memcpy(mprot_snap->backingRecords[page].basePtrOfPage, &mprot_snap->backingStore[page], sizeof(snapshot_page_t));
		}
	}
	mprot_snap->lastSnapShot = theID;
	mprot_snap->lastBackingPage = mprot_snap->snapShots[theID].firstBackingPage;
	mprot_take_snapshot();	//Make sure current snapshot is still good... All later ones are cleared
}
#else /* !USE_MPROTECT_SNAPSHOT */

#define SHARED_MEMORY_DEFAULT	(100 * ((size_t)1 << 20))	// 100 MB for the shared memory
#define STACK_SIZE_DEFAULT	(((size_t)1 << 20) * 20)	// 20 MB for the shared stack, mapped beyond the 100 MB region
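
/* Overview of the fork-based scheme: user code runs on a shared stack inside
 * a forked child while the parent sleeps in waitpid(). Taking a snapshot
 * swaps back to the parent's private context, which forks a fresh child to
 * continue execution. Rolling back publishes the target ID in shared memory
 * and exits children until the process whose snapshot ID matches resumes. */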
struct fork_snapshotter {
	/** @brief Pointer to the shared (non-snapshot) memory heap base
	 * (NOTE: this has size SHARED_MEMORY_DEFAULT - sizeof(*fork_snap)) */
	void *mSharedMemoryBase;

	/** @brief Pointer to the shared (non-snapshot) stack region */
	void *mStackBase;

	/** @brief Size of the shared stack */
	size_t mStackSize;

	/**
	 * @brief Stores the ID that we are attempting to roll back to
	 *
	 * Used in inter-process communication so that each process can
	 * determine whether or not to take over execution (w/ matching ID) or
	 * exit (we're rolling back even further). Dubiously marked 'volatile'
	 * to prevent compiler optimizations from messing with the
	 * inter-process behavior.
	 */
	volatile snapshot_id mIDToRollback;

	/**
	 * @brief The context for the shared (non-snapshot) stack
	 *
	 * This context is passed between the various processes which represent
	 * various snapshot states. It should be used primarily for the
	 * "client-side" code, not the main snapshot loop.
	 */
	ucontext_t shared_ctxt;

	/** @brief Inter-process tracking of the next snapshot ID */
	snapshot_id currSnapShotID;
};
static struct fork_snapshotter *fork_snap = NULL;
/**
 * These variables are necessary because the stack is a shared region and
 * there exists a race between all processes executing the same function.
 * To avoid that problem, we need variables that are allocated in 'safe'
 * (per-process) regions. The bug was actually observed with forkID; the
 * variables below indicate the various contexts to switch to.
 *
 * @private_ctxt: the context which is internal to the current process. Used
 * for running the internal snapshot/rollback loop.
 * @exit_ctxt: a special context used just for exiting from a process (so we
 * can use swapcontext() instead of setcontext() + hacks)
 * @snapshotid: a running counter for the various forked processes'
 * snapshot IDs; it is incremented and published via the shared record
 */
static ucontext_t private_ctxt;
static ucontext_t exit_ctxt;
static snapshot_id snapshotid = 0;
/**
 * @brief Create a new context, with a given stack and entry function
 * @param ctxt The context structure to fill
 * @param stack The stack to run the new context in
 * @param stacksize The size of the stack
 * @param func The entry point function for the context
 */
static void create_context(ucontext_t *ctxt, void *stack, size_t stacksize,
		void (*func)(void))
{
	getcontext(ctxt);
	ctxt->uc_stack.ss_sp = stack;
	ctxt->uc_stack.ss_size = stacksize;
	makecontext(ctxt, func, 0);
}
/** @brief An empty function, used for an "empty" context which just exits a
 *  process */
static void fork_exit()
{
	/* Intentionally empty */
}
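
/** @brief mmap() one MAP_SHARED | MAP_ANON region that holds, in order, the
 *  fork_snapshotter record, the shared heap, and the shared stack, so that
 *  all forked processes see the same memory. */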
static void createSharedMemory()
{
	//step 1. create shared memory.
	void *memMapBase = mmap(0, SHARED_MEMORY_DEFAULT + STACK_SIZE_DEFAULT, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANON, -1, 0);
	if (memMapBase == MAP_FAILED) {
		perror("mmap");
		exit(EXIT_FAILURE);
	}

	//Set up the snapshot record at the top of the free region
	fork_snap = (struct fork_snapshotter *)memMapBase;
	fork_snap->mSharedMemoryBase = (void *)((uintptr_t)memMapBase + sizeof(*fork_snap));
	fork_snap->mStackBase = (void *)((uintptr_t)memMapBase + SHARED_MEMORY_DEFAULT);
	fork_snap->mStackSize = STACK_SIZE_DEFAULT;
	fork_snap->mIDToRollback = -1;
	fork_snap->currSnapShotID = 0;
}
/**
 * Create a new mspace pointer for the non-snapshotting (i.e., inter-process
 * shared) memory region. Only for fork-based snapshotting.
 *
 * @return The shared memory mspace
 */
mspace create_shared_mspace()
{
	if (!fork_snap)
		createSharedMemory();
	return create_mspace_with_base((void *)(fork_snap->mSharedMemoryBase), SHARED_MEMORY_DEFAULT - sizeof(*fork_snap), 1);
}
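
/** @brief Initialize fork-based snapshotting.
 *
 *  Creates the shared region and the model-checker heap, then runs
 *  entryPoint on the shared stack. Each time control swaps back here (via
 *  fork_take_snapshot), the parent forks a child that continues execution
 *  and waits for it; when a child exits during a rollback, ancestors exit in
 *  turn until the process whose snapshot ID matches
 *  fork_snap->mIDToRollback re-forks and resumes. */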
static void fork_snapshot_init(unsigned int numbackingpages,
		unsigned int numsnapshots, unsigned int nummemoryregions,
		unsigned int numheappages, VoidFuncPtr entryPoint)
{
	if (!fork_snap)
		createSharedMemory();
	void *base_model_snapshot_space = malloc((numheappages + 1) * PAGESIZE);
	void *pagealignedbase = PageAlignAddressUpward(base_model_snapshot_space);
	model_snapshot_space = create_mspace_with_base(pagealignedbase, numheappages * PAGESIZE, 1);

	/* setup an "exiting" context; a tiny stack suffices since fork_exit() does nothing */
	char stack[128];
	create_context(&exit_ctxt, stack, sizeof(stack), fork_exit);

	/* setup the shared-stack context */
	create_context(&fork_snap->shared_ctxt, fork_snap->mStackBase,
			STACK_SIZE_DEFAULT, entryPoint);
	/* switch to a new entryPoint context, on a new stack */
	model_swapcontext(&private_ctxt, &fork_snap->shared_ctxt);

	/* switch back here when takesnapshot is called */
	snapshotid = fork_snap->currSnapShotID;

	while (true) {
		pid_t forkedID;
		fork_snap->currSnapShotID = snapshotid + 1;
		forkedID = fork();

		if (0 == forkedID) {
			/* child: resume the user program on the shared stack */
			setcontext(&fork_snap->shared_ctxt);
		} else {
			DEBUG("parent PID: %d, child PID: %d, snapshot ID: %d\n",
					getpid(), forkedID, snapshotid);

			while (waitpid(forkedID, NULL, 0) < 0) {
				/* waitpid() may be interrupted */
				if (errno != EINTR) {
					perror("waitpid");
					exit(EXIT_FAILURE);
				}
			}
			/* rolled back past this snapshot: this process is done */
			if (fork_snap->mIDToRollback != snapshotid)
				exit(EXIT_SUCCESS);
		}
	}
}
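
/** @brief Take a snapshot by swapping from the shared stack back to the
 *  private snapshot loop, which forks a new process; execution resumes here
 *  in the forked child.
 *  @return The new snapshot identifier. */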
static snapshot_id fork_take_snapshot()
{
	model_swapcontext(&fork_snap->shared_ctxt, &private_ctxt);
	DEBUG("TAKESNAPSHOT RETURN\n");
	return snapshotid;
}
static void fork_roll_back(snapshot_id theID)
{
	fork_snap->mIDToRollback = theID;
	model_swapcontext(&fork_snap->shared_ctxt, &exit_ctxt);
	/* only reached once a process with a matching snapshot ID re-forks and
	 * resumes the shared context */
	fork_snap->mIDToRollback = -1;
}
#endif /* !USE_MPROTECT_SNAPSHOT */
/**
 * @brief Initializes the snapshot system
 * @param entryPoint the function that should run the program.
 */
void snapshot_system_init(unsigned int numbackingpages,
		unsigned int numsnapshots, unsigned int nummemoryregions,
		unsigned int numheappages, VoidFuncPtr entryPoint)
{
#if USE_MPROTECT_SNAPSHOT
	mprot_snapshot_init(numbackingpages, numsnapshots, nummemoryregions, numheappages, entryPoint);
#else
	fork_snapshot_init(numbackingpages, numsnapshots, nummemoryregions, numheappages, entryPoint);
#endif
}
/** Assumes that addr is page aligned. */
void snapshot_add_memory_region(void *addr, unsigned int numPages)
{
#if USE_MPROTECT_SNAPSHOT
	mprot_add_to_snapshot(addr, numPages);
#else
	/* not needed for fork-based snapshotting */
#endif
}
/** Takes a snapshot of memory.
 *  @return The snapshot identifier.
 */
snapshot_id take_snapshot()
{
#if USE_MPROTECT_SNAPSHOT
	return mprot_take_snapshot();
#else
	return fork_take_snapshot();
#endif
}
/** Rolls the memory state back to the given snapshot identifier.
 *  @param theID is the snapshot identifier to rollback to.
 */
void snapshot_roll_back(snapshot_id theID)
{
#if USE_MPROTECT_SNAPSHOT
	mprot_roll_back(theID);
#else
	fork_roll_back(theID);
#endif
}
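
/* Typical call pattern (illustrative sketch; main_loop and
 * run_one_execution are hypothetical callers, not part of this file):
 *
 *   snapshot_system_init(pages, snaps, regions, heappages, &main_loop);
 *   ...
 *   snapshot_id base = take_snapshot();
 *   run_one_execution();          // mutates snapshotted memory
 *   snapshot_roll_back(base);     // memory is restored to 'base'
 */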