#include <string.h>
#include <errno.h>
#include <sys/wait.h>
-#include <ucontext.h>
#include "hashtable.h"
#include "snapshot.h"
#include "mymemory.h"
#include "common.h"
-
-#define FAILURE(mesg) { model_print("failed in the API: %s with errno relative message: %s\n", mesg, strerror(errno)); exit(EXIT_FAILURE); }
+#include "context.h"
+#include "model.h"
/** PageAlignedAdressUpdate return a page aligned address for the
 * address being added as a side effect the numBytes are also changed.
 * (NOTE(review): the remainder of this comment and the function it
 * documents were lost in a bad merge -- restore them from history.)
 */

/* Struct for each memory region */
struct MemoryRegion {
	void *basePtr;	// base of memory region
	int sizeInPages;	// size of memory region in pages
};
/** ReturnPageAlignedAddress returns a page aligned address for the
/* Primary struct for snapshotting system */
struct mprot_snapshotter {
mprot_snapshotter(unsigned int numbackingpages, unsigned int numsnapshots, unsigned int nummemoryregions);
+ ~mprot_snapshotter();
- struct MemoryRegion *regionsToSnapShot; //This pointer references an array of memory regions to snapshot
- snapshot_page_t *backingStore; //This pointer references an array of snapshotpage's that form the backing store
- void *backingStoreBasePtr; //This pointer references an array of snapshotpage's that form the backing store
- struct BackingPageRecord *backingRecords; //This pointer references an array of backingpagerecord's (same number of elements as backingstore
- struct SnapShotRecord *snapShots; //This pointer references the snapshot array
+ struct MemoryRegion *regionsToSnapShot; //This pointer references an array of memory regions to snapshot
+ snapshot_page_t *backingStore; //This pointer references an array of snapshotpage's that form the backing store
+ void *backingStoreBasePtr; //This pointer references an array of snapshotpage's that form the backing store
+ struct BackingPageRecord *backingRecords; //This pointer references an array of backingpagerecord's (same number of elements as backingstore
+ struct SnapShotRecord *snapShots; //This pointer references the snapshot array
- unsigned int lastSnapShot; //Stores the next snapshot record we should use
- unsigned int lastBackingPage; //Stores the next backingpage we should use
- unsigned int lastRegion; //Stores the next memory region to be used
+ unsigned int lastSnapShot; //Stores the next snapshot record we should use
+ unsigned int lastBackingPage; //Stores the next backingpage we should use
+ unsigned int lastRegion; //Stores the next memory region to be used
- unsigned int maxRegions; //Stores the max number of memory regions we support
- unsigned int maxBackingPages; //Stores the total number of backing pages
- unsigned int maxSnapShots; //Stores the total number of snapshots we allow
+ unsigned int maxRegions; //Stores the max number of memory regions we support
+ unsigned int maxBackingPages; //Stores the total number of backing pages
+ unsigned int maxSnapShots; //Stores the total number of snapshots we allow
MEMALLOC
};
snapShots = (struct SnapShotRecord *)model_malloc(sizeof(struct SnapShotRecord) * snapshots);
}
-/** HandlePF is the page fault handler for mprotect based snapshotting
+mprot_snapshotter::~mprot_snapshotter()
+{
+ model_free(regionsToSnapShot);
+ model_free(backingStoreBasePtr);
+ model_free(backingRecords);
+ model_free(snapShots);
+}
+
+/** mprot_handle_pf is the page fault handler for mprotect based snapshotting
* algorithm.
*/
-static void HandlePF(int sig, siginfo_t *si, void *unused)
+static void mprot_handle_pf(int sig, siginfo_t *si, void *unused)
{
if (si->si_code == SEGV_MAPERR) {
- model_print("Real Fault at %p\n", si->si_addr);
- print_trace();
+ model_print("Segmentation fault at %p\n", si->si_addr);
model_print("For debugging, place breakpoint at: %s:%d\n",
- __FILE__, __LINE__);
+ __FILE__, __LINE__);
+ // print_trace(); // Trace printing may cause dynamic memory allocation
exit(EXIT_FAILURE);
}
void* addr = ReturnPageAlignedAddress(si->si_addr);
- unsigned int backingpage = mprot_snap->lastBackingPage++; //Could run out of pages...
+ unsigned int backingpage = mprot_snap->lastBackingPage++; //Could run out of pages...
if (backingpage == mprot_snap->maxBackingPages) {
model_print("Out of backing pages at %p\n", si->si_addr);
exit(EXIT_FAILURE);
}
static void mprot_snapshot_init(unsigned int numbackingpages,
- unsigned int numsnapshots, unsigned int nummemoryregions,
- unsigned int numheappages, VoidFuncPtr entryPoint)
+ unsigned int numsnapshots, unsigned int nummemoryregions,
+ unsigned int numheappages)
{
/* Setup a stack for our signal handler.... */
stack_t ss;
struct sigaction sa;
sa.sa_flags = SA_SIGINFO | SA_NODEFER | SA_RESTART | SA_ONSTACK;
sigemptyset(&sa.sa_mask);
- sa.sa_sigaction = HandlePF;
+ sa.sa_sigaction = mprot_handle_pf;
#ifdef MAC
if (sigaction(SIGBUS, &sa, NULL) == -1) {
- model_print("SIGACTION CANNOT BE INSTALLED\n");
+ perror("sigaction(SIGBUS)");
exit(EXIT_FAILURE);
}
#endif
if (sigaction(SIGSEGV, &sa, NULL) == -1) {
- model_print("SIGACTION CANNOT BE INSTALLED\n");
+ perror("sigaction(SIGSEGV)");
exit(EXIT_FAILURE);
}
mprot_snap = new mprot_snapshotter(numbackingpages, numsnapshots, nummemoryregions);
- // EVIL HACK: We need to make sure that calls into the HandlePF method don't cause dynamic links
+ // EVIL HACK: We need to make sure that calls into the mprot_handle_pf method don't cause dynamic links
// The problem is that we end up protecting state in the dynamic linker...
// Solution is to call our signal handler before we start protecting stuff...
siginfo_t si;
memset(&si, 0, sizeof(si));
si.si_addr = ss.ss_sp;
- HandlePF(SIGSEGV, &si, NULL);
- mprot_snap->lastBackingPage--; //remove the fake page we copied
+ mprot_handle_pf(SIGSEGV, &si, NULL);
+ mprot_snap->lastBackingPage--; //remove the fake page we copied
void *basemySpace = model_malloc((numheappages + 1) * PAGESIZE);
void *pagealignedbase = PageAlignAddressUpward(basemySpace);
user_snapshot_space = create_mspace_with_base(pagealignedbase, numheappages * PAGESIZE, 1);
- addMemoryRegionToSnapShot(pagealignedbase, numheappages);
+ snapshot_add_memory_region(pagealignedbase, numheappages);
void *base_model_snapshot_space = model_malloc((numheappages + 1) * PAGESIZE);
pagealignedbase = PageAlignAddressUpward(base_model_snapshot_space);
model_snapshot_space = create_mspace_with_base(pagealignedbase, numheappages * PAGESIZE, 1);
- addMemoryRegionToSnapShot(pagealignedbase, numheappages);
+ snapshot_add_memory_region(pagealignedbase, numheappages);
+}
- entryPoint();
+static void mprot_startExecution(ucontext_t * context, VoidFuncPtr entryPoint) {
+ /* setup the shared-stack context */
+ create_context(context, fork_snap->mStackBase, model_calloc(STACK_SIZE_DEFAULT, 1), STACK_SIZE_DEFAULT, entryPoint);
}
static void mprot_add_to_snapshot(void *addr, unsigned int numPages)
exit(EXIT_FAILURE);
}
+ DEBUG("snapshot region %p-%p (%u page%s)\n",
+ addr, (char *)addr + numPages * PAGESIZE, numPages,
+ numPages > 1 ? "s" : "");
mprot_snap->regionsToSnapShot[memoryregion].basePtr = addr;
mprot_snap->regionsToSnapShot[memoryregion].sizeInPages = numPages;
}
static snapshot_id mprot_take_snapshot()
{
- for (unsigned int region = 0; region < mprot_snap->lastRegion; region++) {
+ for (unsigned int region = 0;region < mprot_snap->lastRegion;region++) {
if (mprotect(mprot_snap->regionsToSnapShot[region].basePtr, mprot_snap->regionsToSnapShot[region].sizeInPages * sizeof(snapshot_page_t), PROT_READ) == -1) {
perror("mprotect");
model_print("Failed to mprotect inside of takeSnapShot\n");
{
#if USE_MPROTECT_SNAPSHOT == 2
if (mprot_snap->lastSnapShot == (theID + 1)) {
- for (unsigned int page = mprot_snap->snapShots[theID].firstBackingPage; page < mprot_snap->lastBackingPage; page++) {
+ for (unsigned int page = mprot_snap->snapShots[theID].firstBackingPage;page < mprot_snap->lastBackingPage;page++) {
memcpy(mprot_snap->backingRecords[page].basePtrOfPage, &mprot_snap->backingStore[page], sizeof(snapshot_page_t));
}
return;
#endif
HashTable< void *, bool, uintptr_t, 4, model_malloc, model_calloc, model_free> duplicateMap;
- for (unsigned int region = 0; region < mprot_snap->lastRegion; region++) {
+ for (unsigned int region = 0;region < mprot_snap->lastRegion;region++) {
if (mprotect(mprot_snap->regionsToSnapShot[region].basePtr, mprot_snap->regionsToSnapShot[region].sizeInPages * sizeof(snapshot_page_t), PROT_READ | PROT_WRITE) == -1) {
perror("mprotect");
model_print("Failed to mprotect inside of takeSnapShot\n");
exit(EXIT_FAILURE);
}
}
- for (unsigned int page = mprot_snap->snapShots[theID].firstBackingPage; page < mprot_snap->lastBackingPage; page++) {
+ for (unsigned int page = mprot_snap->snapShots[theID].firstBackingPage;page < mprot_snap->lastBackingPage;page++) {
if (!duplicateMap.contains(mprot_snap->backingRecords[page].basePtrOfPage)) {
duplicateMap.put(mprot_snap->backingRecords[page].basePtrOfPage, true);
memcpy(mprot_snap->backingRecords[page].basePtrOfPage, &mprot_snap->backingStore[page], sizeof(snapshot_page_t));
}
mprot_snap->lastSnapShot = theID;
mprot_snap->lastBackingPage = mprot_snap->snapShots[theID].firstBackingPage;
- mprot_take_snapshot(); //Make sure current snapshot is still good...All later ones are cleared
+ mprot_take_snapshot(); //Make sure current snapshot is still good...All later ones are cleared
}
-#else /* !USE_MPROTECT_SNAPSHOT */
-
-#include <ucontext.h>
+#else /* !USE_MPROTECT_SNAPSHOT */
-#define SHARED_MEMORY_DEFAULT (100 * ((size_t)1 << 20)) // 100mb for the shared memory
-#define STACK_SIZE_DEFAULT (((size_t)1 << 20) * 20) // 20 mb out of the above 100 mb for my stack
+#define SHARED_MEMORY_DEFAULT (200 * ((size_t)1 << 20)) // 200mb for the shared memory
+#define STACK_SIZE_DEFAULT (((size_t)1 << 20) * 20) // 20 mb out of the above 100 mb for my stack
struct fork_snapshotter {
+ /** @brief Pointer to the shared (non-snapshot) memory heap base
+ * (NOTE: this has size SHARED_MEMORY_DEFAULT - sizeof(*fork_snap)) */
void *mSharedMemoryBase;
+
+ /** @brief Pointer to the shared (non-snapshot) stack region */
void *mStackBase;
+
+ /** @brief Size of the shared stack */
size_t mStackSize;
+
+ /**
+ * @brief Stores the ID that we are attempting to roll back to
+ *
+ * Used in inter-process communication so that each process can
+ * determine whether or not to take over execution (w/ matching ID) or
+ * exit (we're rolling back even further). Dubiously marked 'volatile'
+ * to prevent compiler optimizations from messing with the
+ * inter-process behavior.
+ */
volatile snapshot_id mIDToRollback;
- ucontext_t mContextToRollback;
+
+ /**
+ * @brief The context for the shared (non-snapshot) stack
+ *
+ * This context is passed between the various processes which represent
+ * various snapshot states. It should be used primarily for the
+ * "client-side" code, not the main snapshot loop.
+ */
+ ucontext_t shared_ctxt;
+
+ /** @brief Inter-process tracking of the next snapshot ID */
snapshot_id currSnapShotID;
};
static struct fork_snapshotter *fork_snap = NULL;
/** @statics
-* These variables are necessary because the stack is shared region and
-* there exists a race between all processes executing the same function.
-* To avoid the problem above, we require variables allocated in 'safe' regions.
-* The bug was actually observed with the forkID, these variables below are
-* used to indicate the various contexts to which to switch to.
-*
-* @savedSnapshotContext: contains the point to which takesnapshot() call should switch to.
-* @savedUserSnapshotContext: contains the point to which the process whose snapshotid is equal to the rollbackid should switch to
-* @snapshotid: it is a running counter for the various forked processes snapshotid. it is incremented and set in a persistently shared record
-*/
-static ucontext_t savedSnapshotContext;
-static ucontext_t savedUserSnapshotContext;
+ * These variables are necessary because the stack is shared region and
+ * there exists a race between all processes executing the same function.
+ * To avoid the problem above, we require variables allocated in 'safe' regions.
+ * The bug was actually observed with the forkID, these variables below are
+ * used to indicate the various contexts to which to switch to.
+ *
+ * @private_ctxt: the context which is internal to the current process. Used
+ * for running the internal snapshot/rollback loop.
+ * @exit_ctxt: a special context used just for exiting from a process (so we
+ * can use swapcontext() instead of setcontext() + hacks)
+ * @snapshotid: it is a running counter for the various forked processes
+ * snapshotid. it is incremented and set in a persistently shared record
+ */
+static ucontext_t private_ctxt;
+static ucontext_t exit_ctxt;
static snapshot_id snapshotid = 0;
/**
 * @brief Create a new context, with a given stack and entry function
 * @param ctxt The context structure to fill
 * @param stack The stack to run the new context in
 * @param stacksize The size of the stack
 * @param func The entry point function for the context
 */
static void create_context(ucontext_t *ctxt, void *stack, size_t stacksize,
		void (*func)(void))
{
	getcontext(ctxt);
	ctxt->uc_stack.ss_sp = stack;
	ctxt->uc_stack.ss_size = stacksize;
	ctxt->uc_link = NULL;	/* contexts built here must never return from 'func' */
	makecontext(ctxt, func, 0);
}
+
/** @brief Entry point for the "exiting" context: terminates the process.
 *
 * _Exit() skips atexit() handlers and stdio flushing, which must not run
 * in discarded snapshot children. */
static void fork_exit()
{
	_Exit(EXIT_SUCCESS);
}
+
static void createSharedMemory()
{
//step 1. create shared memory.
void *memMapBase = mmap(0, SHARED_MEMORY_DEFAULT + STACK_SIZE_DEFAULT, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANON, -1, 0);
- if (MAP_FAILED == memMapBase)
- FAILURE("mmap");
+ if (memMapBase == MAP_FAILED) {
+ perror("mmap");
+ exit(EXIT_FAILURE);
+ }
//Setup snapshot record at top of free region
fork_snap = (struct fork_snapshotter *)memMapBase;
- fork_snap->mSharedMemoryBase = (void *)((uintptr_t)memMapBase + sizeof(struct fork_snapshotter));
+ fork_snap->mSharedMemoryBase = (void *)((uintptr_t)memMapBase + sizeof(*fork_snap));
fork_snap->mStackBase = (void *)((uintptr_t)memMapBase + SHARED_MEMORY_DEFAULT);
fork_snap->mStackSize = STACK_SIZE_DEFAULT;
fork_snap->mIDToRollback = -1;
{
if (!fork_snap)
createSharedMemory();
- return create_mspace_with_base((void *)(fork_snap->mSharedMemoryBase), SHARED_MEMORY_DEFAULT - sizeof(struct fork_snapshotter), 1);
+ return create_mspace_with_base((void *)(fork_snap->mSharedMemoryBase), SHARED_MEMORY_DEFAULT - sizeof(*fork_snap), 1);
}
static void fork_snapshot_init(unsigned int numbackingpages,
- unsigned int numsnapshots, unsigned int nummemoryregions,
- unsigned int numheappages, VoidFuncPtr entryPoint)
+ unsigned int numsnapshots, unsigned int nummemoryregions,
+ unsigned int numheappages)
{
if (!fork_snap)
createSharedMemory();
- void *base_model_snapshot_space = malloc((numheappages + 1) * PAGESIZE);
- void *pagealignedbase = PageAlignAddressUpward(base_model_snapshot_space);
- model_snapshot_space = create_mspace_with_base(pagealignedbase, numheappages * PAGESIZE, 1);
-
- //step 2 setup the stack context.
- ucontext_t newContext;
- getcontext(&newContext);
- newContext.uc_stack.ss_sp = fork_snap->mStackBase;
- newContext.uc_stack.ss_size = STACK_SIZE_DEFAULT;
- makecontext(&newContext, entryPoint, 0);
- /* switch to a new entryPoint context, on a new stack */
- swapcontext(&savedSnapshotContext, &newContext);
+ model_snapshot_space = create_mspace(numheappages * PAGESIZE, 1);
+}
+static void fork_loop() {
/* switch back here when takesnapshot is called */
- pid_t forkedID = 0;
snapshotid = fork_snap->currSnapShotID;
- /* This bool indicates that the current process's snapshotid is same
- as the id to which the rollback needs to occur */
+ if (model->params.nofork) {
+ setcontext(&fork_snap->shared_ctxt);
+ exit(EXIT_SUCCESS);
+ }
- bool rollback = false;
while (true) {
+ pid_t forkedID;
fork_snap->currSnapShotID = snapshotid + 1;
forkedID = fork();
if (0 == forkedID) {
- /* If the rollback bool is set, switch to the context we need to
- return to during a rollback. */
- if (rollback) {
- setcontext(&(fork_snap->mContextToRollback));
- } else {
- /*Child process which is forked as a result of takesnapshot
- call should switch back to the takesnapshot context*/
- setcontext(&savedUserSnapshotContext);
- }
+ setcontext(&fork_snap->shared_ctxt);
} else {
- int status;
- int retVal;
-
- DEBUG("The process id of child is %d and the process id of this process is %d and snapshot id is %d\n",
- forkedID, getpid(), snapshotid);
-
- do {
- retVal = waitpid(forkedID, &status, 0);
- } while (-1 == retVal && errno == EINTR);
+ DEBUG("parent PID: %d, child PID: %d, snapshot ID: %d\n",
+ getpid(), forkedID, snapshotid);
+
+ while (waitpid(forkedID, NULL, 0) < 0) {
+ /* waitpid() may be interrupted */
+ if (errno != EINTR) {
+ perror("waitpid");
+ exit(EXIT_FAILURE);
+ }
+ }
- if (fork_snap->mIDToRollback != snapshotid) {
+ if (fork_snap->mIDToRollback != snapshotid)
exit(EXIT_SUCCESS);
- }
- rollback = true;
}
}
}
-static snapshot_id fork_take_snapshot()
-{
- swapcontext(&savedUserSnapshotContext, &savedSnapshotContext);
+static void fork_startExecution(ucontext_t *context, VoidFuncPtr entryPoint) {
+ /* setup an "exiting" context */
+ char stack[128];
+ create_context(&exit_ctxt, stack, sizeof(stack), fork_exit);
+
+ /* setup the system context */
+ create_context(context, fork_snap->mStackBase, STACK_SIZE_DEFAULT, entryPoint);
+ /* switch to a new entryPoint context, on a new stack */
+ create_context(&private_ctxt, snapshot_calloc(STACK_SIZE_DEFAULT, 1), STACK_SIZE_DEFAULT, fork_loop);
+}
+
+static snapshot_id fork_take_snapshot() {
+ model_swapcontext(&fork_snap->shared_ctxt, &private_ctxt);
DEBUG("TAKESNAPSHOT RETURN\n");
return snapshotid;
}
/** @brief Roll back to the given snapshot ID.
 *  @param theID The snapshot identifier to roll back to
 *
 * Publishes the rollback target, then swaps to exit_ctxt so this process
 * exits; the surviving process whose snapshot ID matches forks a fresh
 * child that resumes here (on the shared context) and clears the target. */
static void fork_roll_back(snapshot_id theID)
{
	DEBUG("Rollback\n");
	fork_snap->mIDToRollback = theID;
	model_swapcontext(&fork_snap->shared_ctxt, &exit_ctxt);
	fork_snap->mIDToRollback = -1;
}
-#endif /* !USE_MPROTECT_SNAPSHOT */
+#endif /* !USE_MPROTECT_SNAPSHOT */
/**
 * @brief Initializes the snapshot system
 *
 * (BUG FIX(review): doc listed a stale @param entryPoint; that parameter
 * was moved to startExecution().)
 * @param numbackingpages Number of backing pages (mprotect-based build only)
 * @param numsnapshots Max number of snapshots supported (mprotect-based build only)
 * @param nummemoryregions Max number of memory regions (mprotect-based build only)
 * @param numheappages Number of pages for the snapshotting heap(s)
 */
void snapshot_system_init(unsigned int numbackingpages,
		unsigned int numsnapshots, unsigned int nummemoryregions,
		unsigned int numheappages)
{
#if USE_MPROTECT_SNAPSHOT
	mprot_snapshot_init(numbackingpages, numsnapshots, nummemoryregions, numheappages);
#else
	fork_snapshot_init(numbackingpages, numsnapshots, nummemoryregions, numheappages);
#endif
}

+
+void startExecution(ucontext_t *context, VoidFuncPtr entryPoint)
{
#if USE_MPROTECT_SNAPSHOT
- mprot_snapshot_init(numbackingpages, numsnapshots, nummemoryregions, numheappages, entryPoint);
+ mprot_startExecution(context, entryPoint);
#else
- fork_snapshot_init(numbackingpages, numsnapshots, nummemoryregions, numheappages, entryPoint);
+ fork_startExecution(context, entryPoint);
#endif
}
/** @brief Register a memory region to be included in snapshots.
 *  Assumes that addr is page aligned. No-op in the fork-based build,
 *  where fork() captures the whole address space. */
void snapshot_add_memory_region(void *addr, unsigned int numPages)
{
#if USE_MPROTECT_SNAPSHOT
	mprot_add_to_snapshot(addr, numPages);
#else
	/* unused in the fork-based build */
	(void)addr;
	(void)numPages;
#endif
}
-/** The takeSnapshot function takes a snapshot.
+/** Takes a snapshot of memory.
* @return The snapshot identifier.
*/
-snapshot_id takeSnapshot()
+snapshot_id take_snapshot()
{
#if USE_MPROTECT_SNAPSHOT
return mprot_take_snapshot();
#endif
}
-/** The rollBack function rollback to the given snapshot identifier.
+/** Rolls the memory state back to the given snapshot identifier.
* @param theID is the snapshot identifier to rollback to.
*/
-void rollBack(snapshot_id theID)
+void snapshot_roll_back(snapshot_id theID)
{
#if USE_MPROTECT_SNAPSHOT
mprot_roll_back(theID);