if (cv->num_threads > num_threads) {
clock = (modelclock_t *)snapshot_realloc(clock, cv->num_threads * sizeof(modelclock_t));
- for (int i= num_threads; i < cv->num_threads; i++)
+ for (int i = num_threads; i < cv->num_threads; i++)
clock[i] = 0;
num_threads = cv->num_threads;
}
void assert_hook(void)
{
- model_print("Add breakpoint to line %u in file %s.\n",__LINE__,__FILE__);
+ model_print("Add breakpoint to line %u in file %s.\n", __LINE__, __FILE__);
}
void model_assert(bool expr, const char *file, int line)
void CycleGraph::dumpGraphToFile(const char *filename)
{
char buffer[200];
- sprintf(buffer, "%s.dot",filename);
+ sprintf(buffer, "%s.dot", filename);
FILE *file = fopen(buffer, "w");
- fprintf(file, "digraph %s {\n",filename);
+ fprintf(file, "digraph %s {\n", filename);
dumpNodes(file);
- fprintf(file,"}\n");
- fclose(file);
+ fprintf(file, "}\n");
+ fclose(file);
}
#endif
*/
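// Note: dumpGraphToFile above writes plain Graphviz input; a dump written with
// an illustrative filename "foo" produces foo.dot, which can be rendered with
// `dot -Tpng foo.dot -o foo.png`.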
bool CycleGraph::checkReachable(CycleNode *from, CycleNode *to)
{
- std::vector<CycleNode *, ModelAlloc<CycleNode *> > queue;
+ std::vector< CycleNode *, ModelAlloc<CycleNode *> > queue;
discovered->reset();
queue.push_back(from);
for (unsigned int i = 0; i < node->getNumEdges(); i++) {
CycleNode *next = node->getEdge(i);
if (!discovered->contains(next)) {
- discovered->put(next,next);
+ discovered->put(next, next);
queue.push_back(next);
}
}
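// For orientation: the hunks above are only the edge-expansion step of the
// reachability search; the enclosing worklist loop (not shown in this excerpt,
// sketched here from the visible CycleNode/HashTable API) has roughly this shape:
//
//	while (!queue.empty()) {
//		CycleNode *node = queue.back();
//		queue.pop_back();
//		if (node == to)
//			return true;
//		/* ...expand node's edges as in the hunk above... */
//	}
//	return false;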
bool CycleGraph::checkPromise(const ModelAction *fromact, Promise *promise)
{
- std::vector<CycleNode *, ModelAlloc<CycleNode *> > queue;
+ std::vector< CycleNode *, ModelAlloc<CycleNode *> > queue;
discovered->reset();
CycleNode *from = actionToNode.get(fromact);
for (unsigned int i = 0; i < node->getNumEdges(); i++) {
CycleNode *next = node->getEdge(i);
if (!discovered->contains(next)) {
- discovered->put(next,next);
+ discovered->put(next, next);
queue.push_back(next);
}
}
void store_8(void *addr, uint8_t val)
{
DEBUG("addr = %p, val = %" PRIu8 "\n", addr, val);
- thread_id_t tid=thread_current()->get_id();
- ClockVector * cv=model->get_cv(tid);
+ thread_id_t tid = thread_current()->get_id();
+ ClockVector *cv = model->get_cv(tid);
raceCheckWrite(tid, addr, cv);
(*(uint8_t *)addr) = val;
}
void store_16(void *addr, uint16_t val)
{
DEBUG("addr = %p, val = %" PRIu16 "\n", addr, val);
- thread_id_t tid=thread_current()->get_id();
- ClockVector * cv=model->get_cv(tid);
+ thread_id_t tid = thread_current()->get_id();
+ ClockVector *cv = model->get_cv(tid);
raceCheckWrite(tid, addr, cv);
- raceCheckWrite(tid, (void *)(((uintptr_t)addr)+1), cv);
+ raceCheckWrite(tid, (void *)(((uintptr_t)addr) + 1), cv);
(*(uint16_t *)addr) = val;
}
void store_32(void *addr, uint32_t val)
{
DEBUG("addr = %p, val = %" PRIu32 "\n", addr, val);
- thread_id_t tid=thread_current()->get_id();
- ClockVector * cv=model->get_cv(tid);
+ thread_id_t tid = thread_current()->get_id();
+ ClockVector *cv = model->get_cv(tid);
raceCheckWrite(tid, addr, cv);
- raceCheckWrite(tid, (void *)(((uintptr_t)addr)+1), cv);
- raceCheckWrite(tid, (void *)(((uintptr_t)addr)+2), cv);
- raceCheckWrite(tid, (void *)(((uintptr_t)addr)+3), cv);
+ raceCheckWrite(tid, (void *)(((uintptr_t)addr) + 1), cv);
+ raceCheckWrite(tid, (void *)(((uintptr_t)addr) + 2), cv);
+ raceCheckWrite(tid, (void *)(((uintptr_t)addr) + 3), cv);
(*(uint32_t *)addr) = val;
}
void store_64(void *addr, uint64_t val)
{
DEBUG("addr = %p, val = %" PRIu64 "\n", addr, val);
- thread_id_t tid=thread_current()->get_id();
- ClockVector * cv=model->get_cv(tid);
+ thread_id_t tid = thread_current()->get_id();
+ ClockVector *cv = model->get_cv(tid);
raceCheckWrite(tid, addr, cv);
- raceCheckWrite(tid, (void *)(((uintptr_t)addr)+1), cv);
- raceCheckWrite(tid, (void *)(((uintptr_t)addr)+2), cv);
- raceCheckWrite(tid, (void *)(((uintptr_t)addr)+3), cv);
- raceCheckWrite(tid, (void *)(((uintptr_t)addr)+4), cv);
- raceCheckWrite(tid, (void *)(((uintptr_t)addr)+5), cv);
- raceCheckWrite(tid, (void *)(((uintptr_t)addr)+6), cv);
- raceCheckWrite(tid, (void *)(((uintptr_t)addr)+7), cv);
+ raceCheckWrite(tid, (void *)(((uintptr_t)addr) + 1), cv);
+ raceCheckWrite(tid, (void *)(((uintptr_t)addr) + 2), cv);
+ raceCheckWrite(tid, (void *)(((uintptr_t)addr) + 3), cv);
+ raceCheckWrite(tid, (void *)(((uintptr_t)addr) + 4), cv);
+ raceCheckWrite(tid, (void *)(((uintptr_t)addr) + 5), cv);
+ raceCheckWrite(tid, (void *)(((uintptr_t)addr) + 6), cv);
+ raceCheckWrite(tid, (void *)(((uintptr_t)addr) + 7), cv);
(*(uint64_t *)addr) = val;
}
uint8_t load_8(const void *addr)
{
DEBUG("addr = %p\n", addr);
- thread_id_t tid=thread_current()->get_id();
- ClockVector * cv=model->get_cv(tid);
+ thread_id_t tid = thread_current()->get_id();
+ ClockVector *cv = model->get_cv(tid);
raceCheckRead(tid, addr, cv);
return *((uint8_t *)addr);
}
uint16_t load_16(const void *addr)
{
DEBUG("addr = %p\n", addr);
- thread_id_t tid=thread_current()->get_id();
- ClockVector * cv=model->get_cv(tid);
+ thread_id_t tid = thread_current()->get_id();
+ ClockVector *cv = model->get_cv(tid);
raceCheckRead(tid, addr, cv);
- raceCheckRead(tid, (const void *)(((uintptr_t)addr)+1), cv);
+ raceCheckRead(tid, (const void *)(((uintptr_t)addr) + 1), cv);
return *((uint16_t *)addr);
}
uint32_t load_32(const void *addr)
{
DEBUG("addr = %p\n", addr);
- thread_id_t tid=thread_current()->get_id();
- ClockVector * cv=model->get_cv(tid);
+ thread_id_t tid = thread_current()->get_id();
+ ClockVector *cv = model->get_cv(tid);
raceCheckRead(tid, addr, cv);
- raceCheckRead(tid, (const void *)(((uintptr_t)addr)+1), cv);
- raceCheckRead(tid, (const void *)(((uintptr_t)addr)+2), cv);
- raceCheckRead(tid, (const void *)(((uintptr_t)addr)+3), cv);
+ raceCheckRead(tid, (const void *)(((uintptr_t)addr) + 1), cv);
+ raceCheckRead(tid, (const void *)(((uintptr_t)addr) + 2), cv);
+ raceCheckRead(tid, (const void *)(((uintptr_t)addr) + 3), cv);
return *((uint32_t *)addr);
}
uint64_t load_64(const void *addr)
{
DEBUG("addr = %p\n", addr);
- thread_id_t tid=thread_current()->get_id();
- ClockVector * cv=model->get_cv(tid);
+ thread_id_t tid = thread_current()->get_id();
+ ClockVector *cv = model->get_cv(tid);
raceCheckRead(tid, addr, cv);
- raceCheckRead(tid, (const void *)(((uintptr_t)addr)+1), cv);
- raceCheckRead(tid, (const void *)(((uintptr_t)addr)+2), cv);
- raceCheckRead(tid, (const void *)(((uintptr_t)addr)+3), cv);
- raceCheckRead(tid, (const void *)(((uintptr_t)addr)+4), cv);
- raceCheckRead(tid, (const void *)(((uintptr_t)addr)+5), cv);
- raceCheckRead(tid, (const void *)(((uintptr_t)addr)+6), cv);
- raceCheckRead(tid, (const void *)(((uintptr_t)addr)+7), cv);
+ raceCheckRead(tid, (const void *)(((uintptr_t)addr) + 1), cv);
+ raceCheckRead(tid, (const void *)(((uintptr_t)addr) + 2), cv);
+ raceCheckRead(tid, (const void *)(((uintptr_t)addr) + 3), cv);
+ raceCheckRead(tid, (const void *)(((uintptr_t)addr) + 4), cv);
+ raceCheckRead(tid, (const void *)(((uintptr_t)addr) + 5), cv);
+ raceCheckRead(tid, (const void *)(((uintptr_t)addr) + 6), cv);
+ raceCheckRead(tid, (const void *)(((uintptr_t)addr) + 7), cv);
return *((uint64_t *)addr);
}
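// All of the store_*/load_* shims follow one pattern: run a per-byte race check
// for every byte the access touches, then perform the raw access. A minimal
// sketch of that expansion, using a hypothetical helper (the real functions
// above are unrolled per width instead of looping):
//
//	static inline void raceCheckReadBytes(thread_id_t tid, const void *addr,
//	                                      size_t len, ClockVector *cv)
//	{
//		for (size_t i = 0; i < len; i++)
//			raceCheckRead(tid, (const void *)(((uintptr_t)addr) + i), cv);
//	}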
#define REQUESTS_BEFORE_ALLOC 1024
-size_t allocatedReqs[ REQUESTS_BEFORE_ALLOC ] = { 0 };
+size_t allocatedReqs[REQUESTS_BEFORE_ALLOC] = { 0 };
int nextRequest = 0;
int howManyFreed = 0;
#if !USE_MPROTECT_SNAPSHOT
ptr = callocp(count, size);
return ptr;
#else
- if (!snapshotrecord) {
+ if (!snapshotrecord)
createSharedMemory();
- }
- if (NULL == sStaticSpace)
- sStaticSpace = create_mspace_with_base(( void *)( snapshotrecord->mSharedMemoryBase), SHARED_MEMORY_DEFAULT -sizeof(struct SnapShot), 1);
+ if (!sStaticSpace)
+ sStaticSpace = create_mspace_with_base((void *)(snapshotrecord->mSharedMemoryBase), SHARED_MEMORY_DEFAULT - sizeof(struct SnapShot), 1);
return mspace_calloc(sStaticSpace, count, size);
#endif
}
ptr = mallocp(size);
return ptr;
#else
- if (!snapshotrecord) {
+ if (!snapshotrecord)
createSharedMemory();
- }
- if (NULL == sStaticSpace)
- sStaticSpace = create_mspace_with_base(( void *)( snapshotrecord->mSharedMemoryBase), SHARED_MEMORY_DEFAULT -sizeof(struct SnapShot), 1);
+ if (!sStaticSpace)
+ sStaticSpace = create_mspace_with_base((void *)(snapshotrecord->mSharedMemoryBase), SHARED_MEMORY_DEFAULT - sizeof(struct SnapShot), 1);
return mspace_malloc(sStaticSpace, size);
#endif
}
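/* Design note: sStaticSpace is a dlmalloc mspace carved out of
 * snapshotrecord->mSharedMemoryBase, i.e. out of the MAP_SHARED region set up
 * by createSharedMemory(). Allocations routed there are visible to every
 * fork()ed snapshot process, so the model checker's own bookkeeping survives a
 * fork-based rollback instead of reverting along with the user's heap. */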
/* get address of libc free */
if (!freep) {
- freep = ( void ( * )( void *))dlsym(RTLD_NEXT, "free");
+ freep = (void (*)(void *))dlsym(RTLD_NEXT, "free");
if ((error = dlerror()) != NULL) {
fputs(error, stderr);
exit(EXIT_FAILURE);
#endif
}
-/** Bootstrap allocation. Problem is that the dynamic linker calls
- * require calloc to work and calloc requires the dynamic linker to
- * work. */
+/** Bootstrap allocation. The problem is that the dynamic linker's calls
+ * require calloc to work, while calloc requires the dynamic linker to work. */
#define BOOTSTRAPBYTES 4096
char bootstrapmemory[BOOTSTRAPBYTES];
exit(EXIT_FAILURE);
}
- void *pointer= (void *)&bootstrapmemory[offset];
+ void *pointer = (void *)&bootstrapmemory[offset];
offset += sz;
return pointer;
}
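// How the bootstrap buffer breaks the cycle, as a sketch (the guard and helper
// names here are illustrative; callocp is the dlsym-resolved pointer used
// above, and the early path bump-allocates from the zero-initialized
// bootstrapmemory[] as shown):
//
//	void *calloc(size_t count, size_t size)
//	{
//		if (!callocp)				// dlsym() has not resolved libc calloc yet
//			return bootstrap_alloc(count * size);	// zeroed bytes from bootstrapmemory[]
//		return callocp(count, size);
//	}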
* that may contain globals and then configures the snapshotting
* library to snapshot them.
*/
-static void SnapshotGlobalSegments(){
+static void SnapshotGlobalSegments()
+{
int pid = getpid();
char buf[9000], execname[100];
FILE *map;
sprintf(execname, "vmmap -interleaved %d", pid);
- map=popen(execname, "r");
+ map = popen(execname, "r");
if (!map) {
perror("popen");
void *begin, *end;
//Skip out at the end of the section
- if (buf[0]=='\n')
+ if (buf[0] == '\n')
break;
sscanf(buf, "%22s %p-%p [%5dK] %c%c%c/%c%c%c SM=%3s %200s\n", type, &begin, &end, &size, &r, &w, &x, &mr, &mw, &mx, smstr, regionname);
* that may contain globals and then configures the snapshotting
* library to snapshot them.
*/
-static void SnapshotGlobalSegments(){
+static void SnapshotGlobalSegments()
+{
char buf[9000];
char binary_name[800];
FILE *map;
}
#endif
-SnapshotStack::SnapshotStack(){
+SnapshotStack::SnapshotStack()
+{
SnapshotGlobalSegments();
- stack=NULL;
+ stack = NULL;
}
-SnapshotStack::~SnapshotStack(){
+SnapshotStack::~SnapshotStack()
+{
}
 * @param seqindex is the sequence number to roll back before.
* @return is the sequence number we actually rolled back to.
*/
-int SnapshotStack::backTrackBeforeStep(int seqindex) {
- while(true) {
- if (stack->index<=seqindex) {
+int SnapshotStack::backTrackBeforeStep(int seqindex)
+{
+ while (true) {
+ if (stack->index <= seqindex) {
//have right entry
rollBack(stack->snapshotid);
return stack->index;
}
- struct stackEntry *tmp=stack;
- stack=stack->next;
+ struct stackEntry *tmp = stack;
+ stack = stack->next;
model_free(tmp);
}
}
/** This method takes a snapshot at the given sequence number. */
-void SnapshotStack::snapshotStep(int seqindex) {
- struct stackEntry *tmp=(struct stackEntry *)model_malloc(sizeof(struct stackEntry));
- tmp->next=stack;
- tmp->index=seqindex;
- tmp->snapshotid=takeSnapshot();
- stack=tmp;
+void SnapshotStack::snapshotStep(int seqindex)
+{
+ struct stackEntry *tmp = (struct stackEntry *)model_malloc(sizeof(struct stackEntry));
+ tmp->next = stack;
+ tmp->index = seqindex;
+ tmp->snapshotid = takeSnapshot();
+ stack = tmp;
}
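// Typical usage (a sketch; the model checker's actual call sites may differ):
//
//	snapshotObject->snapshotStep(seq_number);			// snapshot before exploring a step
//	...
//	int at = snapshotObject->backTrackBeforeStep(target);	// later, rewind to before 'target'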
unsigned int numheappages, VoidFuncPtr entryPoint);
struct stackEntry {
- struct stackEntry *next;
- snapshot_id snapshotid;
- int index;
+ struct stackEntry *next;
+ snapshot_id snapshotid;
+ int index;
};
class SnapshotStack {
public:
- MEMALLOC
- SnapshotStack( );
- ~SnapshotStack();
- int backTrackBeforeStep(int seq_index);
- void snapshotStep(int seq_index);
+ SnapshotStack();
+ ~SnapshotStack();
+ int backTrackBeforeStep(int seq_index);
+ void snapshotStep(int seq_index);
+ MEMALLOC
private:
- struct stackEntry * stack;
+ struct stackEntry *stack;
};
/* Not sure what it even means to have more than one snapshot object,
so let's just make a global reference to it.*/
-extern SnapshotStack * snapshotObject;
+extern SnapshotStack *snapshotObject;
#endif
#include "mymemory.h"
#include "common.h"
-#define FAILURE(mesg) { model_print("failed in the API: %s with errno relative message: %s\n", mesg, strerror( errno ) ); exit(EXIT_FAILURE); }
+#define FAILURE(mesg) { model_print("failed in the API: %s with errno relative message: %s\n", mesg, strerror(errno)); exit(EXIT_FAILURE); }
#ifdef CONFIG_SSDEBUG
#define SSDEBUG model_print
#endif
/* extern declaration definition */
-struct SnapShot * snapshotrecord = NULL;
+struct SnapShot *snapshotrecord = NULL;
/** PageAlignAddressUpward returns the given address rounded up to the
 * next page boundary.
 */
-static void * PageAlignAddressUpward(void * addr) {
- return (void *)((((uintptr_t)addr)+PAGESIZE-1)&~(PAGESIZE-1));
+static void * PageAlignAddressUpward(void *addr)
+{
+ return (void *)((((uintptr_t)addr) + PAGESIZE - 1) & ~(PAGESIZE - 1));
}
#if !USE_MPROTECT_SNAPSHOT
/** ReturnPageAlignedAddress returns the given address rounded down to the
 * start of the page that contains it.
 */
-static void * ReturnPageAlignedAddress(void * addr) {
- return (void *)(((uintptr_t)addr)&~(PAGESIZE-1));
+static void * ReturnPageAlignedAddress(void *addr)
+{
+ return (void *)(((uintptr_t)addr) & ~(PAGESIZE - 1));
}
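// Worked example of the two alignment helpers, assuming the usual PAGESIZE of 4096 (0x1000):
//	PageAlignAddressUpward((void *)0x804a001)   == (void *)0x804b000	// round up
//	PageAlignAddressUpward((void *)0x804b000)   == (void *)0x804b000	// already aligned
//	ReturnPageAlignedAddress((void *)0x804a7ff) == (void *)0x804a000	// round down to page start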
/** The initSnapShotRecord method initializes the snapshotting data
 * structures for the mprotect-based snapshot.
 */
-static void initSnapShotRecord(unsigned int numbackingpages, unsigned int numsnapshots, unsigned int nummemoryregions) {
- snapshotrecord=( struct SnapShot * )model_malloc(sizeof(struct SnapShot));
- snapshotrecord->regionsToSnapShot=( struct MemoryRegion * )model_malloc(sizeof(struct MemoryRegion)*nummemoryregions);
- snapshotrecord->backingStoreBasePtr= ( struct SnapShotPage * )model_malloc( sizeof( struct SnapShotPage ) * (numbackingpages + 1) );
+static void initSnapShotRecord(unsigned int numbackingpages, unsigned int numsnapshots, unsigned int nummemoryregions)
+{
+ snapshotrecord = (struct SnapShot *)model_malloc(sizeof(struct SnapShot));
+ snapshotrecord->regionsToSnapShot = (struct MemoryRegion *)model_malloc(sizeof(struct MemoryRegion) * nummemoryregions);
+ snapshotrecord->backingStoreBasePtr = (struct SnapShotPage *)model_malloc(sizeof(struct SnapShotPage) * (numbackingpages + 1));
//Page align the backingstorepages
- snapshotrecord->backingStore=( struct SnapShotPage * )PageAlignAddressUpward(snapshotrecord->backingStoreBasePtr);
- snapshotrecord->backingRecords=( struct BackingPageRecord * )model_malloc(sizeof(struct BackingPageRecord)*numbackingpages);
- snapshotrecord->snapShots= ( struct SnapShotRecord * )model_malloc(sizeof(struct SnapShotRecord)*numsnapshots);
- snapshotrecord->lastSnapShot=0;
- snapshotrecord->lastBackingPage=0;
- snapshotrecord->lastRegion=0;
- snapshotrecord->maxRegions=nummemoryregions;
- snapshotrecord->maxBackingPages=numbackingpages;
- snapshotrecord->maxSnapShots=numsnapshots;
+ snapshotrecord->backingStore = (struct SnapShotPage *)PageAlignAddressUpward(snapshotrecord->backingStoreBasePtr);
+ snapshotrecord->backingRecords = (struct BackingPageRecord *)model_malloc(sizeof(struct BackingPageRecord) * numbackingpages);
+ snapshotrecord->snapShots = (struct SnapShotRecord *)model_malloc(sizeof(struct SnapShotRecord) * numsnapshots);
+ snapshotrecord->lastSnapShot = 0;
+ snapshotrecord->lastBackingPage = 0;
+ snapshotrecord->lastRegion = 0;
+ snapshotrecord->maxRegions = nummemoryregions;
+ snapshotrecord->maxBackingPages = numbackingpages;
+ snapshotrecord->maxSnapShots = numsnapshots;
}
/** HandlePF is the page fault handler for the mprotect-based snapshotting
 * algorithm.
*/
-static void HandlePF( int sig, siginfo_t *si, void * unused){
- if( si->si_code == SEGV_MAPERR ){
+static void HandlePF(int sig, siginfo_t *si, void *unused)
+{
+ if (si->si_code == SEGV_MAPERR) {
model_print("Real Fault at %p\n", si->si_addr);
print_trace();
model_print("For debugging, place breakpoint at: %s:%d\n",
__FILE__, __LINE__);
- exit( EXIT_FAILURE );
+ exit(EXIT_FAILURE);
}
void* addr = ReturnPageAlignedAddress(si->si_addr);
- unsigned int backingpage=snapshotrecord->lastBackingPage++; //Could run out of pages...
- if (backingpage==snapshotrecord->maxBackingPages) {
+ unsigned int backingpage = snapshotrecord->lastBackingPage++; //Could run out of pages...
+ if (backingpage == snapshotrecord->maxBackingPages) {
model_print("Out of backing pages at %p\n", si->si_addr);
- exit( EXIT_FAILURE );
+ exit(EXIT_FAILURE);
}
//copy page
memcpy(&(snapshotrecord->backingStore[backingpage]), addr, sizeof(struct SnapShotPage));
//remember where to copy page back to
- snapshotrecord->backingRecords[backingpage].basePtrOfPage=addr;
+ snapshotrecord->backingRecords[backingpage].basePtrOfPage = addr;
//set protection to read/write
- if (mprotect( addr, sizeof(struct SnapShotPage), PROT_READ | PROT_WRITE )) {
+ if (mprotect(addr, sizeof(struct SnapShotPage), PROT_READ | PROT_WRITE)) {
perror("mprotect");
// Handle error by quitting?
}
#endif /* USE_MPROTECT_SNAPSHOT */
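/* Below is a self-contained sketch (not part of this patch) of the
 * copy-on-write trick HandlePF implements: protect a tracked page read-only,
 * let the first write fault, save a copy of the page inside the signal
 * handler, then re-enable writes so the faulting store can retry. Assumes a
 * POSIX system with 4 KiB pages; all names here are illustrative. */
#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#define DEMO_PAGESIZE 4096

static char backing_copy[DEMO_PAGESIZE];	/* saved pre-write contents of the page */
static void *tracked_page;

static void demo_handler(int sig, siginfo_t *si, void *unused)
{
	(void)sig; (void)unused;
	void *page = (void *)((uintptr_t)si->si_addr & ~(uintptr_t)(DEMO_PAGESIZE - 1));
	memcpy(backing_copy, page, DEMO_PAGESIZE);		/* snapshot the page on first write */
	mprotect(page, DEMO_PAGESIZE, PROT_READ | PROT_WRITE);	/* let the write proceed */
}

int main(void)
{
	struct sigaction sa;
	memset(&sa, 0, sizeof(sa));
	sa.sa_flags = SA_SIGINFO;
	sa.sa_sigaction = demo_handler;
	sigaction(SIGSEGV, &sa, NULL);
	sigaction(SIGBUS, &sa, NULL);	/* macOS delivers protection faults as SIGBUS */

	tracked_page = mmap(NULL, DEMO_PAGESIZE, PROT_READ | PROT_WRITE,
			MAP_PRIVATE | MAP_ANON, -1, 0);
	if (tracked_page == MAP_FAILED)
		return 1;
	strcpy((char *)tracked_page, "original");
	mprotect(tracked_page, DEMO_PAGESIZE, PROT_READ);	/* "take a snapshot" */

	strcpy((char *)tracked_page, "modified");		/* faults once; handler copies the page */
	printf("live: %s, saved: %s\n", (char *)tracked_page, backing_copy);

	memcpy(tracked_page, backing_copy, DEMO_PAGESIZE);	/* "roll back" by restoring the copy */
	printf("after rollback: %s\n", (char *)tracked_page);
	return 0;
}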
#if !USE_MPROTECT_SNAPSHOT
-void createSharedMemory(){
+void createSharedMemory()
+{
//step 1. create shared memory.
- void * memMapBase = mmap( 0, SHARED_MEMORY_DEFAULT + STACK_SIZE_DEFAULT, PROT_READ | PROT_WRITE, MAP_SHARED|MAP_ANON, -1, 0 );
- if( MAP_FAILED == memMapBase )
+ void *memMapBase = mmap(0, SHARED_MEMORY_DEFAULT + STACK_SIZE_DEFAULT, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANON, -1, 0);
+ if (MAP_FAILED == memMapBase)
FAILURE("mmap");
//Setup snapshot record at top of free region
- snapshotrecord = ( struct SnapShot * )memMapBase;
+ snapshotrecord = (struct SnapShot *)memMapBase;
snapshotrecord->mSharedMemoryBase = (void *)((uintptr_t)memMapBase + sizeof(struct SnapShot));
snapshotrecord->mStackBase = (void *)((uintptr_t)memMapBase + SHARED_MEMORY_DEFAULT);
snapshotrecord->mStackSize = STACK_SIZE_DEFAULT;
void initSnapshotLibrary(unsigned int numbackingpages,
unsigned int numsnapshots, unsigned int nummemoryregions,
- unsigned int numheappages, VoidFuncPtr entryPoint) {
+ unsigned int numheappages, VoidFuncPtr entryPoint)
+{
/* Setup a stack for our signal handler.... */
stack_t ss;
- ss.ss_sp = PageAlignAddressUpward(model_malloc(SIGSTACKSIZE+PAGESIZE-1));
+ ss.ss_sp = PageAlignAddressUpward(model_malloc(SIGSTACKSIZE + PAGESIZE - 1));
ss.ss_size = SIGSTACKSIZE;
ss.ss_flags = 0;
sigaltstack(&ss, NULL);
struct sigaction sa;
sa.sa_flags = SA_SIGINFO | SA_NODEFER | SA_RESTART | SA_ONSTACK;
- sigemptyset( &sa.sa_mask );
+ sigemptyset(&sa.sa_mask);
sa.sa_sigaction = HandlePF;
#ifdef MAC
- if( sigaction( SIGBUS, &sa, NULL ) == -1 ){
+ if (sigaction(SIGBUS, &sa, NULL) == -1) {
model_print("SIGACTION CANNOT BE INSTALLED\n");
exit(EXIT_FAILURE);
}
#endif
- if( sigaction( SIGSEGV, &sa, NULL ) == -1 ){
+ if (sigaction(SIGSEGV, &sa, NULL) == -1) {
model_print("SIGACTION CANNOT BE INSTALLED\n");
exit(EXIT_FAILURE);
}
siginfo_t si;
memset(&si, 0, sizeof(si));
- si.si_addr=ss.ss_sp;
+ si.si_addr = ss.ss_sp;
HandlePF(SIGSEGV, &si, NULL);
snapshotrecord->lastBackingPage--; //remove the fake page we copied
- void *basemySpace = model_malloc((numheappages+1)*PAGESIZE);
- void * pagealignedbase=PageAlignAddressUpward(basemySpace);
+ void *basemySpace = model_malloc((numheappages + 1) * PAGESIZE);
+ void *pagealignedbase = PageAlignAddressUpward(basemySpace);
user_snapshot_space = create_mspace_with_base(pagealignedbase, numheappages * PAGESIZE, 1);
addMemoryRegionToSnapShot(pagealignedbase, numheappages);
#else
void initSnapshotLibrary(unsigned int numbackingpages,
unsigned int numsnapshots, unsigned int nummemoryregions,
- unsigned int numheappages, VoidFuncPtr entryPoint) {
+ unsigned int numheappages, VoidFuncPtr entryPoint)
+{
if (!snapshotrecord)
createSharedMemory();
//step 2 setup the stack context.
ucontext_t newContext;
- getcontext( &newContext );
+ getcontext(&newContext);
newContext.uc_stack.ss_sp = snapshotrecord->mStackBase;
newContext.uc_stack.ss_size = STACK_SIZE_DEFAULT;
- makecontext( &newContext, entryPoint, 0 );
+ makecontext(&newContext, entryPoint, 0);
/* switch to a new entryPoint context, on a new stack */
swapcontext(&savedSnapshotContext, &newContext);
as the id to which the rollback needs to occur */
bool rollback = false;
- while( true ){
- snapshotrecord->currSnapShotID=snapshotid+1;
+ while (true) {
+ snapshotrecord->currSnapShotID = snapshotid + 1;
forkedID = fork();
- if( 0 == forkedID ){
+ if (0 == forkedID) {
/* If the rollback bool is set, switch to the context we need to
return to during a rollback. */
- if( rollback) {
- setcontext( &( snapshotrecord->mContextToRollback ) );
+ if (rollback) {
+ setcontext(&(snapshotrecord->mContextToRollback));
} else {
/* A child process forked by a takeSnapshot call should
   switch back to the takeSnapshot context. */
- setcontext( &savedUserSnapshotContext );
+ setcontext(&savedUserSnapshotContext);
}
} else {
int status;
int retVal;
SSDEBUG("The process id of child is %d and the process id of this process is %d and snapshot id is %d\n",
- forkedID, getpid(), snapshotid );
+ forkedID, getpid(), snapshotid);
do {
- retVal=waitpid( forkedID, &status, 0 );
- } while( -1 == retVal && errno == EINTR );
+ retVal = waitpid(forkedID, &status, 0);
+ } while (-1 == retVal && errno == EINTR);
- if( snapshotrecord->mIDToRollback != snapshotid ) {
+ if (snapshotrecord->mIDToRollback != snapshotid) {
exit(EXIT_SUCCESS);
}
rollback = true;
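/* Protocol of the fork loop above: takeSnapshot() swaps into this loop, which
 * forks once per snapshot id. The child resumes user execution (via
 * savedUserSnapshotContext) while the parent blocks in waitpid(). To roll
 * back, the running process records the target id in mIDToRollback and exits;
 * each waiting parent then wakes, and any process whose snapshotid does not
 * match also exits, until the matching process re-forks and the new child
 * restores mContextToRollback. */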
/** The addMemoryRegionToSnapShot function assumes that addr is page aligned.
*/
-void addMemoryRegionToSnapShot( void * addr, unsigned int numPages) {
+void addMemoryRegionToSnapShot(void *addr, unsigned int numPages)
+{
#if USE_MPROTECT_SNAPSHOT
- unsigned int memoryregion=snapshotrecord->lastRegion++;
- if (memoryregion==snapshotrecord->maxRegions) {
+ unsigned int memoryregion = snapshotrecord->lastRegion++;
+ if (memoryregion == snapshotrecord->maxRegions) {
model_print("Exceeded supported number of memory regions!\n");
exit(EXIT_FAILURE);
}
- snapshotrecord->regionsToSnapShot[ memoryregion ].basePtr=addr;
- snapshotrecord->regionsToSnapShot[ memoryregion ].sizeInPages=numPages;
+ snapshotrecord->regionsToSnapShot[memoryregion].basePtr = addr;
+ snapshotrecord->regionsToSnapShot[memoryregion].sizeInPages = numPages;
#endif //NOT REQUIRED IN THE CASE OF FORK BASED SNAPSHOTS.
}
/** The takeSnapshot function takes a snapshot.
* @return The snapshot identifier.
*/
-snapshot_id takeSnapshot( ){
+snapshot_id takeSnapshot()
+{
#if USE_MPROTECT_SNAPSHOT
- for(unsigned int region=0; region<snapshotrecord->lastRegion;region++) {
- if( mprotect(snapshotrecord->regionsToSnapShot[region].basePtr, snapshotrecord->regionsToSnapShot[region].sizeInPages*sizeof(struct SnapShotPage), PROT_READ ) == -1 ){
+ for (unsigned int region = 0; region < snapshotrecord->lastRegion; region++) {
+ if (mprotect(snapshotrecord->regionsToSnapShot[region].basePtr, snapshotrecord->regionsToSnapShot[region].sizeInPages * sizeof(struct SnapShotPage), PROT_READ) == -1) {
perror("mprotect");
model_print("Failed to mprotect inside of takeSnapShot\n");
exit(EXIT_FAILURE);
}
}
- unsigned int snapshot=snapshotrecord->lastSnapShot++;
- if (snapshot==snapshotrecord->maxSnapShots) {
+ unsigned int snapshot = snapshotrecord->lastSnapShot++;
+ if (snapshot == snapshotrecord->maxSnapShots) {
model_print("Out of snapshots\n");
exit(EXIT_FAILURE);
}
- snapshotrecord->snapShots[snapshot].firstBackingPage=snapshotrecord->lastBackingPage;
+ snapshotrecord->snapShots[snapshot].firstBackingPage = snapshotrecord->lastBackingPage;
return snapshot;
#else
- swapcontext( &savedUserSnapshotContext, &savedSnapshotContext );
+ swapcontext(&savedUserSnapshotContext, &savedSnapshotContext);
SSDEBUG("TAKESNAPSHOT RETURN\n");
return snapshotid;
#endif
/** The rollBack function rolls back to the given snapshot identifier.
 * @param theID is the snapshot identifier to roll back to.
 */
-void rollBack( snapshot_id theID ){
-#if USE_MPROTECT_SNAPSHOT==2
- if (snapshotrecord->lastSnapShot==(theID+1)) {
- for(unsigned int page=snapshotrecord->snapShots[theID].firstBackingPage; page<snapshotrecord->lastBackingPage; page++) {
+void rollBack(snapshot_id theID)
+{
+#if USE_MPROTECT_SNAPSHOT == 2
+ if (snapshotrecord->lastSnapShot == (theID + 1)) {
+ for (unsigned int page = snapshotrecord->snapShots[theID].firstBackingPage; page < snapshotrecord->lastBackingPage; page++) {
memcpy(snapshotrecord->backingRecords[page].basePtrOfPage, &snapshotrecord->backingStore[page], sizeof(struct SnapShotPage));
}
return;
#if USE_MPROTECT_SNAPSHOT
HashTable< void *, bool, uintptr_t, 4, model_malloc, model_calloc, model_free> duplicateMap;
- for(unsigned int region=0; region<snapshotrecord->lastRegion;region++) {
- if( mprotect(snapshotrecord->regionsToSnapShot[region].basePtr, snapshotrecord->regionsToSnapShot[region].sizeInPages*sizeof(struct SnapShotPage), PROT_READ | PROT_WRITE ) == -1 ){
+ for (unsigned int region = 0; region < snapshotrecord->lastRegion; region++) {
+ if (mprotect(snapshotrecord->regionsToSnapShot[region].basePtr, snapshotrecord->regionsToSnapShot[region].sizeInPages * sizeof(struct SnapShotPage), PROT_READ | PROT_WRITE) == -1) {
perror("mprotect");
model_print("Failed to mprotect inside of takeSnapShot\n");
exit(EXIT_FAILURE);
}
}
- for(unsigned int page=snapshotrecord->snapShots[theID].firstBackingPage; page<snapshotrecord->lastBackingPage; page++) {
- if( !duplicateMap.contains(snapshotrecord->backingRecords[page].basePtrOfPage )) {
+ for (unsigned int page = snapshotrecord->snapShots[theID].firstBackingPage; page < snapshotrecord->lastBackingPage; page++) {
+ if (!duplicateMap.contains(snapshotrecord->backingRecords[page].basePtrOfPage)) {
duplicateMap.put(snapshotrecord->backingRecords[page].basePtrOfPage, true);
memcpy(snapshotrecord->backingRecords[page].basePtrOfPage, &snapshotrecord->backingStore[page], sizeof(struct SnapShotPage));
}
}
- snapshotrecord->lastSnapShot=theID;
- snapshotrecord->lastBackingPage=snapshotrecord->snapShots[theID].firstBackingPage;
+ snapshotrecord->lastSnapShot = theID;
+ snapshotrecord->lastBackingPage = snapshotrecord->snapShots[theID].firstBackingPage;
takeSnapshot(); //Make sure current snapshot is still good...All later ones are cleared
#else
snapshotrecord->mIDToRollback = theID;
volatile int sTemp = 0;
- getcontext( &snapshotrecord->mContextToRollback );
+ getcontext(&snapshotrecord->mContextToRollback);
/*
 * This is used to quit the process on rollback, so that the process
 * which needs to roll back can quit, allowing the process whose
 * snapshotid matches the rollbackid to switch to this context and
 * continue....
*/
- if( !sTemp ){
+ if (!sTemp) {
sTemp = 1;
SSDEBUG("Invoked rollback\n");
exit(EXIT_SUCCESS);
}
/*
 * This fix obviates the need for a finalize call, hence fewer dependencies for the model checker.
- *
*/
snapshotrecord->mIDToRollback = -1;
#endif
}
-
#include "snapshot-interface.h"
#include "config.h"
-void addMemoryRegionToSnapShot( void * ptr, unsigned int numPages );
-snapshot_id takeSnapshot( );
-void rollBack( snapshot_id theSnapShot );
+void addMemoryRegionToSnapShot(void *ptr, unsigned int numPages);
+snapshot_id takeSnapshot();
+void rollBack(snapshot_id theSnapShot);
#if !USE_MPROTECT_SNAPSHOT
void createSharedMemory();
//For each page in the backing store, record the base address it was copied from so we know where to copy it back to
struct BackingPageRecord {
- void * basePtrOfPage;
+ void *basePtrOfPage;
};
//Struct for each memory region
struct MemoryRegion {
- void * basePtr; //base of memory region
+ void *basePtr; //base of memory region
int sizeInPages; //size of memory region in pages
};
//Primary struct for snapshotting system....
struct SnapShot {
- struct MemoryRegion * regionsToSnapShot; //This pointer references an array of memory regions to snapshot
- struct SnapShotPage * backingStore; //This pointer references an array of snapshotpage's that form the backing store
- void * backingStoreBasePtr; //This pointer references an array of snapshotpage's that form the backing store
- struct BackingPageRecord * backingRecords; //This pointer references an array of backingpagerecord's (same number of elements as backingstore
- struct SnapShotRecord * snapShots; //This pointer references the snapshot array
+ struct MemoryRegion *regionsToSnapShot; //This pointer references an array of memory regions to snapshot
+ struct SnapShotPage *backingStore; //This pointer references an array of snapshotpage's that form the backing store
+ void *backingStoreBasePtr; //This pointer is the unaligned base of the backing store allocation (backingStore above is its page-aligned view)
+ struct BackingPageRecord *backingRecords; //This pointer references an array of backingpagerecords (same number of elements as the backing store)
+ struct SnapShotRecord *snapShots; //This pointer references the snapshot array
unsigned int lastSnapShot; //Stores the next snapshot record we should use
unsigned int lastBackingPage; //Stores the next backingpage we should use
#endif
//Global reference to snapshot data structure
-extern struct SnapShot * snapshotrecord;
+extern struct SnapShot *snapshotrecord;
#endif /* __SNAPSHOTIMP_H__ */