#include "snapshotimp.h"
#include <semaphore.h>

#define FAILURE(mesg) { printf("failed in the API: %s with errno-related message: %s\n", mesg, strerror( errno ) ); exit(EXIT_FAILURE); }

#ifdef CONFIG_SSDEBUG
#define SSDEBUG		printf
#else
#define SSDEBUG(...)	do { } while (0)
#endif
/* extern declaration definition */
struct SnapShot * snapshotrecord = NULL;

#if !USE_MPROTECT_SNAPSHOT
/** These variables are necessary because the stack is a shared region and
 * there exists a race between all processes executing the same function.
 * To avoid the problem above, we require variables allocated in 'safe' regions.
 * The bug was actually observed with the forkID; the variables below are
 * used to indicate the various contexts to switch to.
 *
 * @savedSnapshotContext: the point to which a takesnapshot() call should switch.
 * @savedUserSnapshotContext: the point to which the process whose snapshotid equals the rollback id should switch.
 * @snapshotid: a running counter of snapshot ids for the forked processes; it is incremented and stored in the persistently shared record.
 */
static ucontext_t savedSnapshotContext;
static ucontext_t savedUserSnapshotContext;
static snapshot_id snapshotid = 0;
#endif
/** PageAlignAddressUpward returns the smallest page-aligned address that is
 * greater than or equal to the given address.
 */
static void * PageAlignAddressUpward(void * addr) {
	return (void *)((((uintptr_t)addr)+PAGESIZE-1)&~(PAGESIZE-1));
}
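
/*
 * Example of the rounding above (assuming PAGESIZE is 4096):
 *   PageAlignAddressUpward((void *)0x1001) == (void *)0x2000
 *   PageAlignAddressUpward((void *)0x2000) == (void *)0x2000
 */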
#if USE_MPROTECT_SNAPSHOT

/** ReturnPageAlignedAddress rounds the given address down to the start of
 * its page.
 */
static void * ReturnPageAlignedAddress(void * addr) {
	return (void *)(((uintptr_t)addr)&~(PAGESIZE-1));
}
/** The initSnapShotRecord method initializes the snapshotting data
 * structures for the mprotect based snapshot.
 */
static void initSnapShotRecord(unsigned int numbackingpages, unsigned int numsnapshots, unsigned int nummemoryregions) {
	snapshotrecord=( struct SnapShot * )MYMALLOC(sizeof(struct SnapShot));
	snapshotrecord->regionsToSnapShot=( struct MemoryRegion * )MYMALLOC(sizeof(struct MemoryRegion)*nummemoryregions);
	snapshotrecord->backingStoreBasePtr= ( struct SnapShotPage * )MYMALLOC( sizeof( struct SnapShotPage ) * (numbackingpages + 1) );
	//Page align the backingstorepages
	snapshotrecord->backingStore=( struct SnapShotPage * )PageAlignAddressUpward(snapshotrecord->backingStoreBasePtr);
	snapshotrecord->backingRecords=( struct BackingPageRecord * )MYMALLOC(sizeof(struct BackingPageRecord)*numbackingpages);
	snapshotrecord->snapShots= ( struct SnapShotRecord * )MYMALLOC(sizeof(struct SnapShotRecord)*numsnapshots);
	snapshotrecord->lastSnapShot=0;
	snapshotrecord->lastBackingPage=0;
	snapshotrecord->lastRegion=0;
	snapshotrecord->maxRegions=nummemoryregions;
	snapshotrecord->maxBackingPages=numbackingpages;
	snapshotrecord->maxSnapShots=numsnapshots;
}
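
/*
 * Copy-on-write scheme: takeSnapshot() write-protects every registered region;
 * the first write to a protected page faults into HandlePF below, which saves
 * a copy of the page in the backing store before re-enabling writes, and
 * rollBack() later copies those saved pages back.
 */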
/** HandlePF is the page fault handler for mprotect based snapshotting.
 */
static void HandlePF( int sig, siginfo_t *si, void * unused){
	if( si->si_code == SEGV_MAPERR ){
		printf("Real Fault at %p\n", si->si_addr);
		exit( EXIT_FAILURE );
	}
	void* addr = ReturnPageAlignedAddress(si->si_addr);

	unsigned int backingpage=snapshotrecord->lastBackingPage++; //Could run out of pages...
	if (backingpage==snapshotrecord->maxBackingPages) {
		printf("Out of backing pages at %p\n", si->si_addr);
		exit( EXIT_FAILURE );
	}

	//copy page into the backing store
	memcpy(&(snapshotrecord->backingStore[backingpage]), addr, sizeof(struct SnapShotPage));
	//remember where to copy page back to
	snapshotrecord->backingRecords[backingpage].basePtrOfPage=addr;
	//set protection to read/write
	if (mprotect( addr, sizeof(struct SnapShotPage), PROT_READ | PROT_WRITE )) {
		perror("mprotect");
		// Handle error by quitting?
	}
}
#endif //nothing to handle for non snapshotting case.

#if !USE_MPROTECT_SNAPSHOT
void createSharedMemory(){
	//step 1. create shared memory.
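	//MAP_SHARED|MAP_ANON: the mapping is inherited across fork(), so the
	//snapshot record and the stack set up below remain visible to every
	//forked snapshot process.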
	void * memMapBase = mmap( 0, SHARED_MEMORY_DEFAULT + STACK_SIZE_DEFAULT, PROT_READ | PROT_WRITE, MAP_SHARED|MAP_ANON, -1, 0 );
	if( MAP_FAILED == memMapBase )
		FAILURE("mmap");

	//Setup snapshot record at top of free region
	snapshotrecord = ( struct SnapShot * )memMapBase;
	snapshotrecord->mSharedMemoryBase = (void *)((uintptr_t)memMapBase + sizeof(struct SnapShot));
	snapshotrecord->mStackBase = (void *)((uintptr_t)memMapBase + SHARED_MEMORY_DEFAULT);
	snapshotrecord->mStackSize = STACK_SIZE_DEFAULT;
	snapshotrecord->mIDToRollback = -1;
	snapshotrecord->currSnapShotID = 0;
}
#endif

/** The initSnapShotLibrary function initializes the Snapshot library.
 * @param entryPoint the function that should run the program.
 */
#if USE_MPROTECT_SNAPSHOT

void initSnapShotLibrary(unsigned int numbackingpages,
		unsigned int numsnapshots, unsigned int nummemoryregions,
		unsigned int numheappages, VoidFuncPtr entryPoint) {
	/* Setup a stack for our signal handler.... */
	stack_t ss;
	ss.ss_sp = MYMALLOC(SIGSTACKSIZE);
	ss.ss_size = SIGSTACKSIZE;
	ss.ss_flags = 0;
	sigaltstack(&ss, NULL);

	struct sigaction sa;
	sa.sa_flags = SA_SIGINFO | SA_NODEFER | SA_RESTART | SA_ONSTACK;
	sigemptyset( &sa.sa_mask );
	sa.sa_sigaction = HandlePF;
	if( sigaction( SIGBUS, &sa, NULL ) == -1 ){
		printf("SIGACTION CANNOT BE INSTALLED\n");
		exit(EXIT_FAILURE);
	}
	if( sigaction( SIGSEGV, &sa, NULL ) == -1 ){
		printf("SIGACTION CANNOT BE INSTALLED\n");
		exit(EXIT_FAILURE);
	}

	initSnapShotRecord(numbackingpages, numsnapshots, nummemoryregions);

	// EVIL HACK: We need to make sure that calls into the HandlePF method don't cause dynamic links
	// The problem is that we end up protecting state in the dynamic linker...
	// Solution is to call our signal handler before we start protecting stuff...
	siginfo_t si;
	memset(&si, 0, sizeof(si));
	si.si_addr = ss.ss_sp;
	HandlePF(SIGSEGV, &si, NULL);
	snapshotrecord->lastBackingPage--; //remove the fake page we copied

	basemySpace=MYMALLOC((numheappages+1)*PAGESIZE);
	void * pagealignedbase=PageAlignAddressUpward(basemySpace);
	mySpace = create_mspace_with_base(pagealignedbase, numheappages*PAGESIZE, 1 );
	addMemoryRegionToSnapShot(pagealignedbase, numheappages);
	entryPoint();
}
#else
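
/*
 * Fork-based snapshotting: takeSnapshot() switches back into this function,
 * which forks. The child resumes the user program while the parent blocks in
 * waitpid. On rollBack() the running child records the target id and exits;
 * each waiting ancestor whose snapshotid does not match also exits, and the
 * one that matches forks a fresh child that jumps to mContextToRollback.
 */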
void initSnapShotLibrary(unsigned int numbackingpages,
		unsigned int numsnapshots, unsigned int nummemoryregions,
		unsigned int numheappages, VoidFuncPtr entryPoint) {
	basemySpace=system_malloc((numheappages+1)*PAGESIZE);
	void * pagealignedbase=PageAlignAddressUpward(basemySpace);
	mySpace = create_mspace_with_base(pagealignedbase, numheappages*PAGESIZE, 1 );

	createSharedMemory();

	//step 2 setup the stack context.
	ucontext_t newContext;
	getcontext( &newContext );
	newContext.uc_stack.ss_sp = snapshotrecord->mStackBase;
	newContext.uc_stack.ss_size = STACK_SIZE_DEFAULT;
	makecontext( &newContext, entryPoint, 0 );
	/* switch to a new entryPoint context, on a new stack */
	swapcontext(&savedSnapshotContext, &newContext);

	/* switch back here when takesnapshot is called */
	pid_t forkedID = 0;
	snapshotid = snapshotrecord->currSnapShotID;
	/* This bool indicates that the current process's snapshotid is the same
		 as the id to which the rollback needs to occur */
	bool rollback = false;
	while( true ) {
		snapshotrecord->currSnapShotID=snapshotid+1;
		forkedID = fork();

		if( 0 == forkedID ){
			/* If the rollback bool is set, switch to the context we need to
				 return to during a rollback. */
			if( rollback ) {
				setcontext( &( snapshotrecord->mContextToRollback ) );
			} else {
				/* Child process which is forked as a result of a takesnapshot
					 call should switch back to the takesnapshot context */
				setcontext( &savedUserSnapshotContext );
			}
		} else {
			int status;
			int retVal;

			SSDEBUG("The process id of child is %d and the process id of this process is %d and snapshot id is %d\n",
				forkedID, getpid(), snapshotid );

			do {
				retVal=waitpid( forkedID, &status, 0 );
			} while( -1 == retVal && errno == EINTR );

			if( snapshotrecord->mIDToRollback != snapshotid ) {
				exit(EXIT_SUCCESS);
			}
			rollback = true;
		}
	}
}
#endif

/** The addMemoryRegionToSnapShot function assumes that addr is page aligned.
 */
void addMemoryRegionToSnapShot( void * addr, unsigned int numPages) {
#if USE_MPROTECT_SNAPSHOT
	unsigned int memoryregion=snapshotrecord->lastRegion++;
	if (memoryregion==snapshotrecord->maxRegions) {
		printf("Exceeded supported number of memory regions!\n");
		exit(EXIT_FAILURE);
	}

	snapshotrecord->regionsToSnapShot[ memoryregion ].basePtr=addr;
	snapshotrecord->regionsToSnapShot[ memoryregion ].sizeInPages=numPages;
#endif //NOT REQUIRED IN THE CASE OF FORK BASED SNAPSHOTS.
}
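
/*
 * Note: callers are expected to pass a page-aligned addr; for example, the
 * heap registered by initSnapShotLibrary above is aligned with
 * PageAlignAddressUpward before being handed to this function.
 */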

/** The takeSnapshot function takes a snapshot.
 * @return The snapshot identifier.
 */
snapshot_id takeSnapshot( ){
#if USE_MPROTECT_SNAPSHOT
	for(unsigned int region=0; region<snapshotrecord->lastRegion;region++) {
		if( mprotect(snapshotrecord->regionsToSnapShot[region].basePtr, snapshotrecord->regionsToSnapShot[region].sizeInPages*sizeof(struct SnapShotPage), PROT_READ ) == -1 ){
			printf("Failed to mprotect inside of takeSnapShot\n");
			exit(EXIT_FAILURE);
		}
	}
	unsigned int snapshot=snapshotrecord->lastSnapShot++;
	if (snapshot==snapshotrecord->maxSnapShots) {
		printf("Out of snapshots\n");
		exit(EXIT_FAILURE);
	}
	snapshotrecord->snapShots[snapshot].firstBackingPage=snapshotrecord->lastBackingPage;
	return snapshot;
#else
	swapcontext( &savedUserSnapshotContext, &savedSnapshotContext );
	SSDEBUG("TAKESNAPSHOT RETURN\n");
	return snapshotid;
#endif
}

/** The rollBack function rolls back to the given snapshot identifier.
 * @param theID is the snapshot identifier to rollback to.
 */
void rollBack( snapshot_id theID ){
#if USE_MPROTECT_SNAPSHOT
	HashTable< void *, bool, uintptr_t, 4, MYMALLOC, MYCALLOC, MYFREE> duplicateMap;
	for(unsigned int region=0; region<snapshotrecord->lastRegion;region++) {
		if( mprotect(snapshotrecord->regionsToSnapShot[region].basePtr, snapshotrecord->regionsToSnapShot[region].sizeInPages*sizeof(struct SnapShotPage), PROT_READ | PROT_WRITE ) == -1 ){
			printf("Failed to mprotect inside of rollBack\n");
			exit(EXIT_FAILURE);
		}
	}
	for(unsigned int page=snapshotrecord->snapShots[theID].firstBackingPage; page<snapshotrecord->lastBackingPage; page++) {
		if( !duplicateMap.contains(snapshotrecord->backingRecords[page].basePtrOfPage )) {
			duplicateMap.put(snapshotrecord->backingRecords[page].basePtrOfPage, true);
			memcpy(snapshotrecord->backingRecords[page].basePtrOfPage, &snapshotrecord->backingStore[page], sizeof(struct SnapShotPage));
		}
	}
	snapshotrecord->lastSnapShot=theID;
	snapshotrecord->lastBackingPage=snapshotrecord->snapShots[theID].firstBackingPage;
	takeSnapshot(); //Make sure current snapshot is still good...All later ones are cleared
#else
	snapshotrecord->mIDToRollback = theID;
	volatile int sTemp = 0;
	getcontext( &snapshotrecord->mContextToRollback );
	/*
	 * This is used to quit the process on rollback, so that the process
	 * which needs to rollback can quit allowing the process whose
	 * snapshotid matches the rollbackid to switch to this context and
	 * continue....
	 */
	if( !sTemp ){
		sTemp = 1;
		SSDEBUG("Invoked rollback\n");
		exit(EXIT_SUCCESS);
	}
	/*
	 * This fix obviates the need for a finalize call, hence fewer
	 * dependencies for the model checker....
	 */
	snapshotrecord->mIDToRollback = -1;
#endif
}
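
/*
 * Typical usage (a sketch; the real driver lives elsewhere in the model
 * checker, and mainEntryPoint and the numeric parameters are illustrative only):
 *
 *   void mainEntryPoint() {          // passed to initSnapShotLibrary
 *       snapshot_id id = takeSnapshot();
 *       ... explore one execution ...
 *       rollBack(id);                // memory reverts to the snapshot point
 *   }
 *
 *   initSnapShotLibrary(1024, 1024, 32, 512, &mainEntryPoint);
 */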