From: jihoonl Date: Fri, 12 Feb 2010 23:12:52 +0000 (+0000) Subject: mlookup bug fix X-Git-Url: http://demsky.eecs.uci.edu/git/?a=commitdiff_plain;h=7f62ec4d139235db3af270b2bea02ff70113ec9b;p=IRC.git mlookup bug fix --- diff --git a/Robust/src/Runtime/DSTM/interface_recovery/altmlookup.c b/Robust/src/Runtime/DSTM/interface_recovery/altmlookup.c new file mode 100644 index 00000000..d982ed91 --- /dev/null +++ b/Robust/src/Runtime/DSTM/interface_recovery/altmlookup.c @@ -0,0 +1,438 @@ +#include "altmlookup.h" +#include "dsmlock.h" +#include + +mhashtable_t mlookup; //Global hash table + +// Creates a machine lookup table with size =" size" +unsigned int mhashCreate(unsigned int size, double loadfactor) { + mhashlistnode_t *nodes; + // Allocate space for the hash table + if((nodes = calloc(size, sizeof(mhashlistnode_t))) == NULL) { + printf("Calloc error %s %d\n", __FILE__, __LINE__); + return 1; + } + + mlookup.table = nodes; + mlookup.size = size; + mlookup.threshold=size*loadfactor; + mlookup.mask = size -1; + mlookup.numelements = 0; // Initial number of elements in the hash + mlookup.loadfactor = loadfactor; + int i; + for(i=0;i>1; +} + +// Insert value and key mapping into the hash table +void mhashInsert(unsigned int key, void *val) { + mhashlistnode_t *node; + + if (mlookup.numelements > mlookup.threshold) { + //Resize Table + unsigned int newsize = mlookup.size << 1; + mhashResize(newsize); + } + + unsigned int keyindex=key>>1; + volatile unsigned int * lockptr=&mlookup.larray[keyindex&LOCKMASK].lock; + while(!write_trylock(lockptr)) { + sched_yield(); + } + + mhashlistnode_t * ptr = &mlookup.table[keyindex&mlookup.mask]; + atomic_inc(&mlookup.numelements); + + if(ptr->key ==0) { + ptr->key=key; + ptr->val=val; + ptr->next = NULL; + } else { // Insert in the beginning of linked list + node = calloc(1, sizeof(mhashlistnode_t)); + node->key = key; + node->val = val; + node->next = ptr->next; + ptr->next=node; + } + write_unlock(lockptr); +} + +// Return val for a given key in the hash table +void *mhashSearch(unsigned int key) { + int index; + + unsigned int keyindex=key>>1; + volatile unsigned int * lockptr=&mlookup.larray[keyindex&LOCKMASK].lock; + + while(!read_trylock(lockptr)) { + sched_yield(); + } + + mhashlistnode_t *node = &mlookup.table[keyindex&mlookup.mask]; + + do { + if(node->key == key) { + void * tmp=node->val; + read_unlock(lockptr); + return tmp; + } + node = node->next; + } while (node!=NULL); + read_unlock(lockptr); + return NULL; +} + +// Remove an entry from the hash table +unsigned int mhashRemove(unsigned int key) { + int index; + mhashlistnode_t *prev; + mhashlistnode_t *ptr, *node; + + unsigned int keyindex=key>>1; + volatile unsigned int * lockptr=&mlookup.larray[keyindex&LOCKMASK].lock; + + while(!write_trylock(lockptr)) { + sched_yield(); + } + + mhashlistnode_t *curr = &mlookup.table[keyindex&mlookup.mask]; + + for (; curr != NULL; curr = curr->next) { + if (curr->key == key) { + atomic_dec(&(mlookup.numelements)); + if ((curr == &ptr[index]) && (curr->next == NULL)) { + curr->key = 0; + curr->val = NULL; + } else if ((curr == &ptr[index]) && (curr->next != NULL)) { + curr->key = curr->next->key; + curr->val = curr->next->val; + node = curr->next; + curr->next = curr->next->next; + free(node); + } else { + prev->next = curr->next; + free(curr); + } + write_unlock(lockptr); + return 0; + } + prev = curr; + } + write_unlock(lockptr); + return 1; +} + +// Resize table +void mhashResize(unsigned int newsize) { + mhashlistnode_t *node, *curr; + int isfirst; + 
unsigned int i,index; + unsigned int mask; + + for(i=0;ikey) == 0) { + break; + } + next = curr->next; + index = (key >> 1) & mask; + tmp=&mlookup.table[index]; + + if(tmp->key ==0) { + tmp->key=curr->key; + tmp->val=curr->val; + if (!isfirst) + free(curr); + } /* + + NOTE: Add this case if you change this... + This case currently never happens because of the way things rehash.... +else if (isfirst) { + mhashlistnode_t *newnode = calloc(1, sizeof(mhashlistnode_t)); + newnode->key = curr->key; + newnode->val = curr->val; + newnode->next = tmp->next; + tmp->next=newnode; + } */ + else { + curr->next=tmp->next; + tmp->next=curr; + } + isfirst = 0; + curr = next; + } while(curr!=NULL); + } + + free(ptr); + for(i=0;ikey; + curr = curr->next; + } + } + } + + if (keyindex != *numKeys) + printf("mhashGetKeys(): WARNING: incorrect mlookup.numelements value!\n"); + + pthread_mutex_unlock(&mlookup.locktable); + return keys; + }*/ + +#ifdef RECOVERY +void* mhashGetDuplicate(int *dupeSize, int backup) { //how big? +#ifdef DEBUG + printf("%s-> Start\n", __func__); +#endif + unsigned int numdupe = 0; + void* dPtr; + int i; + + unsigned int *oidsdupe; + + unsigned int mask; + + for(i=0;i callock error\n",__FILE__,__func__,__LINE__); + exit(-1); + } + + int size = 0, tempsize = 0; + objheader_t *header; + + mhashlistnode_t *node; +// go through object store; +// track sizes, oids, and num +// printf("%s -> Before mutex lock\n",__func__); +// pthread_mutex_lock(&mlookup.locktable); +// printf("%s -> After mutex lock\n",__func__); + + size =0; + tempsize =0; + + for(i = 0; i < mlookup.size; i++) { + if (mlookup.table[i].key != 0) { + node = &mlookup.table[i]; + while(node != NULL) { // no nodes +// printf("%s -> node : %d node->val : %d \n",__func__,node,node->val); + + header = (objheader_t *)node->val; + if((header->isBackup && backup) || (!header->isBackup && !backup)) { + oidsdupe[numdupe++] = OID(header); + GETSIZE(tempsize, header); + size += tempsize + sizeof(objheader_t); + + if(header->notifylist != NULL) { + // number of nodes + actual size of array + size += (sizeof(unsigned int) + (getListSize(header->notifylist) * sizeof(threadlist_t))); + } + } + node = node->next; + } + } + } +// printf("%s -> size = %d\n",__func__,size); + + for(i=0;iisBackup && backup) { + ((objheader_t*)ptr)->isBackup = 0; + }else if(!(header->isBackup) && !backup) { + ((objheader_t*)ptr)->isBackup = 1; + } + else { + printf("%s -> ERROR\n",__func__); + exit(0); + } + + ptr += tempsize; + + if(header->notifylist != NULL) { + unsigned int listSize; + /* get duplicate array of threadlist */ + threadlist_t *threadArray; + listSize = convertToArray(header->notifylist,&threadArray); + + memcpy(ptr, &listSize,sizeof(unsigned int)); + ptr += sizeof(unsigned int); + + memcpy(ptr, threadArray, (sizeof(threadlist_t) * listSize)); + ptr += (sizeof(threadlist_t) * listSize); + free(threadArray); + } + } +#ifdef DEBUG + printf("%s-> End\n", __func__); +#endif + + free(oidsdupe); + + // number of oid size + data array + *dupeSize = (sizeof(unsigned int) + sizeof(int) + size); + + return dPtr; +} + +/* +int mhashGetThreadObjects(unsigned int** oidArray,unsigned int** midArray,unsigned int** threadidArray) +{ + printf("%s-> Start\n", __func__); + unsigned int oidArr[mlookup.numelements]; + unsigned int midArr[mlookup.numelements]; + unsigned int threadidArr[mlookup.numelements]; + unsigned int* hashkeys; + unsigned int numKeys; + objheader_t *header; + int i; + + int size =0; + mhashlistnode_t *node; +// go through object store; +// track 
sizes, oids, and num + + hashkeys = mhashGetKeys(&numKeys); + printf("%s -> numKeys : %d\n",__func__,numKeys); + + threadlist_t* t; + threadlist_t* tmp; + + for(i = 0; i < numKeys; i++) { + header = (objheader_t*)mhashSearch(hashkeys[i]); + pthread_mutex_lock(&mlookup.locktable); + + if(header->isBackup && header->notifylist != NULL) { + + t = header->notifylist; + + while(t) { + oidArr[size] = OID(header); + midArr[size] = t->mid; + threadidArr[size++] = t->threadid; + tmp = t; + t = t->next; + free(tmp); + } + + header->notifylist = NULL; + } + pthread_mutex_unlock(&mlookup.locktable); + } + + free(hashkeys); + + printf("%s -> end copying Size : %d\n",__func__,size); + + if(size > 0) { + *oidArray = (unsigned int*) calloc(size, sizeof(unsigned int)); + *midArray = (unsigned int*) calloc(size, sizeof(unsigned int)); + *threadidArray = (unsigned int*) calloc(size, sizeof(unsigned int)); + + for(i = 0; i < size; i++) { + (*oidArray)[i] = oidArr[i]; + (*midArray)[i] = midArr[i]; + (*threadidArray)[i] = threadidArr[i]; + } + } + + printf("%s -> End\n",__func__); + + return size; + +}*/ +#endif diff --git a/Robust/src/Runtime/DSTM/interface_recovery/altmlookup.h b/Robust/src/Runtime/DSTM/interface_recovery/altmlookup.h new file mode 100644 index 00000000..ee566865 --- /dev/null +++ b/Robust/src/Runtime/DSTM/interface_recovery/altmlookup.h @@ -0,0 +1,54 @@ +#ifndef _MLOOKUP_H_ +#define _MLOOKUP_H_ + +#include +#include +#include + +#define MLOADFACTOR 0.25 +#define MHASH_SIZE 1024 + +#ifdef RECOVERY +#include "dstm.h" +#include "dsmlock.h" +#endif + +typedef struct mhashlistnode { + unsigned int key; + void *val; //this can be cast to another type or used to point to a larger structure + struct mhashlistnode *next; +} mhashlistnode_t; + +struct lockarray { + volatile unsigned int lock; + int buf[15]; +}; + +#define NUMLOCKS 16 +#define LOCKMASK (NUMLOCKS-1) + +typedef struct mhashtable { + mhashlistnode_t *table; // points to beginning of hash table + unsigned int size; + unsigned int mask; + unsigned int numelements; + unsigned int threshold; + double loadfactor; + struct lockarray larray[NUMLOCKS]; +} mhashtable_t; + +unsigned int mhashCreate(unsigned int size, double loadfactor); +unsigned int mhashFunction(unsigned int key); +void mhashInsert(unsigned int key, void *val); +void *mhashSearch(unsigned int key); //returns val, NULL if not found +unsigned int mhashRemove(unsigned int key); //returns -1 if not found +void mhashResize(unsigned int newsize); +//unsigned int *mhashGetKeys(unsigned int *numKeys); +void mhashPrint(); + +#endif + +#ifdef RECOVERY +void* mhashGetDuplicate(int* dupeSize,int backup); +//int mhashGetThreadObjects(unsigned int** oidArray,unsigned int** midArray,unsigned int** threadidArray); +#endif diff --git a/Robust/src/Runtime/DSTM/interface_recovery/dsmlock.c b/Robust/src/Runtime/DSTM/interface_recovery/dsmlock.c index 0b3135c4..69cb4d67 100644 --- a/Robust/src/Runtime/DSTM/interface_recovery/dsmlock.c +++ b/Robust/src/Runtime/DSTM/interface_recovery/dsmlock.c @@ -1,40 +1,25 @@ #include "dsmlock.h" #include -inline void initdsmlocks(volatile unsigned int *addr) { +inline void initdsmlocks(volatile int *addr) { (*addr) = RW_LOCK_BIAS; } - -inline void readLock(volatile unsigned int *addr) { - __asm__ __volatile__ ("" " subl $1,(%0)\n\t" - "jns 1f\n" - "1:\n" - :: "a" (addr) : "memory"); -} - -inline void writeLock(volatile unsigned int *addr) { - __asm__ __volatile__ ("" " subl %1,(%0)\n\t" - "jz 1f\n" - "1:\n" - :: "a" (addr), "i" (RW_LOCK_BIAS) : "memory"); -} - 
-static inline void atomic_dec(atomic_t *v) { +inline void atomic_dec(volatile int *v) { __asm__ __volatile__ (LOCK_PREFIX "decl %0" - : "+m" (v->counter)); + : "+m" (*v)); } -static inline void atomic_inc(atomic_t *v) { +inline void atomic_inc(volatile int *v) { __asm__ __volatile__ (LOCK_PREFIX "incl %0" - : "+m" (v->counter)); + : "+m" (*v)); } -static inline int atomic_sub_and_test(int i, atomic_t *v) { +static inline int atomic_sub_and_test(int i, volatile int *v) { unsigned char c; __asm__ __volatile__ (LOCK_PREFIX "subl %2,%0; sete %1" - : "+m" (v->counter), "=qm" (c) + : "+m" (*v), "=qm" (c) : "ir" (i) : "memory"); return c; } @@ -46,36 +31,41 @@ static inline int atomic_sub_and_test(int i, atomic_t *v) { * * Atomically adds @i to @v. */ -static inline void atomic_add(int i, atomic_t *v) { +static inline void atomic_add(int i, volatile int *v) { __asm__ __volatile__ (LOCK_PREFIX "addl %1,%0" - : "+m" (v->counter) + : "+m" (*v) : "ir" (i)); } -inline int read_trylock(volatile unsigned int *lock) { - atomic_t *count = (atomic_t *)lock; - - atomic_dec(count); - if (atomic_read(count) >= 0) +inline int read_trylock(volatile int *lock) { + atomic_dec(lock); + if (atomic_read(lock) >= 0) return 1; //can aquire a new read lock - atomic_inc(count); + atomic_inc(lock); return 0; //failure } -inline int write_trylock(volatile unsigned int *lock) { - atomic_t *count = (atomic_t *)lock; - if (atomic_sub_and_test(RW_LOCK_BIAS, count)) { +inline int write_trylock(volatile int *lock) { + if (atomic_sub_and_test(RW_LOCK_BIAS, lock)) { return 1; // get a write lock } - atomic_add(RW_LOCK_BIAS, count); + atomic_add(RW_LOCK_BIAS, lock); return 0; // failed to acquire a write lock } -inline void read_unlock(volatile unsigned int *rw) { +inline void read_unlock(volatile int *rw) { __asm__ __volatile__ (LOCK_PREFIX "incl %0" : "+m" (*rw) : : "memory"); } -inline void write_unlock(volatile unsigned int *rw) { +inline void write_unlock(volatile int *rw) { __asm__ __volatile__ (LOCK_PREFIX "addl %1, %0" : "+m" (*rw) : "i" (RW_LOCK_BIAS) : "memory"); } + +inline int is_write_locked(volatile int *lock) { + return lock < 0; +} + +inline int is_read_locked(volatile int *lock) { + return lock > 0; +} diff --git a/Robust/src/Runtime/DSTM/interface_recovery/dsmlock.h b/Robust/src/Runtime/DSTM/interface_recovery/dsmlock.h index 2ea3cdb6..d0a5df84 100644 --- a/Robust/src/Runtime/DSTM/interface_recovery/dsmlock.h +++ b/Robust/src/Runtime/DSTM/interface_recovery/dsmlock.h @@ -1,8 +1,9 @@ #ifndef _DSMLOCK_H_ #define _DSMLOCK_H_ +#define CFENCE asm volatile("":::"memory"); #define RW_LOCK_BIAS 0x01000000 -#define atomic_read(v) ((v)->counter) +#define atomic_read(v) (*v) #define RW_LOCK_UNLOCKED { RW_LOCK_BIAS } //#define LOCK_PREFIX "" #define LOCK_PREFIX \ @@ -13,20 +14,15 @@ "661:\n\tlock; " - -typedef struct { - unsigned int counter; -} atomic_t; - -void initdsmlocks(volatile unsigned int *addr); -void readLock(volatile unsigned int *addr); -void writeLock(volatile unsigned int *addr); -int read_trylock(volatile unsigned int *lock); -int write_trylock(volatile unsigned int *lock); -static void atomic_dec(atomic_t *v); -static void atomic_inc(atomic_t *v); -static void atomic_add(int i, atomic_t *v); -static int atomic_sub_and_test(int i, atomic_t *v); -void read_unlock(volatile unsigned int *rw); -void write_unlock(volatile unsigned int *rw); +void initdsmlocks(volatile int *addr); +int read_trylock(volatile int *lock); +int write_trylock(volatile int *lock); +void atomic_dec(volatile int *v); +void 
atomic_inc(volatile int *v); +static void atomic_add(int i, volatile int *v); +static int atomic_sub_and_test(int i, volatile int *v); +void read_unlock(volatile int *rw); +void write_unlock(volatile int *rw); +int is_write_locked(volatile int *lock); +int is_read_locked(volatile int *lock); #endif diff --git a/Robust/src/Runtime/DSTM/interface_recovery/dstm.h b/Robust/src/Runtime/DSTM/interface_recovery/dstm.h index e88c4142..b88591e9 100644 --- a/Robust/src/Runtime/DSTM/interface_recovery/dstm.h +++ b/Robust/src/Runtime/DSTM/interface_recovery/dstm.h @@ -284,6 +284,7 @@ void duplicateLocalBackupObjects(); void duplicateLocalOriginalObjects(); void restoreDuplicationState(unsigned int deadHost); int readDuplicateObjs(int); +void printRecoveryStat(); /* Paxo's algorithm */ int paxos(); diff --git a/Robust/src/Runtime/DSTM/interface_recovery/dstmserver.c b/Robust/src/Runtime/DSTM/interface_recovery/dstmserver.c index f014844e..a456312b 100644 --- a/Robust/src/Runtime/DSTM/interface_recovery/dstmserver.c +++ b/Robust/src/Runtime/DSTM/interface_recovery/dstmserver.c @@ -582,7 +582,8 @@ void *dstmAccept(void *acceptfd) { #endif //object store stuffffff recv_data((int)acceptfd, &mid, sizeof(unsigned int)); - tempsize = mhashGetDuplicate(&dupeptr, 0); + + dupeptr = (char*) mhashGetDuplicate(&tempsize, 0); //send control and dupes after ctrl = RECEIVE_DUPES; @@ -640,8 +641,7 @@ void *dstmAccept(void *acceptfd) { //object store stuffffff recv_data((int)acceptfd, &mid, sizeof(unsigned int)); - - tempsize = mhashGetDuplicate(&dupeptr, 1); + dupeptr = (char*) mhashGetDuplicate(&tempsize, 1); //send control and dupes after ctrl = RECEIVE_DUPES; diff --git a/Robust/src/Runtime/DSTM/interface_recovery/mlookup.c b/Robust/src/Runtime/DSTM/interface_recovery/mlookup.c index ab44fb48..786281b1 100644 --- a/Robust/src/Runtime/DSTM/interface_recovery/mlookup.c +++ b/Robust/src/Runtime/DSTM/interface_recovery/mlookup.c @@ -45,6 +45,7 @@ void mhashInsert(unsigned int key, void *val) { if(ptr->key ==0) { ptr->key=key; ptr->val=val; + ptr->next = NULL; } else { // Insert in the beginning of linked list node = calloc(1, sizeof(mhashlistnode_t)); node->key = key; @@ -206,15 +207,20 @@ unsigned int *mhashGetKeys(unsigned int *numKeys) { } #ifdef RECOVERY -int mhashGetDuplicate(void **dupeptr, int backup) { //how big? +void* mhashGetDuplicate(int *dupeSize, int backup) { //how big? #ifdef DEBUG printf("%s-> Start\n", __func__); #endif unsigned int numdupe = 0; void* dPtr; -// ok let's do this; - unsigned int oidsdupe[mlookup.size]; + unsigned int *oidsdupe; + + if((oidsdupe = (unsigned int*) calloc(mlookup.size,unsigned int)) == NULL) { + printf("%s %s(): %d -> callock error\n",__FILE__,__func__,__LINE__); + exit(-1); + } + int size = 0, tempsize = 0, i = 0; objheader_t *header; @@ -263,7 +269,6 @@ int mhashGetDuplicate(void **dupeptr, int backup) { //how big? // for each oid in oiddupe[] get object and format - *dupeptr = dPtr; void* ptr = dPtr; *((unsigned int *)(ptr)) = numdupe; ptr += sizeof(unsigned int); @@ -306,8 +311,13 @@ int mhashGetDuplicate(void **dupeptr, int backup) { //how big? 
#ifdef DEBUG printf("%s-> End\n", __func__); #endif + + free(oidsdupe); + // number of oid size + data array - return (sizeof(unsigned int) + sizeof(int) + size); + *dupeSize = (sizeof(unsigned int) + sizeof(int) + size); + + return dPtr; } int mhashGetThreadObjects(unsigned int** oidArray,unsigned int** midArray,unsigned int** threadidArray) diff --git a/Robust/src/Runtime/DSTM/interface_recovery/mlookup.h b/Robust/src/Runtime/DSTM/interface_recovery/mlookup.h index d6158e53..80034f5c 100644 --- a/Robust/src/Runtime/DSTM/interface_recovery/mlookup.h +++ b/Robust/src/Runtime/DSTM/interface_recovery/mlookup.h @@ -38,7 +38,7 @@ unsigned int mhashResize(unsigned int newsize); unsigned int *mhashGetKeys(unsigned int *numKeys); #ifdef RECOVERY -int mhashGetDuplicate(void** dupeptr,int backup); +void* mhashGetDuplicate(int* dupeSize,int backup); int mhashGetThreadObjects(unsigned int** oidArray,unsigned int** midArray,unsigned int** threadidArray); #endif diff --git a/Robust/src/Runtime/DSTM/interface_recovery/threadnotify.c b/Robust/src/Runtime/DSTM/interface_recovery/threadnotify.c index e140144b..5c2f6214 100644 --- a/Robust/src/Runtime/DSTM/interface_recovery/threadnotify.c +++ b/Robust/src/Runtime/DSTM/interface_recovery/threadnotify.c @@ -8,12 +8,17 @@ notifyhashtable_t nlookup; //Global hash table threadlist_t *insNode(threadlist_t *head, unsigned int threadid, unsigned int mid) { threadlist_t *ptr; if(head == NULL) { - head = malloc(sizeof(threadlist_t)); + + if((head = malloc(sizeof(threadlist_t))) == NULL) { + printf("%s -> cannot allocate memory\n",__func__); + } head->threadid = threadid; head->mid = mid; head->next = NULL; } else { - ptr = malloc(sizeof(threadlist_t)); + if((ptr = malloc(sizeof(threadlist_t))) == NULL) { + printf("%s -> cannot allocate memory\n",__func__); + } ptr->threadid = threadid; ptr->mid = mid; ptr->next = head; diff --git a/Robust/src/Runtime/DSTM/interface_recovery/trans.c b/Robust/src/Runtime/DSTM/interface_recovery/trans.c index 87692967..590bcc79 100644 --- a/Robust/src/Runtime/DSTM/interface_recovery/trans.c +++ b/Robust/src/Runtime/DSTM/interface_recovery/trans.c @@ -808,6 +808,7 @@ __attribute__((pure)) objheader_t *transRead2(unsigned int oid) { objheader_t *transCreateObj(unsigned int size) { objheader_t *tmp = (objheader_t *) objstrAlloc(&t_cache, (sizeof(objheader_t) + size)); OID(tmp) = getNewOID(); + tmp->notifylist = NULL; tmp->version = 1; tmp->rcount = 1; tmp->isBackup = 0; @@ -2614,6 +2615,7 @@ void duplicateLostObjects(unsigned int mid){ #ifdef RECOVERYSTATS time(&fi); elapsedTime[numRecovery-1] = difftime(fi,st); + printRecoveryStat(); #endif #ifndef DEBUG @@ -2630,7 +2632,7 @@ void duplicateLocalBackupObjects(unsigned int mid) { #endif //copy code from dstmserver here - tempsize = mhashGetDuplicate((void**)&dupeptr, 1); + dupeptr = (char*) mhashGetDuplicate(&tempsize, 1); #ifdef DEBUG printf("tempsize:%d, dupeptrfirstvalue:%d\n", tempsize, *((unsigned int *)(dupeptr))); @@ -2677,7 +2679,7 @@ void duplicateLocalOriginalObjects(unsigned int mid) { #endif //copy code fom dstmserver here - tempsize = mhashGetDuplicate((void**)&dupeptr, 0); + dupeptr = (char*) mhashGetDuplicate(&tempsize, 0); //send control and dupes after ctrl = RECEIVE_DUPES; @@ -3462,4 +3464,24 @@ int checkiftheMachineDead(unsigned int mid) { return getStatus(mIndex); } +#ifdef RECOVERYSTATS +void printRecoveryStat() { + printf("***** Recovery Stats *****\n"); + printf("numRecovery = %d\n",numRecovery); + int i; + for(i=0; i < numRecovery;i++) { + printf("Dead Machine = 
%s\n",midtoIPString(deadMachine[i])); + printf("Recovery Time = %.6f\n",elapsedTime[i]); + } + printf("**************************\n\n"); +} +#else +void printRecoveryStat() { + printf("No stat\n"); +} +#endif + + + + #endif diff --git a/Robust/src/Runtime/runtime.c b/Robust/src/Runtime/runtime.c index 7f20cfc7..56c003dc 100644 --- a/Robust/src/Runtime/runtime.c +++ b/Robust/src/Runtime/runtime.c @@ -357,15 +357,7 @@ void CALL01(___System______printString____L___String___,struct ___String___ * __ #ifdef D___RecoveryStat______printRecoveryStat____ #ifdef RECOVERYSTATS void CALL00(___RecoveryStat______printRecoveryStat____) { - - printf("***** Recovery Stats *****\n"); - printf("numRecovery = %d\n",numRecovery); - int i; - for(i=0; i < numRecovery;i++) { - printf("Dead Machine = %s\n",midtoIPString(deadMachine[i])); - printf("Recovery Time = %.2f\n",elapsedTime[i]); - } - printf("**************************\n\n"); + printRecoveryStat(); } #else void CALL00(___RecoveryStat______printRecoveryStat____) { @@ -374,8 +366,6 @@ void CALL00(___RecoveryStat______printRecoveryStat____) { #endif #endif - - #ifdef DSTM void CALL00(___System______clearPrefetchCache____) { prehashClear(); diff --git a/Robust/src/Runtime/thread.c b/Robust/src/Runtime/thread.c index 9f842071..5f94973f 100644 --- a/Robust/src/Runtime/thread.c +++ b/Robust/src/Runtime/thread.c @@ -590,6 +590,8 @@ void startDSMthread(int oid, int objType) { int retval; pthread_attr_t nattr; + printf("%s -> oid : %u\n",__func__,oid); + pthread_mutex_lock(&gclistlock); threadcount++; pthread_mutex_unlock(&gclistlock);