3 #include "structdefs.h"
5 #include "SimpleHash.h"
7 #include "GenericHashtable.h"
9 #if defined(THREADS) || defined(DSTM)
/* Initial size of each semispace: 32 MB. */
22 #define INITIALHEAPSIZE 32*1024*1024
/* GC trigger point: collect once the heap is 90% full. */
23 #define GCPOINT(x) ((int)((x)*0.9))
24 /* This define takes in how full the heap is initially and returns a new heap size to use */
25 #define HEAPSIZE(x,y) (((int)((x)/0.6))+y)
/* Runtime tables owned by the task scheduler; collect() below walks
 * these as GC root sets and rewrites their keys/values to point at the
 * copied objects. */
28 extern struct genhashtable * activetasks;
30 extern struct parameterwrapper * objectqueues[NUMCLASSES];
32 extern struct genhashtable * failedtasks;
33 extern struct taskparamdescriptor *currtpd;
/* NOTE(review): forward/reverse/fdtoobject appear to be build-variant
 * tables; their guarding #ifdefs are elided in this view -- confirm
 * against the full file. */
34 extern struct ctable *forward;
35 extern struct ctable *reverse;
36 extern struct RuntimeHash *fdtoobject;
16 #if defined(THREADS) || defined(DSTM)
/* Global list of threads currently parked for GC; each node carries
 * that thread's stack-root list (see stopforgc/restartaftergc). */
41 struct listitem * list=NULL;
45 //Need to check if pointers are transaction pointers
46 //this also catches the special flag value of 1 for local copies
/* ENQUEUE(orig, dst): if orig is a live in-heap object pointer, copy it
 * with gc_createcopy and store the forwarded address into dst.
 * Three build variants follow:
 *   THREADS/DSTM  - skip tagged transaction pointers (low bit set) and
 *                   anything outside [curr_heapbase, curr_heaptop);
 *   FASTCHECK     - skip only the special local-copy flag value 1;
 *   default       - forward every pointer unconditionally.
 * NOTE(review): the ',©)' below is a mangled '&copy' (HTML-entity
 * corruption introduced by extraction), and the macros' continuation
 * lines are elided in this view -- no comments are inserted between
 * the backslash-continued lines to avoid breaking the directives. */
48 #define ENQUEUE(orig, dst) \
49 if ((!(((unsigned int)orig)&0x1))) { \
50 if (orig>=curr_heapbase&&orig<curr_heaptop) { \
52 if (gc_createcopy(orig,©)) \
57 #elif defined(FASTCHECK)
58 #define ENQUEUE(orig, dst) \
59 if (((unsigned int)orig)!=1) { \
61 if (gc_createcopy(orig,©)) \
65 #define ENQUEUE(orig, dst) \
67 if (gc_createcopy(orig,©)) \
74 struct pointerblock *next;
/* From-space bounds plus the allocation cursor and GC-trigger point. */
77 void * curr_heapbase=0;
78 void * curr_heapptr=0;
79 void * curr_heapgcpoint=0;
80 void * curr_heaptop=0;
/* Worklist of copied-but-unscanned (gray) objects, built from chained
 * pointerblock nodes; 'spare' presumably caches one freed block to
 * avoid malloc churn -- TODO confirm, its reuse sites are elided. */
87 struct pointerblock *head=NULL;
89 struct pointerblock *tail=NULL;
91 struct pointerblock *spare=NULL;
/* Push ptr onto the gray-object worklist, appending a freshly
 * allocated pointerblock when the head block is full.
 * NOTE(review): the relinking of 'head' and any reuse of 'spare' are
 * elided in this view (original lines 96-104). */
93 void enqueue(void *ptr) {
94 if (headindex==NUMPTRS) {
95 struct pointerblock * tmp;
100 tmp=malloc(sizeof(struct pointerblock));
105 head->ptrs[headindex++]=ptr;
/* Pop the next pointer from the tail of the worklist, advancing to the
 * following pointerblock when the current one is exhausted.
 * NOTE(review): the enclosing function header (presumably
 * 'void * dequeue()') and the exhausted-block recycling lines are
 * elided in this view. */
109 if (tailindex==NUMPTRS) {
110 struct pointerblock *tmp=tail;
118 return tail->ptrs[tailindex++];
/* Worklist-empty test: same block and same index means nothing left. */
122 if ((head==tail)&&(tailindex==headindex))
/* Separate worklist for tag descriptors; their referents are repaired
 * only after the main trace so that tags alone do not keep objects
 * alive (see the fix-up loop at the end of collect). */
128 struct pointerblock *taghead=NULL;
/* Push a tag descriptor onto the tag worklist, growing it by one
 * pointerblock when full.
 * NOTE(review): the relinking of 'taghead' and index reset are elided
 * in this view (original lines 134-137). */
131 void enqueuetag(struct ___TagDescriptor___ *ptr) {
132 if (tagindex==NUMPTRS) {
133 struct pointerblock * tmp=malloc(sizeof(struct pointerblock));
138 taghead->ptrs[tagindex++]=ptr;
/* Stop-the-world copying collection.  Waits for every other mutator to
 * reach a GC safe point, then traces all root sets (thread stacks and
 * lock lists, per-class object queues, translation tables, task
 * descriptors), copying live objects into to-space via
 * ENQUEUE/gc_createcopy, computes the transitive closure over the
 * worklist, and finally repairs references held by tags.
 * NOTE(review): many lines of this function are elided in this view;
 * the comments below describe only what is visible. */
143 void collect(struct garbagelist * stackptr) {
144 #if defined(THREADS)||defined(DSTM)
/* Block until all other threads have parked themselves on 'list'. */
146 pthread_mutex_lock(&gclistlock);
148 if ((listcount+1)==threadcount) {
149 break; /* Have all other threads stopped */
151 pthread_cond_wait(&gccond, &gclistlock);
/* Fresh worklists for this collection cycle. */
158 head=tail=malloc(sizeof(struct pointerblock));
164 taghead=malloc(sizeof(struct pointerblock));
169 /* Check current stack */
170 #if defined(THREADS)||defined(DSTM)
172 struct listitem *listptr=list;
/* Walk the chain of garbagelist frames, forwarding every slot. */
176 while(stackptr!=NULL) {
178 for(i=0; i<stackptr->size; i++) {
179 void * orig=stackptr->array[i];
180 ENQUEUE(orig, stackptr->array[i]);
182 stackptr=stackptr->next;
184 #if defined(THREADS)||defined(DSTM)
185 /* Go to next thread */
/* Each parked thread also contributes its held-lock list as a root. */
187 void * orig=listptr->locklist;
188 ENQUEUE(orig, listptr->locklist);
189 stackptr=listptr->stackptr;
190 listptr=listptr->next;
/* Revert-list root -- NOTE(review): its guarding #ifdef is elided. */
198 ENQUEUE(___fcrevert___, ___fcrevert___);
203 /* Update objectsets */
205 for(i=0; i<NUMCLASSES; i++) {
208 struct parameterwrapper * p=objectqueues[i];
210 struct ObjectHash * set=p->objectset;
211 struct ObjectNode * ptr=set->listhead;
/* Keys are object pointers: forward each and write back in place. */
213 void *orig=(void *)ptr->key;
214 ENQUEUE(orig, *((void **)(&ptr->key)));
217 ObjectHashrehash(set); /* Rehash the table */
/* forward/reverse translation tables: keys (resp. values) are object
 * pointers that must be forwarded, then the tables rehashed since the
 * hash depends on the address. */
226 struct cnode * ptr=forward->listhead;
228 void * orig=(void *)ptr->key;
229 ENQUEUE(orig, *((void **)(&ptr->key)));
232 crehash(forward); /* Rehash the table */
236 struct cnode * ptr=reverse->listhead;
238 void *orig=(void *)ptr->val;
239 ENQUEUE(orig, *((void**)(&ptr->val)));
/* File-descriptor-to-object map used by the I/O runtime. */
246 struct RuntimeNode * ptr=fdtoobject->listhead;
248 void *orig=(void *)ptr->data;
249 ENQUEUE(orig, *((void**)(&ptr->data)));
255 /* Update current task descriptor */
257 for(i=0; i<currtpd->numParameters; i++) {
258 void *orig=currtpd->parameterArray[i];
259 ENQUEUE(orig, currtpd->parameterArray[i]);
264 /* Update active tasks */
266 struct genpointerlist * ptr=activetasks->list;
268 struct taskparamdescriptor *tpd=ptr->src;
270 for(i=0; i<tpd->numParameters; i++) {
271 void * orig=tpd->parameterArray[i];
272 ENQUEUE(orig, tpd->parameterArray[i]);
276 genrehash(activetasks);
279 /* Update failed tasks */
281 struct genpointerlist * ptr=failedtasks->list;
283 struct taskparamdescriptor *tpd=ptr->src;
285 for(i=0; i<tpd->numParameters; i++) {
286 void * orig=tpd->parameterArray[i];
287 ENQUEUE(orig, tpd->parameterArray[i]);
291 genrehash(failedtasks);
/* Transitive closure: scan each gray object and forward the pointers
 * it contains.  ptr is the from-space original; word [1] of it holds
 * the forwarding pointer to the to-space copy (set by gc_createcopy). */
296 void * ptr=dequeue();
297 void *cpy=((void **)ptr)[1];
298 int type=((int *)cpy)[0];
299 unsigned int * pointer;
303 /* Nothing is inside */
/* pointerarray[type] describes the layout: 0 = no pointer fields,
 * 1 = array of pointers, otherwise a table of pointer-field offsets. */
308 pointer=pointerarray[type];
310 /* Array of primitives */
312 #if defined(DSTM)||defined(FASTCHECK)
/* Even primitive arrays carry runtime header pointers in these builds. */
313 struct ArrayObject *ao=(struct ArrayObject *) ptr;
314 struct ArrayObject *ao_cpy=(struct ArrayObject *) cpy;
315 ENQUEUE((void *)ao->___nextobject___, *((void **)&ao_cpy->___nextobject___));
316 ENQUEUE((void *)ao->___localcopy___, *((void **)&ao_cpy->___localcopy___));
318 } else if (((int)pointer)==1) {
319 /* Array of pointers */
320 struct ArrayObject *ao=(struct ArrayObject *) ptr;
321 struct ArrayObject *ao_cpy=(struct ArrayObject *) cpy;
322 #if (defined(DSTM)||defined(FASTCHECK))
323 ENQUEUE((void *)ao->___nextobject___, *((void **)&ao_cpy->___nextobject___));
324 ENQUEUE((void *)ao->___localcopy___, *((void **)&ao_cpy->___localcopy___));
/* Element storage begins one int past the ___length___ field. */
326 int length=ao->___length___;
328 for(i=0; i<length; i++) {
329 void *objptr=((void **)(((char *)&ao->___length___)+sizeof(int)))[i];
330 ENQUEUE(objptr, ((void **)(((char *)&ao_cpy->___length___)+sizeof(int)))[i]);
/* Normal object: pointer[1..size] are byte offsets of its pointer
 * fields (size presumably read from pointer[0]; that line is elided). */
335 for(i=1; i<=size; i++) {
336 unsigned int offset=pointer[i];
337 void * objptr=*((void **)(((int)ptr)+offset));
338 ENQUEUE(objptr, *((void **)(((int)cpy)+offset)));
346 #if defined(THREADS)||defined(DSTM)
348 pthread_mutex_unlock(&gclistlock);
353 /* Fix up the references from tags. This can't be done earlier,
354 because we don't want tags to keep objects alive */
356 while(taghead!=NULL) {
358 struct pointerblock *tmp=taghead->next;
359 for(i=0; i<tagindex; i++) {
360 struct ___TagDescriptor___ *tagd=taghead->ptrs[i];
361 struct ___Object___ *obj=tagd->flagptr;
362 struct ___TagDescriptor___ *copy=((struct ___TagDescriptor___**)tagd)[1];
364 /* Zero object case */
/* type==-1 appears to mark a forwarded object, with word [1] holding
 * its new address -- TODO confirm against gc_createcopy's elided
 * already-copied branch. */
365 } else if (obj->type==-1) {
366 /* Single object case */
367 copy->flagptr=((struct ___Object___**)obj)[1];
368 } else if (obj->type==OBJECTARRAYTYPE) {
/* Tag bound to many objects: rebuild the bound-object array keeping
 * only the survivors (entries whose type was rewritten to -1). */
370 struct ArrayObject *ao=(struct ArrayObject *) obj;
374 struct ArrayObject *aonew;
376 /* Count live objects */
377 for(j=0; j<ao->___cachedCode___; j++) {
378 struct ___Object___ * tobj=ARRAYGET(ao, struct ___Object___ *, j);
/* Round capacity up to a multiple of OBJECTARRAYINTERVAL. */
383 livecount=((livecount-1)/OBJECTARRAYINTERVAL+1)*OBJECTARRAYINTERVAL;
384 aonew=(struct ArrayObject *) tomalloc(sizeof(struct ArrayObject)+sizeof(struct ___Object___*)*livecount);
385 memcpy(aonew, ao, sizeof(struct ArrayObject));
386 aonew->type=OBJECTARRAYTYPE;
387 aonew->___length___=livecount;
389 for(j=0; j<ao->___cachedCode___; j++) {
390 struct ___Object___ * tobj=ARRAYGET(ao, struct ___Object___ *, j);
391 if (tobj->type==-1) {
392 struct ___Object___ * tobjcpy=((struct ___Object___**)tobj)[1];
393 ARRAYSET(aonew, struct ___Object___*, k++,tobjcpy);
396 aonew->___cachedCode___=k;
/* NULL out the unused tail slots of the compacted array. */
397 for(; k<livecount; k++) {
398 ARRAYSET(aonew, struct ___Object___*, k, NULL);
401 /* No object live anymore */
/* Bump-pointer allocation of size bytes in to-space, used while
 * copying during collection.
 * NOTE(review): the pointer bump, alignment, and any overflow handling
 * are elided in this view (original lines 414+). */
412 void * tomalloc(int size) {
413 void * ptr=to_heapptr;
420 #if defined(THREADS)||defined(DSTM)
/* GC safe point for a mutator thread: publish this thread's roots via
 * stopforgc, then block on gclock until any in-progress collection
 * finishes before resuming. */
421 void checkcollect(void * ptr) {
422 struct listitem * tmp=stopforgc((struct garbagelist *)ptr);
423 pthread_mutex_lock(&gclock); // Wait for GC
/* NOTE(review): the restartaftergc(tmp) call between these two lines
 * is elided in this view (original line 424). */
425 pthread_mutex_unlock(&gclock);
/* DSTM variant of checkcollect: additionally exposes revertlist as a
 * root by packing it into an on-stack array cast to a garbagelist
 * (presumably {size=1, array[0]=ptr, revertlist} -- confirm against
 * struct garbagelist's layout), then reloads the possibly-forwarded
 * value after collection. */
429 void checkcollect2(void * ptr) {
430 int ptrarray[]={1, (int)ptr, (int) revertlist};
431 struct listitem * tmp=stopforgc((struct garbagelist *)ptrarray);
432 pthread_mutex_lock(&gclock); // Wait for GC
434 pthread_mutex_unlock(&gclock);
435 revertlist=(struct ___Object___*)ptrarray[2];
/* Register this thread's root list on the global GC list, capture its
 * thread-local lock list, and signal the collector that one more
 * thread has stopped.  Returns the list node, which the caller must
 * later pass to restartaftergc. */
440 struct listitem * stopforgc(struct garbagelist * ptr) {
441 struct listitem * litem=malloc(sizeof(struct listitem));
443 litem->locklist=pthread_getspecific(threadlocks);
445 pthread_mutex_lock(&gclistlock);
/* NOTE(review): insertion into 'list' and the listcount update are
 * elided in this view (original lines 446-450). */
451 pthread_cond_signal(&gccond);
452 pthread_mutex_unlock(&gclistlock);
/* Counterpart of stopforgc: restore this thread's lock list and unlink
 * litem from the doubly-linked global GC list under gclistlock. */
456 void restartaftergc(struct listitem * litem) {
457 pthread_mutex_lock(&gclistlock);
458 pthread_setspecific(threadlocks, litem->locklist);
/* Head-of-list case (prev==NULL) is handled in elided lines 460-461. */
459 if (litem->prev==NULL) {
462 litem->prev->next=litem->next;
464 if (litem->next!=NULL) {
465 litem->next->prev=litem->prev;
468 pthread_mutex_unlock(&gclistlock);
/* Allocate size bytes from the current heap; when the allocation
 * cursor passes the GC point, lazily create the initial semispaces or
 * run a collection, growing the to-space first if the heuristic says
 * so, then flip the semispaces.
 * NOTE(review): many lines are elided in this view; comments describe
 * only what is visible. */
473 void * mygcmalloc(struct garbagelist * stackptr, int size) {
475 #if defined(THREADS)||defined(DSTM)
/* If another thread already holds gclock it is collecting: park this
 * thread's roots and block until the collection completes. */
476 if (pthread_mutex_trylock(&gclock)!=0) {
477 struct listitem *tmp=stopforgc(stackptr);
478 pthread_mutex_lock(&gclock);
486 if (curr_heapptr>curr_heapgcpoint) {
487 if (curr_heapbase==0) {
488 /* Need to allocate base heap */
489 curr_heapbase=malloc(INITIALHEAPSIZE);
490 bzero(curr_heapbase, INITIALHEAPSIZE);
491 curr_heaptop=curr_heapbase+INITIALHEAPSIZE;
492 curr_heapgcpoint=((char *) curr_heapbase)+GCPOINT(INITIALHEAPSIZE);
493 curr_heapptr=curr_heapbase+size;
495 to_heapbase=malloc(INITIALHEAPSIZE);
496 to_heaptop=to_heapbase+INITIALHEAPSIZE;
497 to_heapptr=to_heapbase;
499 #if defined(THREADS)||defined(DSTM)
500 pthread_mutex_unlock(&gclock);
505 /* Grow the to heap if necessary */
507 int curr_heapsize=curr_heaptop-curr_heapbase;
508 int to_heapsize=to_heaptop-to_heapbase;
/* Size the next heap from the last live size (HEAPSIZE targets ~60%
 * occupancy), rounded up to an 8-byte multiple and never smaller than
 * the current heap. */
511 last_heapsize=HEAPSIZE(lastgcsize, size);
512 if ((last_heapsize&7)!=0)
513 last_heapsize+=(8-(last_heapsize%8));
515 if (curr_heapsize>last_heapsize)
516 last_heapsize=curr_heapsize;
517 if (last_heapsize>to_heapsize) {
519 to_heapbase=malloc(last_heapsize);
520 if (to_heapbase==NULL) {
521 printf("Error Allocating enough memory\n");
524 to_heaptop=to_heapbase+last_heapsize;
525 to_heapptr=to_heapbase;
529 /* Do our collection */
532 /* Update stat on previous gc size */
533 lastgcsize=(to_heapptr-to_heapbase)+size;
535 /* Flip to/curr heaps */
/* NOTE(review): the flip assignments are partially elided (original
 * lines 539-545); tmp presumably ends up pointing into the new current
 * heap for the bzero below -- confirm against the full file. */
537 void * tmp=to_heapbase;
538 to_heapbase=curr_heapbase;
542 to_heaptop=curr_heaptop;
/* Reserve the requested allocation directly after the copied data. */
546 curr_heapptr=to_heapptr+size;
547 curr_heapgcpoint=((char *) curr_heapbase)+GCPOINT(curr_heaptop-curr_heapbase);
548 to_heapptr=to_heapbase;
550 /* Not enough room :(, redo gc */
551 if (curr_heapptr>curr_heapgcpoint) {
552 #if defined(THREADS)||defined(DSTM)
553 pthread_mutex_unlock(&gclock);
/* Retry: the grow-heap heuristic will see the new lastgcsize. */
555 return mygcmalloc(stackptr, size);
/* Zero the region [tmp, curr_heaptop) for fresh allocations. */
558 bzero(tmp, curr_heaptop-tmp);
559 #if defined(THREADS)||defined(DSTM)
560 pthread_mutex_unlock(&gclock);
565 #if defined(THREADS)||defined(DSTM)
566 pthread_mutex_unlock(&gclock);
/* Copy orig into to-space (unless already copied) and return its new
 * address through copy_ptr; the forwarding pointer is stored in word
 * [1] of the from-space original.  The return value distinguishes
 * newly-copied objects (which the caller must enqueue for scanning)
 * from already-forwarded ones -- TODO confirm, the return statements
 * are elided.
 * NOTE(review): several interior lines are elided and the function
 * continues past the end of this view. */
573 int gc_createcopy(void * orig, void ** copy_ptr) {
578 int type=((int *)orig)[0];
/* Already-forwarded path (presumably guarded by a type==-1 test,
 * elided at original line 579): return the stored forwarding pointer. */
580 *copy_ptr=((void **)orig)[1];
583 if (type<NUMCLASSES) {
584 /* We have a normal object */
585 int size=classsize[type];
586 void *newobj=tomalloc(size);
587 memcpy(newobj, orig, size);
/* Install the forwarding pointer in the from-space original. */
589 ((void **)orig)[1]=newobj;
593 /* We have an array */
594 struct ArrayObject *ao=(struct ArrayObject *)orig;
/* For array types, classsize[type] holds the per-element size. */
595 int elementsize=classsize[type];
596 int length=ao->___length___;
597 int size=sizeof(struct ArrayObject)+length*elementsize;
598 void *newobj=tomalloc(size);
599 memcpy(newobj, orig, size);
601 ((void **)orig)[1]=newobj;