3 #include "structdefs.h"
5 #include "SimpleHash.h"
7 #include "GenericHashtable.h"
9 #if defined(THREADS) || defined(DSTM) || defined(STM)
23 #include "delaycomp.h"
/* Initial size of each semispace: 256 MB. */
#define INITIALHEAPSIZE (256*1024*1024)
/* Occupancy threshold (99% of capacity) at which a collection is triggered. */
#define GCPOINT(x) ((int)((x)*0.99))
/* This define takes in how full the heap is initially and returns a new heap size to use */
/* Fully parenthesized (arguments and whole expansion) so the macro is safe
   inside larger expressions; numerically identical for existing call sites. */
#define HEAPSIZE(x,y) (((int)((x)+(y)))*2)
35 extern struct genhashtable * activetasks;
37 extern struct parameterwrapper * objectqueues[NUMCLASSES];
39 extern struct genhashtable * failedtasks;
40 extern struct taskparamdescriptor *currtpd;
41 extern struct ctable *forward;
42 extern struct ctable *reverse;
43 extern struct RuntimeHash *fdtoobject;
/* Per-type statistics: bytes of live data copied for each object type
 * during the last collection (printed by the GC summary). */
long garbagearray[MAXSTATS];
#if defined(THREADS) || defined(DSTM) || defined(STM)
/* Head of the linked list of mutator threads parked for GC (see stopforgc). */
struct listitem * list=NULL;
/* This thread's node on 'list'; thread-local so each thread registers itself. */
__thread struct listitem litem;
60 //Need to check if pointers are transaction pointers
61 //this also catches the special flag value of 1 for local copies
63 #define ENQUEUE(orig, dst) \
64 if ((!(((unsigned int)orig)&0x1))) { \
65 if (orig>=curr_heapbase&&orig<curr_heaptop) { \
67 if (gc_createcopy(orig,©)) \
73 #define ENQUEUE(orig, dst) \
74 if (orig>=curr_heapbase&&orig<curr_heaptop) { \
76 if (gc_createcopy(orig,©)) \
80 #define SENQUEUE(orig, dst) \
83 if (gc_createcopy(orig,©)) \
87 #elif defined(FASTCHECK)
88 #define ENQUEUE(orig, dst) \
89 if (((unsigned int)orig)!=1) { \
91 if (gc_createcopy(orig,©)) \
95 #define ENQUEUE(orig, dst) \
97 if (gc_createcopy(orig,©)) \
/* One chunk of the GC worklist: a fixed array of gray-object pointers plus
 * a link to the next chunk (closing brace elided in this excerpt). */
struct pointerblock {
  void * ptrs[NUMPTRS];
  struct pointerblock *next;
/* From-space ("current") heap bounds and bump pointer. */
void * curr_heapbase=0;
void * curr_heapptr=0;
/* When curr_heapptr passes this point, the next mygcmalloc triggers a GC. */
void * curr_heapgcpoint=0;
void * curr_heaptop=0;
/* To-space base; objects are copied here during collection. */
void * to_heapbase=0;
/* Worklist of objects whose fields still need scanning (enqueue/dequeue). */
struct pointerblock *head=NULL;
struct pointerblock *tail=NULL;
/* Retired chunk kept for reuse to avoid a malloc on the next overflow. */
struct pointerblock *spare=NULL;
/* Push 'ptr' onto the gray-object worklist.  When the head chunk is full
 * (headindex==NUMPTRS) a new pointerblock is linked in — presumably reusing
 * 'spare' first (the branch is in elided lines; confirm against full file). */
void enqueue(void *ptr) {
  if (headindex==NUMPTRS) {
    struct pointerblock * tmp;
    tmp=malloc(sizeof(struct pointerblock));
  head->ptrs[headindex++]=ptr;
139 if (tailindex==NUMPTRS) {
140 struct pointerblock *tmp=tail;
148 return tail->ptrs[tailindex++];
/* Forward every object pointer stored in an objlist (new/locked object
 * tracking list), rewriting each slot in place via SENQUEUE. */
void fixobjlist(struct objlist * ptr) {
  for(i=0;i<ptr->offset;i++) {
    SENQUEUE(ptr->objs[i], ptr->objs[i]);
/* Rebuild a transaction-cache hash table during GC: forward every key and
 * value into to-space, scan each cached object's pointer fields (using the
 * per-type layout in pointerarray), then re-insert entries into a freshly
 * allocated bucket array under the new key addresses.
 * NOTE(review): this excerpt is elided; several statements (key assignment,
 * bucket insertion, free of the old table) are in missing lines, so the
 * comments below cover only the visible logic. */
void fixtable(chashlistnode_t ** tc_table, chashlistnode_t **tc_list, cliststruct_t **cstr, unsigned int tc_size) {
  /* Table capacity appears to be tc_size<<4 slots; mask+shift extract the
     bucket index from a key address (see 'index' computation below). */
  unsigned int mask=(tc_size<<4)-1;
  chashlistnode_t *node=calloc(tc_size, sizeof(chashlistnode_t));
  chashlistnode_t *ptr=*tc_table;
  chashlistnode_t *curr;
  chashlistnode_t *newlist=NULL;
  for(i=0;i<tc_size;i++) {
    do { //Inner loop to go through linked lists
      chashlistnode_t *tmp,*next;
      if ((key=(void *)curr->key) == 0) { //Exit inner loop if there the first element is 0
        break; //key = val =0 for element if not present within the hash table
      /* Forward the cached value if it lives in the from-space heap. */
      if (curr->val>=curr_heapbase&&curr->val<curr_heaptop) {
        SENQUEUE(curr->val, curr->val);
      //rewrite transaction cache entry
      void *vptr=curr->val;
      int type=((int *)vptr)[0];
      /* pointerarray[type]: 0 => primitive array, 1 => pointer array,
         otherwise a table of field offsets (count in slot 0). */
      unsigned INTPTR *pointer=pointerarray[type];
      //array of primitives - do nothing
      struct ArrayObject *ao=(struct ArrayObject *) vptr;
      SENQUEUE((void *)ao->___objlocation___, *((void **)&ao->___objlocation___));
      } else if (((INTPTR)pointer)==1) {
        struct ArrayObject *ao=(struct ArrayObject *) vptr;
        int length=ao->___length___;
        SENQUEUE((void *)ao->___objlocation___, *((void **)&ao->___objlocation___));
        /* NOTE(review): this loop reuses 'i', the outer bucket counter —
           looks like it clobbers the table walk unless an inner 'int i' is
           declared in the elided lines; verify against the full source. */
        for(i=0; i<length; i++) {
          /* Element i of the pointer array starts right after ___length___. */
          void *objptr=((void **)(((char *)&ao->___length___)+sizeof(int)))[i];
          SENQUEUE(objptr, ((void **)(((char *)&ao->___length___)+sizeof(int)))[i]);
      /* Normal object: forward each pointer field listed in the offset table. */
      INTPTR size=pointer[0];
      for(i=1; i<=size; i++) {
        unsigned int offset=pointer[i];
        void * objptr=*((void **)(((char *)vptr)+offset));
        SENQUEUE(objptr, *((void **)(((char *)vptr)+offset)));
      /* Bucket for the (forwarded) key in the new table. */
      index = (((unsigned INTPTR)key) & mask) >>4;
      // Insert into the new table
      tmp->key = curr->key;
      tmp->val = curr->val;
      } else if (isfirst) {
        /* Bucket occupied: chain a new node, drawn from the current
           cliststruct_t block, or a fresh block when that one is full. */
        chashlistnode_t *newnode;
        if ((*cstr)->num<NUMCLIST) {
          newnode=&(*cstr)->array[(*cstr)->num];
          cliststruct_t *tcl=calloc(1,sizeof(cliststruct_t));
          newnode=&tcl->array[0];
        newnode->key = curr->key;
        newnode->val = curr->val;
        newnode->next = tmp->next;
        newnode->lnext=newlist;
      curr->next=tmp->next;
261 if ((head==tail)&&(tailindex==headindex))
/* Separate worklist for tag descriptors — kept apart from the object queue
 * because tags must not keep objects alive (see the fix-up pass in collect). */
struct pointerblock *taghead=NULL;

/* Record a tag descriptor for post-collection fix-up, growing the tag
 * worklist by one pointerblock chunk when the current one is full. */
void enqueuetag(struct ___TagDescriptor___ *ptr) {
  if (tagindex==NUMPTRS) {
    struct pointerblock * tmp=malloc(sizeof(struct pointerblock));
  taghead->ptrs[tagindex++]=ptr;
#if defined(STM)||defined(THREADS)
/* Per-thread bump-allocation window [memorybase, memorytop); refilled from
 * the shared heap by mygcmalloc/helper in MEMORYBLOCK-sized slabs. */
__thread char * memorybase=NULL;
__thread char * memorytop=NULL;
/* Stop-the-world copying collection.  Waits for all mutators to park, then
 * forwards every root (thread shadow stacks, transaction caches, lock lists,
 * object sets, task descriptors), drains the gray worklist scanning each
 * copied object's fields, and finally fixes up tag->object references so
 * that tags alone do not keep objects alive.
 * NOTE(review): this excerpt is elided — many statements and closing braces
 * between the visible lines are missing; comments describe only what is
 * shown here. */
void collect(struct garbagelist * stackptr) {
#if defined(THREADS)||defined(DSTM)||defined(STM)
  /* Block until every other registered thread has stopped for GC. */
  pthread_mutex_lock(&gclistlock);
  if ((listcount+1)==threadcount) {
    break; /* Have all other threads stopped */
  pthread_cond_wait(&gccond, &gclistlock);
  /* Chain this function's own shadow stack onto the caller's. */
  ptrstack.prev=stackptr;
  stackptr=(struct garbagelist *) &ptrstack;
  /* Reset the per-type statistics before this cycle. */
  for(i=0;i<MAXSTATS;i++)
  /* Fresh chunks for the gray-object queue and the tag queue. */
  head=tail=malloc(sizeof(struct pointerblock));
  taghead=malloc(sizeof(struct pointerblock));
  /* Forward this thread's transaction cache and locked-object list. */
  fixtable(&c_table, &c_list, &c_structs, c_size);
  fixobjlist(lockedobjs);
  /* Check current stack */
#if defined(THREADS)||defined(DSTM)||defined(STM)
  struct listitem *listptr=list;
  /* Walk every frame of the shadow-stack chain, forwarding each slot. */
  while(stackptr!=NULL) {
    for(i=0; i<stackptr->size; i++) {
      void * orig=stackptr->array[i];
      ENQUEUE(orig, stackptr->array[i]);
    stackptr=stackptr->next;
#if defined(THREADS)||defined(DSTM)||defined(STM)
    /* Go to next thread */
    /* Skip our own list node; we already scanned our stack above. */
    if (listptr==&litem) {
      listptr=listptr->next;
    struct listitem *litem=pthread_getspecific(litemkey);
    if (listptr==litem) {
      listptr=listptr->next;
    /* Forward the parked thread's lock list, transaction cache, object
       lists, and reset its bump-allocation window. */
    void * orig=listptr->locklist;
    ENQUEUE(orig, listptr->locklist);
    if ((*listptr->tc_table)!=NULL) {
      fixtable(listptr->tc_table, listptr->tc_list, listptr->tc_structs, listptr->tc_size);
      fixobjlist(listptr->objlist);
    fixobjlist(listptr->lockedlist);
    *(listptr->base)=NULL;
    stackptr=listptr->stackptr;
    listptr=listptr->next;
  /* FASTCHECK revert-list root. */
  ENQUEUE(___fcrevert___, ___fcrevert___);
  /* Update objectsets */
  for(i=0; i<NUMCLASSES; i++) {
#if !defined(MULTICORE)
    struct parameterwrapper * p=objectqueues[i];
    struct ObjectHash * set=p->objectset;
    struct ObjectNode * ptr=set->listhead;
    void *orig=(void *)ptr->key;
    ENQUEUE(orig, *((void **)(&ptr->key)));
    ObjectHashrehash(set); /* Rehash the table */
  /* Forward the FASTCHECK forward/reverse maps and the fd->object map. */
  struct cnode * ptr=forward->listhead;
  void * orig=(void *)ptr->key;
  ENQUEUE(orig, *((void **)(&ptr->key)));
  crehash(forward); /* Rehash the table */
  struct cnode * ptr=reverse->listhead;
  void *orig=(void *)ptr->val;
  ENQUEUE(orig, *((void**)(&ptr->val)));
  struct RuntimeNode * ptr=fdtoobject->listhead;
  void *orig=(void *)ptr->data;
  ENQUEUE(orig, *((void**)(&ptr->data)));
  /* Update current task descriptor */
  for(i=0; i<currtpd->numParameters; i++) {
    void *orig=currtpd->parameterArray[i];
    ENQUEUE(orig, currtpd->parameterArray[i]);
  /* Update active tasks */
  struct genpointerlist * ptr=activetasks->list;
  struct taskparamdescriptor *tpd=ptr->src;
  for(i=0; i<tpd->numParameters; i++) {
    void * orig=tpd->parameterArray[i];
    ENQUEUE(orig, tpd->parameterArray[i]);
  genrehash(activetasks);
  /* Update failed tasks */
  struct genpointerlist * ptr=failedtasks->list;
  struct taskparamdescriptor *tpd=ptr->src;
  for(i=0; i<tpd->numParameters; i++) {
    void * orig=tpd->parameterArray[i];
    ENQUEUE(orig, tpd->parameterArray[i]);
  genrehash(failedtasks);
  /* Drain the worklist: scan each copied object's fields per its type. */
  void * ptr=dequeue();
  int type=((int *)cpy)[0];
  unsigned INTPTR * pointer;
  /* Nothing is inside */
  pointer=pointerarray[type];
  /* Array of primitives */
#if defined(DSTM)||defined(FASTCHECK)
  /* Even primitive arrays carry runtime links that must be forwarded. */
  struct ArrayObject *ao=(struct ArrayObject *) ptr;
  struct ArrayObject *ao_cpy=(struct ArrayObject *) cpy;
  ENQUEUE((void *)ao->___nextobject___, *((void **)&ao_cpy->___nextobject___));
  ENQUEUE((void *)ao->___localcopy___, *((void **)&ao_cpy->___localcopy___));
  struct ArrayObject *ao=(struct ArrayObject *) ptr;
  struct ArrayObject *ao_cpy=(struct ArrayObject *) cpy;
  SENQUEUE((void *)ao->___objlocation___, *((void **)&ao_cpy->___objlocation___));
  } else if (((INTPTR)pointer)==1) {
    /* Array of pointers */
    struct ArrayObject *ao=(struct ArrayObject *) ptr;
    struct ArrayObject *ao_cpy=(struct ArrayObject *) cpy;
#if (defined(DSTM)||defined(FASTCHECK))
    ENQUEUE((void *)ao->___nextobject___, *((void **)&ao_cpy->___nextobject___));
    ENQUEUE((void *)ao->___localcopy___, *((void **)&ao_cpy->___localcopy___));
    SENQUEUE((void *)ao->___objlocation___, *((void **)&ao_cpy->___objlocation___));
    int length=ao->___length___;
    /* Forward every element; read from the old copy, write into the new. */
    for(i=0; i<length; i++) {
      void *objptr=((void **)(((char *)&ao->___length___)+sizeof(int)))[i];
      ENQUEUE(objptr, ((void **)(((char *)&ao_cpy->___length___)+sizeof(int)))[i]);
  /* Normal object: forward each field listed in the offset table. */
  INTPTR size=pointer[0];
  for(i=1; i<=size; i++) {
    unsigned int offset=pointer[i];
    void * objptr=*((void **)(((char *)ptr)+offset));
    ENQUEUE(objptr, *((void **)(((char *)cpy)+offset)));
#if defined(THREADS)||defined(DSTM)||defined(STM)
  pthread_mutex_unlock(&gclistlock);
  /* Fix up the references from tags. This can't be done earlier,
     because we don't want tags to keep objects alive */
  while(taghead!=NULL) {
    struct pointerblock *tmp=taghead->next;
    for(i=0; i<tagindex; i++) {
      struct ___TagDescriptor___ *tagd=taghead->ptrs[i];
      struct ___Object___ *obj=tagd->flagptr;
      /* Slot 1 of a forwarded object holds its to-space address. */
      struct ___TagDescriptor___ *copy=((struct ___TagDescriptor___**)tagd)[1];
      /* Zero object case */
      } else if (obj->type==-1) {
        /* Single object case */
        copy->flagptr=((struct ___Object___**)obj)[1];
      } else if (obj->type==OBJECTARRAYTYPE) {
        /* Tag bound to an array of objects: rebuild the array keeping only
           the entries whose objects survived the collection. */
        struct ArrayObject *ao=(struct ArrayObject *) obj;
        struct ArrayObject *aonew;
        /* Count live objects */
        for(j=0; j<ao->___cachedCode___; j++) {
          struct ___Object___ * tobj=ARRAYGET(ao, struct ___Object___ *, j);
        /* Round the live count up to a multiple of OBJECTARRAYINTERVAL. */
        livecount=((livecount-1)/OBJECTARRAYINTERVAL+1)*OBJECTARRAYINTERVAL;
        aonew=(struct ArrayObject *) tomalloc(sizeof(struct ArrayObject)+sizeof(struct ___Object___*)*livecount);
        memcpy(aonew, ao, sizeof(struct ArrayObject));
        aonew->type=OBJECTARRAYTYPE;
        aonew->___length___=livecount;
        /* Copy forwarded survivors; type==-1 marks a forwarded object. */
        for(j=0; j<ao->___cachedCode___; j++) {
          struct ___Object___ * tobj=ARRAYGET(ao, struct ___Object___ *, j);
          if (tobj->type==-1) {
            struct ___Object___ * tobjcpy=((struct ___Object___**)tobj)[1];
            ARRAYSET(aonew, struct ___Object___*, k++,tobjcpy);
        aonew->___cachedCode___=k;
        /* Null out the unused tail slots. */
        for(; k<livecount; k++) {
          ARRAYSET(aonew, struct ___Object___*, k, NULL);
      /* No object live anymore */
/* Bump-allocate 'size' bytes from to-space during collection; advances
 * to_heapptr (alignment/advance logic is in elided lines). */
void * tomalloc(int size) {
  void * ptr=to_heapptr;
#if defined(THREADS)||defined(DSTM)||defined(STM)
/* GC safepoint: park this thread (with its shadow stack) so a pending
 * collection can proceed. */
void checkcollect(void * ptr) {
  stopforgc((struct garbagelist *)ptr);
/* FASTCHECK safepoint variant: builds a fake one-slot garbagelist on the
 * stack holding 'ptr' and the revert list, so both get forwarded; reads the
 * possibly-updated revert list back afterwards.
 * NOTE(review): the (int) casts assume pointers fit in int (32-bit build) —
 * confirm target assumptions before reuse. */
void checkcollect2(void * ptr) {
  int ptrarray[]={1, (int)ptr, (int) revertlist};
  stopforgc((struct garbagelist *)ptrarray);
  revertlist=(struct ___Object___*)ptrarray[2];
/* Register this thread as stopped for GC: publish its shadow stack, lock
 * list, transaction cache, object lists, and bump-allocator base on its
 * 'litem' node, bump the stopped-thread count, and wake the collector if it
 * is now the only thread still running. */
void stopforgc(struct garbagelist * ptr) {
  //just append us to the list
  ptr=(struct garbagelist *) &ptrstack;
  /* STM/THREADS variant: fill in the thread-local litem node. */
  litem.locklist=pthread_getspecific(threadlocks);
  litem.tc_size=c_size;
  litem.tc_table=&c_table;
  litem.tc_list=&c_list;
  litem.tc_structs=&c_structs;
  litem.objlist=newobjs;
  litem.lockedlist=lockedobjs;
  /* Expose the bump window so collect() can reset it (*base=NULL). */
  litem.base=&memorybase;
  /* Alternate variant: litem node is kept in thread-specific storage. */
  struct listitem *litem=pthread_getspecific(litemkey);
  litem->locklist=pthread_getspecific(threadlocks);
  pthread_mutex_lock(&gclistlock);
  if ((listcount+1)==threadcount) {
    //only do wakeup if we are ready to GC
    pthread_cond_signal(&gccond);
  pthread_mutex_unlock(&gclistlock);
/* Resume this thread after a collection: the gclock acquire blocks until the
 * collector releases it, then the thread unregisters itself from the stopped
 * list under gclistlock (removal statements are in elided lines). */
void restartaftergc() {
  pthread_mutex_lock(&gclock); // Wait for GC
  pthread_mutex_unlock(&gclock);
  pthread_mutex_lock(&gclistlock);
  pthread_mutex_unlock(&gclistlock);
#if defined(STM)||defined(THREADS)
/* Slab size for refilling a thread's private bump-allocation window. */
#define MEMORYBLOCK 65536
void * helper(struct garbagelist *, int);
/* Fast-path allocator: carve 'size' bytes from the thread-local
 * [memorybase, memorytop) window, refilling it from the shared heap via
 * helper() (which may trigger GC) when the window is empty or too small. */
void * mygcmalloc(struct garbagelist * stackptr, int size) {
  if (memorybase==NULL||size>(memorytop-memorybase)) {
    /* Request at least a full slab so small allocations amortize the lock. */
    int toallocate=(size>MEMORYBLOCK)?size:MEMORYBLOCK;
    memorybase=helper(stackptr, toallocate);
    memorytop=memorybase+toallocate;
  char *retvalue=memorybase;
/* Shared-heap allocator (named helper() under STM/THREADS, mygcmalloc()
 * otherwise): bump-allocates from the current semispace under gclock,
 * lazily creating both semispaces on first use, and runs a copying
 * collection — growing to-space as needed — once the GC trigger point is
 * passed.  NOTE(review): excerpt is elided; several branches/braces are in
 * missing lines. */
void * helper(struct garbagelist * stackptr, int size) {
void * mygcmalloc(struct garbagelist * stackptr, int size) {
#if defined(THREADS)||defined(DSTM)||defined(STM)
  /* Spin (rather than block) so a thread that loses the race can reach a
     safepoint while another thread collects. */
  while (pthread_mutex_trylock(&gclock)!=0) {
  /* Past the GC point (or heap not yet created) — slow path. */
  if (curr_heapptr>curr_heapgcpoint||curr_heapptr<curr_heapbase) {
    if (curr_heapbase==0) {
      /* Need to allocate base heap */
      curr_heapbase=malloc(INITIALHEAPSIZE);
      if (curr_heapbase==NULL) {
        printf("malloc failed\n");
      bzero(curr_heapbase, INITIALHEAPSIZE);
      curr_heaptop=curr_heapbase+INITIALHEAPSIZE;
      curr_heapgcpoint=((char *) curr_heapbase)+GCPOINT(INITIALHEAPSIZE);
      curr_heapptr=curr_heapbase+size;
      /* Second semispace for copying collection. */
      to_heapbase=malloc(INITIALHEAPSIZE);
      if (to_heapbase==NULL) {
        printf("malloc failed\n");
      to_heaptop=to_heapbase+INITIALHEAPSIZE;
      to_heapptr=to_heapbase;
#if defined(THREADS)||defined(DSTM)||defined(STM)
      pthread_mutex_unlock(&gclock);
    /* Grow the to heap if necessary */
    int curr_heapsize=curr_heaptop-curr_heapbase;
    int to_heapsize=to_heaptop-to_heapbase;
    /* Target: twice (last live size + this request), rounded up to 8 bytes,
       and never smaller than the current heap. */
    last_heapsize=HEAPSIZE(lastgcsize, size);
    if ((last_heapsize&7)!=0)
      last_heapsize+=(8-(last_heapsize%8));
    if (curr_heapsize>last_heapsize)
      last_heapsize=curr_heapsize;
    if (last_heapsize>to_heapsize) {
      to_heapbase=malloc(last_heapsize);
      if (to_heapbase==NULL) {
        printf("Error Allocating enough memory\n");
      to_heaptop=to_heapbase+last_heapsize;
      to_heapptr=to_heapbase;
    /* Do our collection */
    /* Update stat on previous gc size */
    lastgcsize=(to_heapptr-to_heapbase)+size;
    printf("Garbage collected: Old bytes: %u\n", curr_heapptr-curr_heapbase);
    printf("New space: %u\n", to_heapptr-to_heapbase);
    printf("Total space: %u\n", to_heaptop-to_heapbase);
    for(i=0;i<MAXSTATS;i++) {
      if (garbagearray[i]!=0)
        printf("Type=%d Size=%u\n", i, garbagearray[i]);
    /* Flip to/curr heaps */
    void * tmp=to_heapbase;
    to_heapbase=curr_heapbase;
    to_heaptop=curr_heaptop;
    /* Satisfy this request from the freshly compacted heap. */
    curr_heapptr=to_heapptr+size;
    curr_heapgcpoint=((char *) curr_heapbase)+GCPOINT(curr_heaptop-curr_heapbase);
    to_heapptr=to_heapbase;
    /* Not enough room :(, redo gc */
    if (curr_heapptr>curr_heapgcpoint) {
#if defined(THREADS)||defined(DSTM)||defined(STM)
      pthread_mutex_unlock(&gclock);
      return mygcmalloc(stackptr, size);
    /* Zero the unused remainder so new allocations come pre-cleared. */
    bzero(tmp, curr_heaptop-tmp);
#if defined(THREADS)||defined(DSTM)||defined(STM)
    pthread_mutex_unlock(&gclock);
#if defined(THREADS)||defined(DSTM)||defined(STM)
  pthread_mutex_unlock(&gclock);
/* Copy one object into to-space and install a forwarding pointer.
 * Writes the to-space address through copy_ptr; an already-forwarded object
 * (detected in elided lines) just returns its stored forward at slot 1.
 * Objects with type < NUMCLASSES are fixed-size class instances; otherwise
 * the object is an array sized from its element size and length.  Builds
 * with object headers (DSTM/STM) copy header+payload and return a pointer
 * past the header.  The return value (set in elided lines) appears to tell
 * the ENQUEUE macros whether this call performed a fresh copy.
 * NOTE(review): function end is beyond this excerpt. */
int gc_createcopy(void * orig, void ** copy_ptr) {
  int type=((int *)orig)[0];
  /* Forwarded case: slot 1 already holds the to-space address. */
  *copy_ptr=((void **)orig)[1];
  if (type<NUMCLASSES) {
    /* We have a normal object */
    /* Header-carrying build: copy objheader_t + payload together. */
    int size=classsize[type]+sizeof(objheader_t);
    void *newobj=tomalloc(size);
    memcpy(newobj,((char *) orig)-sizeof(objheader_t), size);
    newobj=((char *)newobj)+sizeof(objheader_t);
    /* Headerless build: payload only. */
    int size=classsize[type];
    void *newobj=tomalloc(size);
    memcpy(newobj, orig, size);
    /* Account the copied bytes against this type's GC statistics. */
    garbagearray[type]+=size;
    /* Install forwarding pointer in the old object. */
    ((void **)orig)[1]=newobj;
    /* We have an array */
    struct ArrayObject *ao=(struct ArrayObject *)orig;
    int elementsize=classsize[type];
    int length=ao->___length___;
    int size=sizeof(struct ArrayObject)+length*elementsize+sizeof(objheader_t);
    void *newobj=tomalloc(size);
    memcpy(newobj, ((char*)orig)-sizeof(objheader_t), size);
    newobj=((char *)newobj)+sizeof(objheader_t);
    int size=sizeof(struct ArrayObject)+length*elementsize;
    void *newobj=tomalloc(size);
    memcpy(newobj, orig, size);
    garbagearray[type]+=size;
    ((void **)orig)[1]=newobj;