3 #include "structdefs.h"
5 #include "SimpleHash.h"
7 #include "GenericHashtable.h"
9 #if defined(THREADS) || defined(DSTM) || defined(STM)
25 #define INITIALHEAPSIZE 128*1024*1024
26 #define GCPOINT(x) ((int)((x)*0.95))
27 /* This define takes in how full the heap is initially and returns a new heap size to use */
28 #define HEAPSIZE(x,y) ((int)(x+y))*2
31 extern struct genhashtable * activetasks;
33 extern struct parameterwrapper * objectqueues[NUMCLASSES];
35 extern struct genhashtable * failedtasks;
36 extern struct taskparamdescriptor *currtpd;
37 extern struct ctable *forward;
38 extern struct ctable *reverse;
39 extern struct RuntimeHash *fdtoobject;
44 long garbagearray[MAXSTATS];
47 #if defined(THREADS) || defined(DSTM) || defined(STM)
49 struct listitem * list=NULL;
52 __thread struct listitem litem;
56 //Need to check if pointers are transaction pointers
57 //this also catches the special flag value of 1 for local copies
59 #define ENQUEUE(orig, dst) \
60 if ((!(((unsigned int)orig)&0x1))) { \
61 if (orig>=curr_heapbase&&orig<curr_heaptop) { \
63 if (gc_createcopy(orig,©)) \
69 #define ENQUEUE(orig, dst) \
70 if (orig>=curr_heapbase&&orig<curr_heaptop) { \
72 if (gc_createcopy(orig,©)) \
76 #define SENQUEUE(orig, dst) \
79 if (gc_createcopy(orig,©)) \
83 #elif defined(FASTCHECK)
84 #define ENQUEUE(orig, dst) \
85 if (((unsigned int)orig)!=1) { \
87 if (gc_createcopy(orig,©)) \
91 #define ENQUEUE(orig, dst) \
93 if (gc_createcopy(orig,©)) \
100 struct pointerblock *next;
103 void * curr_heapbase=0;
104 void * curr_heapptr=0;
105 void * curr_heapgcpoint=0;
106 void * curr_heaptop=0;
108 void * to_heapbase=0;
113 struct pointerblock *head=NULL;
115 struct pointerblock *tail=NULL;
117 struct pointerblock *spare=NULL;
/* enqueue: push a live heap pointer onto the GC's worklist.
   The worklist is a chain of struct pointerblock, each holding NUMPTRS
   slots; `head`/`headindex` track the insertion end.
   NOTE(review): interior lines are elided in this view — the branch below
   links in a new pointerblock when the current head block is full. */
119 void enqueue(void *ptr) {
120 if (headindex==NUMPTRS) {
121 struct pointerblock * tmp;
/* No spare block was available; allocate a fresh one. */
126 tmp=malloc(sizeof(struct pointerblock));
/* Store the pointer in the current head block and bump the index. */
131 head->ptrs[headindex++]=ptr;
/* --- dequeue (its header, original line ~134, is elided from this view) ---
   Pops pointers from the `tail`/`tailindex` end of the same worklist;
   when the tail block is exhausted it advances to the next block
   (presumably recycling the old block via `spare` — confirm in full file). */
135 if (tailindex==NUMPTRS) {
136 struct pointerblock *tmp=tail;
/* Return the next queued pointer. */
144 return tail->ptrs[tailindex++];
/* fixobjlist: forward every object pointer recorded in an objlist.
   Each entry is passed through SENQUEUE with itself as the destination,
   so the slot is rewritten in place with the object's to-space copy
   (SENQUEUE calls gc_createcopy — see macro definitions above). */
148 void fixobjlist(struct objlist * ptr) {
/* Walk the `offset` live entries of the list.  NOTE(review): iteration
   over chained objlist blocks, if any, is elided from this view. */
151 for(i=0;i<ptr->offset;i++) {
152 SENQUEUE(ptr->objs[i], ptr->objs[i]);
/* fixtable: rewrite a transaction-cache hash table during collection.
   Allocates a fresh bucket array, forwards every key/value pointer into
   to-space (via SENQUEUE), scans the interior pointers of each cached
   value using the per-type pointer-offset tables in `pointerarray`, and
   re-inserts each entry into the new table under its (forwarded) key.
   Parameters: tc_table  - in/out pointer to the bucket array
               tc_list   - in/out pointer to the iteration list head
               cstr      - in/out pointer to the node-pool (cliststruct_t) chain
               tc_size   - number of buckets.
   NOTE(review): many interior lines are elided in this view; comments
   below describe only what the visible lines establish. */
158 void fixtable(chashlistnode_t ** tc_table, chashlistnode_t **tc_list, cliststruct_t **cstr, unsigned int tc_size) {
/* Mask for bucket selection: low 4 bits are skipped (keys are at least
   16-byte aligned), hence the <<4 here and the >>4 at the index below. */
159 unsigned int mask=(tc_size<<4)-1;
/* Fresh zeroed bucket array that will replace *tc_table. */
160 chashlistnode_t *node=calloc(tc_size, sizeof(chashlistnode_t));
161 chashlistnode_t *ptr=*tc_table;
162 chashlistnode_t *curr;
/* New iteration list rebuilt as entries are re-inserted. */
166 chashlistnode_t *newlist=NULL;
/* Outer loop: every bucket of the old table. */
167 for(i=0;i<tc_size;i++) {
170 do { //Inner loop to go through linked lists
172 chashlistnode_t *tmp,*next;
174 if ((key=(void *)curr->key) == 0) { //Exit inner loop if there the first element is 0
175 break; //key = val =0 for element if not present within the hash table
/* Value still points into from-space: forward it in place. */
178 if (curr->val>=curr_heapbase&&curr->val<curr_heaptop) {
179 SENQUEUE(curr->val, curr->val);
181 //rewrite transaction cache entry
182 void *vptr=curr->val;
/* First word of every object is its type id; it selects the
   pointer-offset descriptor for the scan below. */
183 int type=((int *)vptr)[0];
184 unsigned INTPTR *pointer=pointerarray[type];
/* pointer==0: primitive array — only the back-reference to the
   original object location needs forwarding. */
186 //array of primitives - do nothing
187 struct ArrayObject *ao=(struct ArrayObject *) vptr;
188 SENQUEUE((void *)ao->___objlocation___, *((void **)&ao->___objlocation___));
/* pointer==1: array of object pointers — forward every element. */
189 } else if (((INTPTR)pointer)==1) {
191 struct ArrayObject *ao=(struct ArrayObject *) vptr;
192 int length=ao->___length___;
194 SENQUEUE((void *)ao->___objlocation___, *((void **)&ao->___objlocation___));
195 for(i=0; i<length; i++) {
/* Element storage begins one int past ___length___. */
196 void *objptr=((void **)(((char *)&ao->___length___)+sizeof(int)))[i];
197 SENQUEUE(objptr, ((void **)(((char *)&ao->___length___)+sizeof(int)))[i]);
/* Otherwise: normal object — pointer[0] is the field count,
   pointer[1..size] are byte offsets of pointer fields. */
200 INTPTR size=pointer[0];
202 for(i=1; i<=size; i++) {
203 unsigned int offset=pointer[i];
204 void * objptr=*((void **)(((char *)vptr)+offset));
205 SENQUEUE(objptr, *((void **)(((char *)vptr)+offset)));
/* Re-insert under the (possibly forwarded) key: bucket index drops
   the low 4 alignment bits, matching `mask` above. */
211 index = (((unsigned INTPTR)key) & mask) >>4;
215 // Insert into the new table
/* Bucket slot empty (or head) case: copy entry directly. */
217 tmp->key = curr->key;
218 tmp->val = curr->val;
/* Collision case: chain a node from the cliststruct_t pool,
   growing the pool with a fresh calloc'd block when full. */
221 } else if (isfirst) {
222 chashlistnode_t *newnode;
223 if ((*cstr)->num<NUMCLIST) {
224 newnode=&(*cstr)->array[(*cstr)->num];
228 cliststruct_t *tcl=calloc(1,sizeof(cliststruct_t));
231 newnode=&tcl->array[0];
234 newnode->key = curr->key;
235 newnode->val = curr->val;
236 newnode->next = tmp->next;
/* Thread the node onto the rebuilt iteration list. */
237 newnode->lnext=newlist;
243 curr->next=tmp->next;
257 if ((head==tail)&&(tailindex==headindex))
263 struct pointerblock *taghead=NULL;
/* enqueuetag: record a tag descriptor on a separate worklist (taghead).
   Tags are deliberately NOT traced with the main worklist — see the
   comment near the tag fix-up pass in collect(): tags must not keep
   their flagged objects alive.  Uses the same pointerblock scheme as
   enqueue(), chaining a new block when the current one fills. */
266 void enqueuetag(struct ___TagDescriptor___ *ptr) {
267 if (tagindex==NUMPTRS) {
268 struct pointerblock * tmp=malloc(sizeof(struct pointerblock));
/* Store the tag in the current head block and bump the index. */
273 taghead->ptrs[tagindex++]=ptr;
277 #if defined(STM)||defined(THREADS)
278 __thread char * memorybase=NULL;
279 __thread char * memorytop=NULL;
/* collect: perform one copying collection.
   Phases visible in this view:
     1. (THREADS/DSTM/STM) wait until all other threads have parked in
        stopforgc() — gclistlock/gccond handshake.
     2. Reset stats and worklist blocks.
     3. Trace roots: transaction caches (fixtable), locked/new object
        lists (fixobjlist), every thread's shadow stack, lock lists,
        object queues, the forward/reverse ctables, fdtoobject, the
        current and queued task descriptors.
     4. Drain the worklist, scanning each copied object's interior
        pointers via pointerarray type descriptors.
     5. Fix up tag -> object references last, so tags do not retain
        otherwise-dead objects.
   ENQUEUE(orig, dst) copies `orig` to to-space (via gc_createcopy) and
   rewrites `dst` with the new address; SENQUEUE is the self-forwarding
   variant.  NOTE(review): many interior lines are elided in this view;
   comments describe only what the visible lines establish. */
283 void collect(struct garbagelist * stackptr) {
284 #if defined(THREADS)||defined(DSTM)||defined(STM)
/* Block until every other mutator has checked in (listcount+1 ==
   threadcount, counting this collecting thread). */
286 pthread_mutex_lock(&gclistlock);
288 if ((listcount+1)==threadcount) {
289 break; /* Have all other threads stopped */
291 pthread_cond_wait(&gccond, &gclistlock);
/* Reset per-type garbage statistics. */
298 for(i=0;i<MAXSTATS;i++)
/* Fresh (empty) main worklist and tag worklist. */
306 head=tail=malloc(sizeof(struct pointerblock));
312 taghead=malloc(sizeof(struct pointerblock));
/* Roots: this thread's transaction cache and locked-object list. */
319 fixtable(&c_table, &c_list, &c_structs, c_size);
322 fixobjlist(lockedobjs);
328 /* Check current stack */
329 #if defined(THREADS)||defined(DSTM)||defined(STM)
331 struct listitem *listptr=list;
/* Walk this thread's shadow-stack frames; each frame lists its live
   pointer slots in array[0..size). */
335 while(stackptr!=NULL) {
337 for(i=0; i<stackptr->size; i++) {
338 void * orig=stackptr->array[i];
339 ENQUEUE(orig, stackptr->array[i]);
341 stackptr=stackptr->next;
343 #if defined(THREADS)||defined(DSTM)||defined(STM)
344 /* Go to next thread */
/* Skip our own listitem: this thread's roots were handled above.
   Two variants visible — one compares against the __thread `litem`,
   the other fetches it via pthread_getspecific(litemkey). */
347 if (listptr==&litem) {
348 listptr=listptr->next;
352 struct listitem *litem=pthread_getspecific(litemkey);
353 if (listptr==litem) {
354 listptr=listptr->next;
/* Roots held by the other thread: its lock list, transaction cache,
   new/locked object lists, and its per-thread allocation block
   (which is invalidated so it re-fetches after GC). */
361 void * orig=listptr->locklist;
362 ENQUEUE(orig, listptr->locklist);
365 if ((*listptr->tc_table)!=NULL) {
366 fixtable(listptr->tc_table, listptr->tc_list, listptr->tc_structs, listptr->tc_size);
367 fixobjlist(listptr->objlist);
369 fixobjlist(listptr->lockedlist);
/* Drop the thread's private bump-allocation block; it will call
   helper()/mygcmalloc again after GC. */
372 *(listptr->base)=NULL;
/* Continue with that thread's shadow stack on the next pass. */
374 stackptr=listptr->stackptr;
375 listptr=listptr->next;
/* FASTCHECK revert-list root (presumably — guarded code elided). */
383 ENQUEUE(___fcrevert___, ___fcrevert___);
388 /* Update objectsets */
/* Task-runtime roots: the per-class object queues.  Keys are object
   pointers, so each table must be rehashed after forwarding. */
390 for(i=0; i<NUMCLASSES; i++) {
391 #if !defined(MULTICORE)
392 struct parameterwrapper * p=objectqueues[i];
394 struct ObjectHash * set=p->objectset;
395 struct ObjectNode * ptr=set->listhead;
397 void *orig=(void *)ptr->key;
398 ENQUEUE(orig, *((void **)(&ptr->key)));
401 ObjectHashrehash(set); /* Rehash the table */
/* forward table: keys are object pointers. */
410 struct cnode * ptr=forward->listhead;
412 void * orig=(void *)ptr->key;
413 ENQUEUE(orig, *((void **)(&ptr->key)));
416 crehash(forward); /* Rehash the table */
/* reverse table: the object pointer is the VALUE here, so no rehash
   call is visible for it. */
420 struct cnode * ptr=reverse->listhead;
422 void *orig=(void *)ptr->val;
423 ENQUEUE(orig, *((void**)(&ptr->val)));
/* fdtoobject: objects held against file descriptors. */
430 struct RuntimeNode * ptr=fdtoobject->listhead;
432 void *orig=(void *)ptr->data;
433 ENQUEUE(orig, *((void**)(&ptr->data)));
439 /* Update current task descriptor */
441 for(i=0; i<currtpd->numParameters; i++) {
442 void *orig=currtpd->parameterArray[i];
443 ENQUEUE(orig, currtpd->parameterArray[i]);
448 /* Update active tasks */
450 struct genpointerlist * ptr=activetasks->list;
452 struct taskparamdescriptor *tpd=ptr->src;
454 for(i=0; i<tpd->numParameters; i++) {
455 void * orig=tpd->parameterArray[i];
456 ENQUEUE(orig, tpd->parameterArray[i]);
/* Keys changed (they embed object pointers), so rehash. */
460 genrehash(activetasks);
463 /* Update failed tasks */
465 struct genpointerlist * ptr=failedtasks->list;
467 struct taskparamdescriptor *tpd=ptr->src;
469 for(i=0; i<tpd->numParameters; i++) {
470 void * orig=tpd->parameterArray[i];
471 ENQUEUE(orig, tpd->parameterArray[i]);
475 genrehash(failedtasks);
/* --- Worklist drain (Cheney-style scan of copied objects) --- */
480 void * ptr=dequeue();
/* Word [1] of the from-space object is its forwarding pointer,
   installed by gc_createcopy. */
481 void *cpy=((void **)ptr)[1];
482 int type=((int *)cpy)[0];
483 unsigned INTPTR * pointer;
487 /* Nothing is inside */
492 pointer=pointerarray[type];
494 /* Array of primitives */
/* Even primitive arrays carry runtime header pointers under
   DSTM/FASTCHECK (nextobject/localcopy) and STM (objlocation);
   scan from the OLD object, write into the COPY. */
496 #if defined(DSTM)||defined(FASTCHECK)
497 struct ArrayObject *ao=(struct ArrayObject *) ptr;
498 struct ArrayObject *ao_cpy=(struct ArrayObject *) cpy;
499 ENQUEUE((void *)ao->___nextobject___, *((void **)&ao_cpy->___nextobject___));
500 ENQUEUE((void *)ao->___localcopy___, *((void **)&ao_cpy->___localcopy___));
503 struct ArrayObject *ao=(struct ArrayObject *) ptr;
504 struct ArrayObject *ao_cpy=(struct ArrayObject *) cpy;
505 SENQUEUE((void *)ao->___objlocation___, *((void **)&ao_cpy->___objlocation___));
507 } else if (((INTPTR)pointer)==1) {
508 /* Array of pointers */
509 struct ArrayObject *ao=(struct ArrayObject *) ptr;
510 struct ArrayObject *ao_cpy=(struct ArrayObject *) cpy;
511 #if (defined(DSTM)||defined(FASTCHECK))
512 ENQUEUE((void *)ao->___nextobject___, *((void **)&ao_cpy->___nextobject___));
513 ENQUEUE((void *)ao->___localcopy___, *((void **)&ao_cpy->___localcopy___));
516 SENQUEUE((void *)ao->___objlocation___, *((void **)&ao_cpy->___objlocation___));
518 int length=ao->___length___;
/* Forward each element; element storage begins one int past
   ___length___ in both the old object and the copy. */
520 for(i=0; i<length; i++) {
521 void *objptr=((void **)(((char *)&ao->___length___)+sizeof(int)))[i];
522 ENQUEUE(objptr, ((void **)(((char *)&ao_cpy->___length___)+sizeof(int)))[i]);
/* Normal object: pointer[0] = field count, pointer[1..size] =
   byte offsets of pointer fields. */
525 INTPTR size=pointer[0];
527 for(i=1; i<=size; i++) {
528 unsigned int offset=pointer[i];
529 void * objptr=*((void **)(((char *)ptr)+offset));
530 ENQUEUE(objptr, *((void **)(((char *)cpy)+offset)));
/* Tracing done: release the thread-list lock. */
538 #if defined(THREADS)||defined(DSTM)||defined(STM)
540 pthread_mutex_unlock(&gclistlock);
545 /* Fix up the references from tags. This can't be done earlier,
546 because we don't want tags to keep objects alive */
548 while(taghead!=NULL) {
550 struct pointerblock *tmp=taghead->next;
551 for(i=0; i<tagindex; i++) {
552 struct ___TagDescriptor___ *tagd=taghead->ptrs[i];
553 struct ___Object___ *obj=tagd->flagptr;
/* The tag itself was copied; word [1] is its forwarding pointer. */
554 struct ___TagDescriptor___ *copy=((struct ___TagDescriptor___**)tagd)[1];
556 /* Zero object case */
557 } else if (obj->type==-1) {
558 /* Single object case */
/* type==-1 marks a forwarded object: word [1] is its copy. */
559 copy->flagptr=((struct ___Object___**)obj)[1];
560 } else if (obj->type==OBJECTARRAYTYPE) {
/* Tag flags an array of objects: rebuild it keeping only the
   members that survived (those with the forwarded marker). */
562 struct ArrayObject *ao=(struct ArrayObject *) obj;
566 struct ArrayObject *aonew;
568 /* Count live objects */
569 for(j=0; j<ao->___cachedCode___; j++) {
570 struct ___Object___ * tobj=ARRAYGET(ao, struct ___Object___ *, j);
/* Round capacity up to a multiple of OBJECTARRAYINTERVAL. */
575 livecount=((livecount-1)/OBJECTARRAYINTERVAL+1)*OBJECTARRAYINTERVAL;
576 aonew=(struct ArrayObject *) tomalloc(sizeof(struct ArrayObject)+sizeof(struct ___Object___*)*livecount);
577 memcpy(aonew, ao, sizeof(struct ArrayObject));
578 aonew->type=OBJECTARRAYTYPE;
579 aonew->___length___=livecount;
/* Copy forwarded survivors into the new array... */
581 for(j=0; j<ao->___cachedCode___; j++) {
582 struct ___Object___ * tobj=ARRAYGET(ao, struct ___Object___ *, j);
583 if (tobj->type==-1) {
584 struct ___Object___ * tobjcpy=((struct ___Object___**)tobj)[1];
585 ARRAYSET(aonew, struct ___Object___*, k++,tobjcpy);
588 aonew->___cachedCode___=k;
/* ...and NULL-pad the remaining capacity. */
589 for(; k<livecount; k++) {
590 ARRAYSET(aonew, struct ___Object___*, k, NULL);
593 /* No object live anymore */
/* tomalloc: bump-allocate `size` bytes in to-space during collection.
   Returns the previous to_heapptr; the advance of to_heapptr (and,
   presumably, alignment handling) is in lines elided from this view.
   Called only while the collector holds the heap — no locking here. */
604 void * tomalloc(int size) {
605 void * ptr=to_heapptr;
612 #if defined(THREADS)||defined(DSTM)||defined(STM)
/* checkcollect: GC safepoint for mutator threads.  Parks this thread
   (publishing `ptr` as its shadow-stack root set) so a pending
   collection can proceed; presumably resumes via restartaftergc()
   in an elided line. */
613 void checkcollect(void * ptr) {
614 stopforgc((struct garbagelist *)ptr);
/* checkcollect2: safepoint variant (FASTCHECK — confirm guard in full
   file) that also protects `revertlist` across the collection.  A fake
   one-slot garbagelist is built on the stack: {size=1, array[0]=ptr,
   next=revertlist-slot}, so the collector forwards both pointers; the
   possibly-updated revertlist is read back afterwards.
   NOTE(review): the (int) casts of pointers assume a 32-bit target —
   this breaks on LP64; would need intptr_t in a portability pass. */
619 void checkcollect2(void * ptr) {
620 int ptrarray[]={1, (int)ptr, (int) revertlist};
621 stopforgc((struct garbagelist *)ptrarray);
623 revertlist=(struct ___Object___*)ptrarray[2];
/* stopforgc: park this thread for a collection.
   Publishes everything the collector needs from this thread on its
   listitem: shadow stack (`ptr`), held locks, transaction cache
   (table/list/structs/size), new and locked object lists, and the
   address of its private allocation block (`memorybase`) so collect()
   can invalidate it.  Two variants are visible: one using the __thread
   `litem`, one fetching the listitem via pthread_getspecific(litemkey).
   Finally registers on the global list under gclistlock and signals
   gccond when this is the last thread the collector is waiting for.
   NOTE(review): interior lines (e.g. litem.stackptr assignment and the
   list linking) are elided from this view. */
627 void stopforgc(struct garbagelist * ptr) {
631 litem.locklist=pthread_getspecific(threadlocks);
634 litem.tc_size=c_size;
635 litem.tc_table=&c_table;
636 litem.tc_list=&c_list;
637 litem.tc_structs=&c_structs;
638 litem.objlist=newobjs;
640 litem.lockedlist=lockedobjs;
/* Address of this thread's bump-allocation base, so the collector can
   reset it (see `*(listptr->base)=NULL;` in collect()). */
642 litem.base=&memorybase;
/* Alternate (non-__thread) variant: listitem lives in TLS via key. */
646 struct listitem *litem=pthread_getspecific(litemkey);
649 litem->locklist=pthread_getspecific(threadlocks);
652 pthread_mutex_lock(&gclistlock);
654 if ((listcount+1)==threadcount) {
655 //only do wakeup if we are ready to GC
656 pthread_cond_signal(&gccond);
658 pthread_mutex_unlock(&gclistlock);
/* restartaftergc: resume this thread after a collection.
   Lock/unlock of gclock acts as a barrier — it cannot be acquired while
   the collector holds it, so returning from the first pair means GC is
   done.  Then deregisters from the stopped-thread list under gclistlock
   (the actual unlinking lines are elided from this view). */
661 void restartaftergc() {
663 pthread_mutex_lock(&gclock); // Wait for GC
664 pthread_mutex_unlock(&gclock);
666 pthread_mutex_lock(&gclistlock);
668 pthread_mutex_unlock(&gclistlock);
672 #if defined(STM)||defined(THREADS)
/* Per-thread allocation granule: threads grab at least this many bytes
   from the shared heap at a time, then bump-allocate locally. */
673 #define MEMORYBLOCK 65536
674 void * helper(struct garbagelist *, int);
/* mygcmalloc (STM/THREADS fast path): lock-free bump allocation from
   the thread-local [memorybase, memorytop) block.  When the block is
   exhausted (or too small for `size`), helper() refills it from the
   shared heap — taking max(size, MEMORYBLOCK) so large requests fit.
   Returns the old memorybase; the bump of memorybase past the returned
   chunk is in lines elided from this view. */
675 void * mygcmalloc(struct garbagelist * stackptr, int size) {
678 if (memorybase==NULL||(memorybase+size)>memorytop) {
679 int toallocate=(size>MEMORYBLOCK)?size:MEMORYBLOCK;
680 memorybase=helper(stackptr, toallocate);
681 memorytop=memorybase+toallocate;
683 char *retvalue=memorybase;
/* helper (STM/THREADS) / mygcmalloc (other configs): the slow-path
   allocator that owns the shared GC heap.  Responsibilities visible:
     - serialize allocators/collectors via gclock (trylock loop);
     - lazily create the initial from/to semispaces (INITIALHEAPSIZE);
     - when the GC watermark (curr_heapgcpoint) is crossed: grow
       to-space if needed (HEAPSIZE heuristic, 8-byte rounded), run
       collect(), print stats, flip the semispaces, and redo GC if even
       the flipped heap is too full.
   NOTE(review): many interior lines (the actual bump allocation, the
   GARBAGESTATS guards, the exit(-1) on malloc failure) are elided. */
688 void * helper(struct garbagelist * stackptr, int size) {
690 void * mygcmalloc(struct garbagelist * stackptr, int size) {
693 #if defined(THREADS)||defined(DSTM)||defined(STM)
/* Spin on the heap lock; presumably the elided loop body parks this
   thread for a concurrent GC rather than busy-waiting — confirm. */
694 while (pthread_mutex_trylock(&gclock)!=0) {
/* Past the watermark: either first-time heap setup or a collection. */
703 if (curr_heapptr>curr_heapgcpoint) {
704 if (curr_heapbase==0) {
705 /* Need to allocate base heap */
706 curr_heapbase=malloc(INITIALHEAPSIZE);
707 if (curr_heapbase==NULL) {
708 printf("malloc failed\n");
711 bzero(curr_heapbase, INITIALHEAPSIZE);
712 curr_heaptop=curr_heapbase+INITIALHEAPSIZE;
/* Collect once the heap is GCPOINT (95%) full. */
713 curr_heapgcpoint=((char *) curr_heapbase)+GCPOINT(INITIALHEAPSIZE);
/* First allocation is satisfied directly from the fresh heap. */
714 curr_heapptr=curr_heapbase+size;
716 to_heapbase=malloc(INITIALHEAPSIZE);
717 if (to_heapbase==NULL) {
718 printf("malloc failed\n");
721 to_heaptop=to_heapbase+INITIALHEAPSIZE;
722 to_heapptr=to_heapbase;
724 #if defined(THREADS)||defined(DSTM)||defined(STM)
725 pthread_mutex_unlock(&gclock);
730 /* Grow the to heap if necessary */
732 int curr_heapsize=curr_heaptop-curr_heapbase;
733 int to_heapsize=to_heaptop-to_heapbase;
/* Size target from last survivor volume plus this request, doubled
   (HEAPSIZE macro), rounded up to an 8-byte multiple, and never
   smaller than the current heap. */
736 last_heapsize=HEAPSIZE(lastgcsize, size);
737 if ((last_heapsize&7)!=0)
738 last_heapsize+=(8-(last_heapsize%8));
740 if (curr_heapsize>last_heapsize)
741 last_heapsize=curr_heapsize;
742 if (last_heapsize>to_heapsize) {
/* Replace to-space with a larger one (old free elided). */
744 to_heapbase=malloc(last_heapsize);
745 if (to_heapbase==NULL) {
746 printf("Error Allocating enough memory\n");
749 to_heaptop=to_heapbase+last_heapsize;
750 to_heapptr=to_heapbase;
754 /* Do our collection */
757 /* Update stat on previous gc size */
758 lastgcsize=(to_heapptr-to_heapbase)+size;
761 printf("Garbage collected: Old bytes: %u\n", curr_heapptr-curr_heapbase);
762 printf("New space: %u\n", to_heapptr-to_heapbase);
763 printf("Total space: %u\n", to_heaptop-to_heapbase);
/* Per-type survivor volume, accumulated by gc_createcopy. */
766 for(i=0;i<MAXSTATS;i++) {
767 if (garbagearray[i]!=0)
768 printf("Type=%d Size=%u\n", i, garbagearray[i]);
772 /* Flip to/curr heaps */
774 void * tmp=to_heapbase;
775 to_heapbase=curr_heapbase;
779 to_heaptop=curr_heaptop;
/* Serve this request from the freshly compacted heap. */
783 curr_heapptr=to_heapptr+size;
784 curr_heapgcpoint=((char *) curr_heapbase)+GCPOINT(curr_heaptop-curr_heapbase);
785 to_heapptr=to_heapbase;
787 /* Not enough room :(, redo gc */
788 if (curr_heapptr>curr_heapgcpoint) {
789 #if defined(THREADS)||defined(DSTM)||defined(STM)
790 pthread_mutex_unlock(&gclock);
/* Recurse: the growth heuristic will pick a larger heap next time. */
792 return mygcmalloc(stackptr, size);
/* Zero the retired semispace so future allocations start cleared. */
795 bzero(tmp, curr_heaptop-tmp);
796 #if defined(THREADS)||defined(DSTM)||defined(STM)
797 pthread_mutex_unlock(&gclock);
/* Fast path (no GC needed): bump-allocate (elided) and release. */
802 #if defined(THREADS)||defined(DSTM)||defined(STM)
803 pthread_mutex_unlock(&gclock);
/* gc_createcopy: copy `orig` into to-space (if not already copied) and
   write the copy's address through copy_ptr.
   Contract (from visible lines): an already-forwarded object has its
   forwarding pointer stored at word [1]; in that case the existing copy
   is returned.  Otherwise the object (or array) is copied with
   tomalloc, per-type survivor stats are accumulated, and the forwarding
   pointer is installed at word [1] of the original.  The return value
   presumably distinguishes "newly copied" (enqueue for scanning) from
   "already forwarded" — the return statements are elided from this view,
   as is the end of the function, which continues past this excerpt. */
810 int gc_createcopy(void * orig, void ** copy_ptr) {
/* Word [0] is the type id; type==-1 (forwarded) handling is elided. */
815 int type=((int *)orig)[0];
/* Already copied: hand back the forwarding pointer from word [1]. */
817 *copy_ptr=((void **)orig)[1];
820 if (type<NUMCLASSES) {
821 /* We have a normal object */
/* Variant with an object header (DSTM/STM — confirm guards): copy
   header + payload, then step past the header so the returned
   pointer addresses the object body. */
823 int size=classsize[type]+sizeof(objheader_t);
824 void *newobj=tomalloc(size);
825 memcpy(newobj,((char *) orig)-sizeof(objheader_t), size);
826 newobj=((char *)newobj)+sizeof(objheader_t);
/* Headerless variant: plain payload copy. */
828 int size=classsize[type];
829 void *newobj=tomalloc(size);
830 memcpy(newobj, orig, size);
/* Survivor bytes by type (printed by the allocator after GC). */
833 garbagearray[type]+=size;
/* Install forwarding pointer in the from-space original. */
836 ((void **)orig)[1]=newobj;
840 /* We have an array */
/* Array size = header struct + length*elementsize, with the same
   with/without-objheader variants as above. */
841 struct ArrayObject *ao=(struct ArrayObject *)orig;
842 int elementsize=classsize[type];
843 int length=ao->___length___;
845 int size=sizeof(struct ArrayObject)+length*elementsize+sizeof(objheader_t);
846 void *newobj=tomalloc(size);
847 memcpy(newobj, ((char*)orig)-sizeof(objheader_t), size);
848 newobj=((char *)newobj)+sizeof(objheader_t);
850 int size=sizeof(struct ArrayObject)+length*elementsize;
851 void *newobj=tomalloc(size);
852 memcpy(newobj, orig, size);
855 garbagearray[type]+=size;
858 ((void **)orig)[1]=newobj;