3 #include "structdefs.h"
5 #include "SimpleHash.h"
7 #include "GenericHashtable.h"
9 #if defined(THREADS) || defined(DSTM) || defined(STM)
25 #define INITIALHEAPSIZE 128*1024*1024
26 #define GCPOINT(x) ((int)((x)*0.95))
27 /* This define takes in how full the heap is initially and returns a new heap size to use */
28 #define HEAPSIZE(x,y) ((int)(x+y))*2
31 extern struct genhashtable * activetasks;
33 extern struct parameterwrapper * objectqueues[NUMCLASSES];
35 extern struct genhashtable * failedtasks;
36 extern struct taskparamdescriptor *currtpd;
37 extern struct ctable *forward;
38 extern struct ctable *reverse;
39 extern struct RuntimeHash *fdtoobject;
/* Stop-the-world support for the multi-threaded configurations: every
 * running thread registers a 'struct listitem' on the global 'list' so
 * the collector can find all thread roots.  (Lines are elided between
 * the visible ones in this excerpt.) */
42 #if defined(THREADS) || defined(DSTM) || defined(STM)
44 struct listitem * list=NULL;
/* Per-thread registration record (TLS); its fields are published by
 * stopforgc() below before the thread parks for a collection. */
47 __thread struct listitem litem;
51 //Need to check if pointers are transaction pointers
52 //this also catches the special flag value of 1 for local copies
/* ENQUEUE(orig, dst): if 'orig' is a genuine from-space pointer (not a
 * tagged flag value, and inside [curr_heapbase,curr_heaptop)), obtain
 * its to-space copy via gc_createcopy() and store the copy into 'dst'.
 * Several configuration-specific variants follow (DSTM/STM, FASTCHECK,
 * plain); SENQUEUE is the unconditional forwarding form.
 * NOTE(review): the token '©' on each gc_createcopy line looks like a
 * mis-encoded '&copy' (address of a local 'copy' variable) -- an HTML
 * entity artifact from extraction; confirm against the original source.
 * Continuation lines of these macros are elided in this excerpt, so the
 * bodies shown here are incomplete. */
54 #define ENQUEUE(orig, dst) \
55 if ((!(((unsigned int)orig)&0x1))) { \
56 if (orig>=curr_heapbase&&orig<curr_heaptop) { \
58 if (gc_createcopy(orig,©)) \
64 #define ENQUEUE(orig, dst) \
65 if (orig>=curr_heapbase&&orig<curr_heaptop) { \
67 if (gc_createcopy(orig,©)) \
71 #define SENQUEUE(orig, dst) \
74 if (gc_createcopy(orig,©)) \
78 #elif defined(FASTCHECK)
79 #define ENQUEUE(orig, dst) \
80 if (((unsigned int)orig)!=1) { \
82 if (gc_createcopy(orig,©)) \
86 #define ENQUEUE(orig, dst) \
88 if (gc_createcopy(orig,©)) \
/* Tail of 'struct pointerblock' (the struct's opening lines are elided):
 * a fixed-size chunk of a linked queue of pointers awaiting scanning. */
95 struct pointerblock *next;
/* Semi-space heap bookkeeping.  curr_* describe the from-space the
 * mutator allocates into (gc is triggered once curr_heapptr passes
 * curr_heapgcpoint -- see helper/mygcmalloc below); to_heapbase is the
 * to-space that live objects are copied into during collection. */
98 void * curr_heapbase=0;
99 void * curr_heapptr=0;
100 void * curr_heapgcpoint=0;
101 void * curr_heaptop=0;
103 void * to_heapbase=0;
/* Grey-object work queue: enqueue() appends at 'head', dequeue() drains
 * from 'tail'.  'spare' presumably caches one freed block for reuse --
 * the recycling lines are elided; verify against the full source. */
108 struct pointerblock *head=NULL;
110 struct pointerblock *tail=NULL;
112 struct pointerblock *spare=NULL;
/* enqueue: append 'ptr' to the grey-pointer work queue.  When the head
 * block is full (headindex==NUMPTRS) a new pointerblock is obtained and
 * linked in (the linking/reset lines are elided in this excerpt). */
114 void enqueue(void *ptr) {
115 if (headindex==NUMPTRS) {
116 struct pointerblock * tmp;
121 tmp=malloc(sizeof(struct pointerblock));
126 head->ptrs[headindex++]=ptr;
/* Body of dequeue (function header elided in this excerpt): pop the
 * next pointer from the tail block; when tailindex reaches NUMPTRS the
 * emptied block is advanced past -- and, judging by 'spare' above,
 * presumably recycled -- in the elided lines. */
130 if (tailindex==NUMPTRS) {
131 struct pointerblock *tmp=tail;
139 return tail->ptrs[tailindex++];
/* fixobjlist: forward every object pointer recorded in an objlist
 * (new-object / locked-object log) to its to-space copy, in place,
 * via SENQUEUE. */
143 void fixobjlist(struct objlist * ptr) {
146 for(i=0;i<ptr->offset;i++) {
147 SENQUEUE(ptr->objs[i], ptr->objs[i]);
/* fixtable: rebuild a transaction-cache chained hash table after GC.
 * Allocates a fresh bucket array, forwards every cached value to its
 * to-space copy (SENQUEUE), rewrites the pointer fields *inside* each
 * cached object, and reinserts each entry at the bucket computed from
 * its key.  Many lines are elided in this excerpt, so commentary on the
 * hidden branches is best-effort. */
153 void fixtable(chashlistnode_t ** tc_table, chashlistnode_t **tc_list, cliststruct_t **cstr, unsigned int tc_size) {
/* Bucket index is ((key & mask) >> 4): keys' low 4 bits are dropped
 * (alignment), leaving tc_size buckets. */
154 unsigned int mask=(tc_size<<4)-1;
155 chashlistnode_t *node=calloc(tc_size, sizeof(chashlistnode_t));
156 chashlistnode_t *ptr=*tc_table;
157 chashlistnode_t *curr;
161 chashlistnode_t *newlist=NULL;
162 for(i=0;i<tc_size;i++) {
165 do { //Inner loop to go through linked lists
167 chashlistnode_t *tmp,*next;
169 if ((key=(void *)curr->key) == 0) { //Exit inner loop if there the first element is 0
170 break; //key = val =0 for element if not present within the hash table
/* Only values inside the from-space need forwarding/rewriting. */
173 if (curr->val>=curr_heapbase&&curr->val<curr_heaptop) {
174 SENQUEUE(curr->val, curr->val);
176 //rewrite transaction cache entry
/* Word [0] of an object is its type; pointerarray[type] describes the
 * object's pointer layout (0 = no pointers, 1 = pointer array,
 * otherwise an offset table -- see the branches below). */
177 void *vptr=curr->val;
178 int type=((int *)vptr)[0];
179 unsigned INTPTR *pointer=pointerarray[type];
181 //array of primitives - do nothing
182 struct ArrayObject *ao=(struct ArrayObject *) vptr;
183 SENQUEUE((void *)ao->___objlocation___, *((void **)&ao->___objlocation___));
184 } else if (((INTPTR)pointer)==1) {
/* Array of object pointers: element storage starts one int past the
 * ___length___ field. */
186 struct ArrayObject *ao=(struct ArrayObject *) vptr;
187 int length=ao->___length___;
189 SENQUEUE((void *)ao->___objlocation___, *((void **)&ao->___objlocation___));
/* NOTE(review): this inner loop appears to reuse the outer bucket
 * index 'i'; unless an elided line declares a local shadow, that would
 * clobber the bucket counter -- verify against the full source. */
190 for(i=0; i<length; i++) {
191 void *objptr=((void **)(((char *)&ao->___length___)+sizeof(int)))[i];
192 SENQUEUE(objptr, ((void **)(((char *)&ao->___length___)+sizeof(int)))[i]);
/* Normal object: pointer[0] holds the field count, pointer[1..size]
 * hold byte offsets of the pointer fields. */
195 INTPTR size=pointer[0];
197 for(i=1; i<=size; i++) {
198 unsigned int offset=pointer[i];
199 void * objptr=*((void **)(((char *)vptr)+offset));
200 SENQUEUE(objptr, *((void **)(((char *)vptr)+offset)));
/* Reinsert the (possibly updated) entry into the new bucket array. */
206 index = (((unsigned INTPTR)key) & mask) >>4;
210 // Insert into the new table
212 tmp->key = curr->key;
213 tmp->val = curr->val;
/* Collision: the first node of a bucket lives inline in the array;
 * later nodes come from the cliststruct_t block pool (*cstr), growing
 * the pool one calloc'd block at a time when full. */
216 } else if (isfirst) {
217 chashlistnode_t *newnode;
218 if ((*cstr)->num<NUMCLIST) {
219 newnode=&(*cstr)->array[(*cstr)->num];
223 cliststruct_t *tcl=calloc(1,sizeof(cliststruct_t));
226 newnode=&tcl->array[0];
229 newnode->key = curr->key;
230 newnode->val = curr->val;
231 newnode->next = tmp->next;
232 newnode->lnext=newlist;
238 curr->next=tmp->next;
/* Queue-emptiness test: producer and consumer meet in the same block at
 * the same index.  (The enclosing function's header and return lines
 * are elided in this excerpt.) */
252 if ((head==tail)&&(tailindex==headindex))
/* Head of the separate queue of tag descriptors seen during the scan;
 * processed last by collect() so tags do not keep objects alive. */
258 struct pointerblock *taghead=NULL;
/* enqueuetag: record a tag descriptor for the post-scan fixup pass in
 * collect().  Grows the tag queue one pointerblock at a time; the
 * linking/reset lines are elided in this excerpt. */
261 void enqueuetag(struct ___TagDescriptor___ *ptr) {
262 if (tagindex==NUMPTRS) {
263 struct pointerblock * tmp=malloc(sizeof(struct pointerblock));
268 taghead->ptrs[tagindex++]=ptr;
272 #if defined(STM)||defined(THREADS)
/* Per-thread bump-allocation window handed out by helper(); consumed by
 * mygcmalloc() below.  collect() resets it to NULL through the
 * listitem.base pointer each thread publishes in stopforgc(). */
273 __thread char * memorybase=NULL;
274 __thread char * memorytop=NULL;
/* collect: stop-the-world copying collection.  Phases visible in this
 * (heavily elided) excerpt: (1) wait until every other registered
 * thread has parked in stopforgc(); (2) set up fresh grey-pointer and
 * tag queues; (3) scan roots -- this thread's stack frames, each parked
 * thread's stack/lock list/transaction cache/object logs, the global
 * object sets, forward/reverse tables, fd-to-object map, and the
 * current/active/failed task descriptors; (4) drain the grey queue,
 * rewriting pointer fields inside each to-space copy; (5) patch tag
 * references last so tags alone do not keep objects alive. */
278 void collect(struct garbagelist * stackptr) {
279 #if defined(THREADS)||defined(DSTM)||defined(STM)
/* Wait (on gccond) until all other threads have stopped for GC. */
281 pthread_mutex_lock(&gclistlock);
283 if ((listcount+1)==threadcount) {
284 break; /* Have all other threads stopped */
286 pthread_cond_wait(&gccond, &gclistlock);
/* Fresh (empty) grey-pointer queue and tag queue for this cycle. */
293 head=tail=malloc(sizeof(struct pointerblock));
299 taghead=malloc(sizeof(struct pointerblock));
/* This thread's own transaction cache and locked-object log. */
306 fixtable(&c_table, &c_list, &c_structs, c_size);
309 fixobjlist(lockedobjs);
315 /* Check current stack */
316 #if defined(THREADS)||defined(DSTM)||defined(STM)
318 struct listitem *listptr=list;
/* Walk this thread's chain of garbagelist stack frames. */
322 while(stackptr!=NULL) {
324 for(i=0; i<stackptr->size; i++) {
325 void * orig=stackptr->array[i];
326 ENQUEUE(orig, stackptr->array[i]);
328 stackptr=stackptr->next;
330 #if defined(THREADS)||defined(DSTM)||defined(STM)
331 /* Go to next thread */
/* Skip our own registration record -- our stack was scanned above.
 * Two lookups appear here for the two TLS configurations. */
334 if (listptr==&litem) {
335 listptr=listptr->next;
339 struct listitem *litem=pthread_getspecific(litemkey);
340 if (listptr==litem) {
341 listptr=listptr->next;
/* Forward each parked thread's held-lock list, transaction cache and
 * object logs; then invalidate its private allocation window. */
348 void * orig=listptr->locklist;
349 ENQUEUE(orig, listptr->locklist);
352 if ((*listptr->tc_table)!=NULL) {
353 fixtable(listptr->tc_table, listptr->tc_list, listptr->tc_structs, listptr->tc_size);
354 fixobjlist(listptr->objlist);
356 fixobjlist(listptr->lockedlist);
359 *(listptr->base)=NULL;
361 stackptr=listptr->stackptr;
362 listptr=listptr->next;
370 ENQUEUE(___fcrevert___, ___fcrevert___);
375 /* Update objectsets */
377 for(i=0; i<NUMCLASSES; i++) {
378 #if !defined(MULTICORE)
379 struct parameterwrapper * p=objectqueues[i];
381 struct ObjectHash * set=p->objectset;
382 struct ObjectNode * ptr=set->listhead;
384 void *orig=(void *)ptr->key;
385 ENQUEUE(orig, *((void **)(&ptr->key)));
388 ObjectHashrehash(set); /* Rehash the table */
/* Forward table: keys are objects; reverse table: values are. */
397 struct cnode * ptr=forward->listhead;
399 void * orig=(void *)ptr->key;
400 ENQUEUE(orig, *((void **)(&ptr->key)));
403 crehash(forward); /* Rehash the table */
407 struct cnode * ptr=reverse->listhead;
409 void *orig=(void *)ptr->val;
410 ENQUEUE(orig, *((void**)(&ptr->val)));
/* File-descriptor-to-object map. */
417 struct RuntimeNode * ptr=fdtoobject->listhead;
419 void *orig=(void *)ptr->data;
420 ENQUEUE(orig, *((void**)(&ptr->data)));
426 /* Update current task descriptor */
428 for(i=0; i<currtpd->numParameters; i++) {
429 void *orig=currtpd->parameterArray[i];
430 ENQUEUE(orig, currtpd->parameterArray[i]);
435 /* Update active tasks */
437 struct genpointerlist * ptr=activetasks->list;
439 struct taskparamdescriptor *tpd=ptr->src;
441 for(i=0; i<tpd->numParameters; i++) {
442 void * orig=tpd->parameterArray[i];
443 ENQUEUE(orig, tpd->parameterArray[i]);
447 genrehash(activetasks);
450 /* Update failed tasks */
452 struct genpointerlist * ptr=failedtasks->list;
454 struct taskparamdescriptor *tpd=ptr->src;
456 for(i=0; i<tpd->numParameters; i++) {
457 void * orig=tpd->parameterArray[i];
458 ENQUEUE(orig, tpd->parameterArray[i]);
462 genrehash(failedtasks);
/* Drain the grey queue.  'ptr' is a from-space original; word [1]
 * holds its forwarding pointer (the to-space copy), whose pointer
 * fields are rewritten below according to pointerarray[type]. */
467 void * ptr=dequeue();
468 void *cpy=((void **)ptr)[1];
469 int type=((int *)cpy)[0];
470 unsigned INTPTR * pointer;
474 /* Nothing is inside */
479 pointer=pointerarray[type];
481 /* Array of primitives */
483 #if defined(DSTM)||defined(FASTCHECK)
484 struct ArrayObject *ao=(struct ArrayObject *) ptr;
485 struct ArrayObject *ao_cpy=(struct ArrayObject *) cpy;
486 ENQUEUE((void *)ao->___nextobject___, *((void **)&ao_cpy->___nextobject___));
487 ENQUEUE((void *)ao->___localcopy___, *((void **)&ao_cpy->___localcopy___));
490 struct ArrayObject *ao=(struct ArrayObject *) ptr;
491 struct ArrayObject *ao_cpy=(struct ArrayObject *) cpy;
492 SENQUEUE((void *)ao->___objlocation___, *((void **)&ao_cpy->___objlocation___));
494 } else if (((INTPTR)pointer)==1) {
495 /* Array of pointers */
496 struct ArrayObject *ao=(struct ArrayObject *) ptr;
497 struct ArrayObject *ao_cpy=(struct ArrayObject *) cpy;
498 #if (defined(DSTM)||defined(FASTCHECK))
499 ENQUEUE((void *)ao->___nextobject___, *((void **)&ao_cpy->___nextobject___));
500 ENQUEUE((void *)ao->___localcopy___, *((void **)&ao_cpy->___localcopy___));
503 SENQUEUE((void *)ao->___objlocation___, *((void **)&ao_cpy->___objlocation___));
/* Element storage begins one int past ___length___; read elements from
 * the original, store forwarded pointers into the copy. */
505 int length=ao->___length___;
507 for(i=0; i<length; i++) {
508 void *objptr=((void **)(((char *)&ao->___length___)+sizeof(int)))[i];
509 ENQUEUE(objptr, ((void **)(((char *)&ao_cpy->___length___)+sizeof(int)))[i]);
/* Normal object: pointer[0] is the field count, pointer[1..size] the
 * byte offsets of pointer fields. */
512 INTPTR size=pointer[0];
514 for(i=1; i<=size; i++) {
515 unsigned int offset=pointer[i];
516 void * objptr=*((void **)(((char *)ptr)+offset));
517 ENQUEUE(objptr, *((void **)(((char *)cpy)+offset)));
525 #if defined(THREADS)||defined(DSTM)||defined(STM)
527 pthread_mutex_unlock(&gclistlock);
532 /* Fix up the references from tags. This can't be done earlier,
533 because we don't want tags to keep objects alive */
535 while(taghead!=NULL) {
537 struct pointerblock *tmp=taghead->next;
538 for(i=0; i<tagindex; i++) {
539 struct ___TagDescriptor___ *tagd=taghead->ptrs[i];
540 struct ___Object___ *obj=tagd->flagptr;
541 struct ___TagDescriptor___ *copy=((struct ___TagDescriptor___**)tagd)[1];
543 /* Zero object case */
544 } else if (obj->type==-1) {
545 /* Single object case */
/* type==-1 appears to be the forwarding marker (cf. gc_createcopy);
 * word [1] of the original then holds the new location -- verify
 * against the elided gc_createcopy lines. */
546 copy->flagptr=((struct ___Object___**)obj)[1];
547 } else if (obj->type==OBJECTARRAYTYPE) {
/* Tag bound to several objects: compact the member array down to the
 * survivors, rounded up to a multiple of OBJECTARRAYINTERVAL. */
549 struct ArrayObject *ao=(struct ArrayObject *) obj;
553 struct ArrayObject *aonew;
555 /* Count live objects */
556 for(j=0; j<ao->___cachedCode___; j++) {
557 struct ___Object___ * tobj=ARRAYGET(ao, struct ___Object___ *, j);
562 livecount=((livecount-1)/OBJECTARRAYINTERVAL+1)*OBJECTARRAYINTERVAL;
563 aonew=(struct ArrayObject *) tomalloc(sizeof(struct ArrayObject)+sizeof(struct ___Object___*)*livecount);
564 memcpy(aonew, ao, sizeof(struct ArrayObject));
565 aonew->type=OBJECTARRAYTYPE;
566 aonew->___length___=livecount;
/* Copy the forwarded members, then NULL-pad the rounded-up tail. */
568 for(j=0; j<ao->___cachedCode___; j++) {
569 struct ___Object___ * tobj=ARRAYGET(ao, struct ___Object___ *, j);
570 if (tobj->type==-1) {
571 struct ___Object___ * tobjcpy=((struct ___Object___**)tobj)[1];
572 ARRAYSET(aonew, struct ___Object___*, k++,tobjcpy);
575 aonew->___cachedCode___=k;
576 for(; k<livecount; k++) {
577 ARRAYSET(aonew, struct ___Object___*, k, NULL);
580 /* No object live anymore */
/* tomalloc: bump-allocate 'size' bytes from the to-space during
 * collection (the alignment and pointer-advance lines are elided in
 * this excerpt). */
591 void * tomalloc(int size) {
592 void * ptr=to_heapptr;
599 #if defined(THREADS)||defined(DSTM)||defined(STM)
/* checkcollect: GC safepoint -- park this thread, publishing 'ptr' as
 * its garbagelist of stack roots, if a collection is pending. */
600 void checkcollect(void * ptr) {
601 stopforgc((struct garbagelist *)ptr);
/* checkcollect2: safepoint variant that also protects 'revertlist'.
 * Builds a fake one-slot garbagelist on the stack {size=1, ptr,
 * revertlist} so both pointers are treated as roots, then reloads
 * revertlist from the slot the collector may have rewritten.
 * NOTE(review): '(int)ptr' casts a pointer to int -- only valid on
 * 32-bit targets; flag for portability review. */
606 void checkcollect2(void * ptr) {
607 int ptrarray[]={1, (int)ptr, (int) revertlist};
608 stopforgc((struct garbagelist *)ptrarray);
610 revertlist=(struct ___Object___*)ptrarray[2];
/* stopforgc: park the calling mutator thread for a pending collection.
 * Publishes this thread's roots -- stack frame list, held locks,
 * transaction cache, new/locked object logs, and the private bump
 * allocation window -- through its listitem, then bumps the stopped
 * count and signals gccond when it is the last thread to stop.  The
 * list-linking lines and the configuration #if split between the
 * TLS-litem and pthread_getspecific paths are elided here. */
614 void stopforgc(struct garbagelist * ptr) {
618 litem.locklist=pthread_getspecific(threadlocks);
621 litem.tc_size=c_size;
622 litem.tc_table=&c_table;
623 litem.tc_list=&c_list;
624 litem.tc_structs=&c_structs;
625 litem.objlist=newobjs;
627 litem.lockedlist=lockedobjs;
/* Expose the bump window so collect() can NULL it via *base. */
629 litem.base=&memorybase;
/* Alternate configuration: the registration record lives in pthread
 * TLS rather than a __thread variable. */
633 struct listitem *litem=pthread_getspecific(litemkey);
636 litem->locklist=pthread_getspecific(threadlocks);
639 pthread_mutex_lock(&gclistlock);
641 if ((listcount+1)==threadcount) {
642 //only do wakeup if we are ready to GC
643 pthread_cond_signal(&gccond);
645 pthread_mutex_unlock(&gclistlock);
/* restartaftergc: called by a parked thread once collection finishes.
 * Blocking on gclock (held by the collector) serves as the barrier;
 * the thread then deregisters from the stopped list under gclistlock
 * (the unlink/decrement lines are elided in this excerpt). */
648 void restartaftergc() {
650 pthread_mutex_lock(&gclock); // Wait for GC
651 pthread_mutex_unlock(&gclock);
653 pthread_mutex_lock(&gclistlock);
655 pthread_mutex_unlock(&gclistlock);
659 #if defined(STM)||defined(THREADS)
/* Granularity of the per-thread allocation windows below. */
660 #define MEMORYBLOCK 65536
661 void * helper(struct garbagelist *, int);
/* mygcmalloc (STM/THREADS fast path): thread-local bump allocator.
 * When the current window cannot satisfy 'size', fetch a new window of
 * at least MEMORYBLOCK bytes from the shared heap via helper(), then
 * carve the request off its front.  The bump-advance and return lines
 * are elided in this excerpt. */
662 void * mygcmalloc(struct garbagelist * stackptr, int size) {
665 if (memorybase==NULL||(memorybase+size)>memorytop) {
666 int toallocate=(size>MEMORYBLOCK)?size:MEMORYBLOCK;
667 memorybase=helper(stackptr, toallocate);
668 memorytop=memorybase+toallocate;
670 char *retvalue=memorybase;
/* helper / mygcmalloc (slow path): shared-heap allocator and GC driver.
 * The two headers below are the alternatives of an elided #if/#else:
 * under STM/THREADS the entry point is helper() (fed by the per-thread
 * bump allocator above); otherwise mygcmalloc() is defined directly.
 * Lazily creates the initial semi-spaces, grows the to-space when the
 * projected live size exceeds it, runs the collection, flips the
 * spaces, and retries if the heap is still too full afterwards. */
675 void * helper(struct garbagelist * stackptr, int size) {
677 void * mygcmalloc(struct garbagelist * stackptr, int size) {
680 #if defined(THREADS)||defined(DSTM)||defined(STM)
/* Trylock loop: presumably lets this thread service a stop-for-GC
 * request instead of deadlocking against an in-progress collection --
 * the loop body is elided; verify. */
681 while (pthread_mutex_trylock(&gclock)!=0) {
/* The bump pointer has passed the GC trigger point. */
690 if (curr_heapptr>curr_heapgcpoint) {
691 if (curr_heapbase==0) {
692 /* Need to allocate base heap */
693 curr_heapbase=malloc(INITIALHEAPSIZE)
;
694 if (curr_heapbase==NULL) {
695 printf("malloc failed\n");
698 bzero(curr_heapbase, INITIALHEAPSIZE);
699 curr_heaptop=curr_heapbase+INITIALHEAPSIZE;
700 curr_heapgcpoint=((char *) curr_heapbase)+GCPOINT(INITIALHEAPSIZE);
701 curr_heapptr=curr_heapbase+size;
703 to_heapbase=malloc(INITIALHEAPSIZE);
704 if (to_heapbase==NULL) {
705 printf("malloc failed\n");
708 to_heaptop=to_heapbase+INITIALHEAPSIZE;
709 to_heapptr=to_heapbase;
711 #if defined(THREADS)||defined(DSTM)||defined(STM)
712 pthread_mutex_unlock(&gclock);
717 /* Grow the to heap if necessary */
719 int curr_heapsize=curr_heaptop-curr_heapbase;
720 int to_heapsize=to_heaptop-to_heapbase;
/* Target: HEAPSIZE(lastgcsize,size), rounded up to 8 bytes and never
 * smaller than the current heap. */
723 last_heapsize=HEAPSIZE(lastgcsize, size);
724 if ((last_heapsize&7)!=0)
725 last_heapsize+=(8-(last_heapsize%8));
727 if (curr_heapsize>last_heapsize)
728 last_heapsize=curr_heapsize;
729 if (last_heapsize>to_heapsize) {
731 to_heapbase=malloc(last_heapsize);
732 if (to_heapbase==NULL) {
733 printf("Error Allocating enough memory\n");
736 to_heaptop=to_heapbase+last_heapsize;
737 to_heapptr=to_heapbase;
741 /* Do our collection */
744 /* Update stat on previous gc size */
745 lastgcsize=(to_heapptr-to_heapbase)+size;
748 printf("Garbage collected: Old bytes: %u\n", curr_heapptr-curr_heapbase);
749 printf("New space: %u\n", to_heapptr-to_heapbase);
750 printf("Total space: %u\n", to_heaptop-to_heapbase);
752 /* Flip to/curr heaps */
754 void * tmp=to_heapbase;
755 to_heapbase=curr_heapbase;
759 to_heaptop=curr_heaptop;
/* Reserve this request in the flipped heap; recompute the trigger. */
763 curr_heapptr=to_heapptr+size;
764 curr_heapgcpoint=((char *) curr_heapbase)+GCPOINT(curr_heaptop-curr_heapbase);
765 to_heapptr=to_heapbase;
767 /* Not enough room :(, redo gc */
768 if (curr_heapptr>curr_heapgcpoint) {
769 #if defined(THREADS)||defined(DSTM)||defined(STM)
770 pthread_mutex_unlock(&gclock);
772 return mygcmalloc(stackptr, size);
/* NOTE(review): zeroes from 'tmp' up to curr_heaptop -- the exact
 * bounds depend on elided flip assignments (curr_heapbase=tmp?);
 * verify against the full source. */
775 bzero(tmp, curr_heaptop-tmp);
776 #if defined(THREADS)||defined(DSTM)||defined(STM)
777 pthread_mutex_unlock(&gclock);
782 #if defined(THREADS)||defined(DSTM)||defined(STM)
783 pthread_mutex_unlock(&gclock);
/* gc_createcopy: given a from-space object 'orig', ensure a to-space
 * copy exists and report its address through 'copy_ptr'.  Word [0] of
 * an object is its type; after copying, word [1] of the original holds
 * the forwarding pointer.  The return value appears to signal whether a
 * fresh copy was made (so the caller enqueues it for scanning) -- the
 * return statements are elided, and the function continues beyond the
 * last visible line of this excerpt; verify against the full source.
 * The objheader_t variants also copy the transaction header that sits
 * immediately before the object. */
790 int gc_createcopy(void * orig, void ** copy_ptr) {
795 int type=((int *)orig)[0];
/* Already-forwarded path: word [1] is the existing to-space copy. */
797 *copy_ptr=((void **)orig)[1];
800 if (type<NUMCLASSES) {
801 /* We have a normal object */
803 int size=classsize[type]+sizeof(objheader_t);
804 void *newobj=tomalloc(size);
805 memcpy(newobj,((char *) orig)-sizeof(objheader_t), size);
806 newobj=((char *)newobj)+sizeof(objheader_t);
808 int size=classsize[type];
809 void *newobj=tomalloc(size);
810 memcpy(newobj, orig, size);
/* Leave a forwarding pointer behind in the original. */
813 ((void **)orig)[1]=newobj;
817 /* We have an array */
818 struct ArrayObject *ao=(struct ArrayObject *)orig;
819 int elementsize=classsize[type];
820 int length=ao->___length___;
822 int size=sizeof(struct ArrayObject)+length*elementsize+sizeof(objheader_t);
823 void *newobj=tomalloc(size);
824 memcpy(newobj, ((char*)orig)-sizeof(objheader_t), size);
825 newobj=((char *)newobj)+sizeof(objheader_t);
827 int size=sizeof(struct ArrayObject)+length*elementsize;
828 void *newobj=tomalloc(size);
829 memcpy(newobj, orig, size);
833 ((void **)orig)[1]=newobj;