3 #include "structdefs.h"
5 #include "SimpleHash.h"
7 #include "GenericHashtable.h"
9 #if defined(THREADS) || defined(DSTM) || defined(STM)
/* Initial size of each semispace heap: 128 MB. */
#define INITIALHEAPSIZE 128*1024*1024
/* GC trigger point: collect once the heap is 95% full. */
#define GCPOINT(x) ((int)((x)*0.95))
/* This define takes in how full the heap is initially and returns a new heap size to use */
/* Fully parenthesized (arguments and whole expansion) so expression
 * arguments such as "1<<2", and embedding the macro in a larger
 * expression, cannot mis-associate.  Behavior is unchanged for the
 * plain-identifier call sites in this file. */
#define HEAPSIZE(x,y) (((int)((x)+(y)))*2)
/* Root sets defined in other modules; collect() below traces all of
 * them (object queues, task descriptors, forward/reverse tables, and
 * the fd-to-object map). */
extern struct genhashtable * activetasks;
extern struct parameterwrapper * objectqueues[NUMCLASSES];
extern struct genhashtable * failedtasks;
extern struct taskparamdescriptor *currtpd;
extern struct ctable *forward;
extern struct ctable *reverse;
extern struct RuntimeHash *fdtoobject;
#if defined(THREADS) || defined(DSTM) || defined(STM)
/* Global list of per-thread root records, populated by stopforgc()
 * and walked by collect(). */
struct listitem * list=NULL;
48 //Need to check if pointers are transaction pointers
49 //this also catches the special flag value of 1 for local copies
51 #define ENQUEUE(orig, dst) \
52 if ((!(((unsigned int)orig)&0x1))) { \
53 if (orig>=curr_heapbase&&orig<curr_heaptop) { \
55 if (gc_createcopy(orig,©)) \
61 #define ENQUEUE(orig, dst) \
62 if (orig>=curr_heapbase&&orig<curr_heaptop) { \
64 if (gc_createcopy(orig,©)) \
68 #define SENQUEUE(orig, dst) \
71 if (gc_createcopy(orig,©)) \
75 #elif defined(FASTCHECK)
76 #define ENQUEUE(orig, dst) \
77 if (((unsigned int)orig)!=1) { \
79 if (gc_createcopy(orig,©)) \
83 #define ENQUEUE(orig, dst) \
85 if (gc_createcopy(orig,©)) \
/* Trailing link field of struct pointerblock (the struct's opening
 * lines are elided from this view); blocks chain to form the GC work
 * queue and the tag queue. */
struct pointerblock *next;
/* Current ("from") semispace bounds and allocation state. */
void * curr_heapbase=0;
void * curr_heapptr=0;      /* bump-allocation pointer */
void * curr_heapgcpoint=0;  /* crossing this in mygcmalloc() triggers a GC */
void * curr_heaptop=0;
/* Base of the "to" semispace used during copying collection. */
void * to_heapbase=0;
/* Work queue of discovered (gray) objects: a chain of pointerblocks. */
struct pointerblock *head=NULL;
struct pointerblock *tail=NULL;
/* NOTE(review): presumably a recycled block kept to avoid a malloc on
 * the next queue growth — the lines using it are elided; verify. */
struct pointerblock *spare=NULL;
/* Push ptr onto the gray-object work queue, growing the queue with a
 * fresh pointerblock when the head block's NUMPTRS slots are full.
 * (Several lines of this function are elided from this view.) */
void enqueue(void *ptr) {
if (headindex==NUMPTRS) {
struct pointerblock * tmp;
tmp=malloc(sizeof(struct pointerblock));
head->ptrs[headindex++]=ptr;
/* Fragment of dequeue() (the function header is elided from this view):
 * pops the next pointer from the tail block, advancing to the next
 * pointerblock when tailindex reaches NUMPTRS. */
if (tailindex==NUMPTRS) {
struct pointerblock *tmp=tail;
return tail->ptrs[tailindex++];
/* Rebuild a per-thread transaction-cache hash table during collection:
 * heap-resident values are forwarded (SENQUEUE), pointer fields inside
 * each cached object are re-enqueued, and every node is re-inserted
 * into a freshly calloc'd bucket array.  Many lines of this function
 * are elided from this view. */
void fixtable(chashlistnode_t ** tc_table, unsigned int tc_size) {
/* Index is (key & mask) >> 1, so the range fits the tc_size buckets. */
unsigned int mask=(tc_size<<1)-1;
chashlistnode_t *node=calloc(tc_size, sizeof(chashlistnode_t));
chashlistnode_t *ptr=*tc_table;
chashlistnode_t *curr;
for(i=0;i<tc_size;i++) {
do { //Inner loop to go through linked lists
chashlistnode_t *tmp,*next;
if ((key=(void *)curr->key) == 0) { //Exit inner loop if the first element is 0
break; //key = val = 0 for an element not present in the hash table
if (key>=curr_heapbase&&key<curr_heaptop) {
SENQUEUE(curr->val, curr->val);
//rewrite transaction cache entry
/* First word of every object is its type id. */
int type=((int *)ptr)[0];
unsigned int *pointer=pointerarray[type];
//array of primitives - do nothing
} else if (((int)pointer)==1) {
/* pointerarray[type]==1 marks an array of object pointers. */
struct ArrayObject *ao=(struct ArrayObject *) ptr;
int length=ao->___length___;
for(i=0; i<length; i++) {
/* Element storage begins one int past ___length___. */
void *objptr=((void **)(((char *)&ao->___length___)+sizeof(int)))[i];
ENQUEUE(objptr, ((void **)(((char *)&ao->___length___)+sizeof(int)))[i]);
/* Normal object: pointer[0] is the field count (elided), entries
 * 1..size are byte offsets of the pointer fields. */
for(i=1; i<=size; i++) {
unsigned int offset=pointer[i];
void * objptr=*((void **)(((int)ptr)+offset));
ENQUEUE(objptr, *((void **)(((int)ptr)+offset)));
index = (((unsigned int)key) & mask) >>1;
curr->key=(unsigned int)key;
// Insert into the new table
tmp->key = curr->key;
tmp->val = curr->val;
curr->next=tmp->next;
/* Fragment of an elided helper: the work queue is empty exactly when
 * head and tail are the same block and the indices coincide. */
if ((head==tail)&&(tailindex==headindex))
/* Queue of tag descriptors found while tracing; fixed up only after the
 * main copy phase so tags alone do not keep objects alive (see the end
 * of collect()). */
struct pointerblock *taghead=NULL;
/* Append a tag descriptor to the tag queue, growing the queue on
 * overflow.  (Some lines are elided from this view.) */
void enqueuetag(struct ___TagDescriptor___ *ptr) {
if (tagindex==NUMPTRS) {
struct pointerblock * tmp=malloc(sizeof(struct pointerblock));
taghead->ptrs[tagindex++]=ptr;
/* Copying garbage collection.  Roots traced below: the caller's shadow
 * stack chain, (THREADS/DSTM/STM) every registered thread's stack,
 * locklist and transaction cache, the per-class object sets, the
 * forward/reverse/fd tables, and the current/active/failed task
 * descriptors.  Discovered objects are forwarded via ENQUEUE, then
 * scanned breadth-first from the work queue, and finally tag references
 * are patched.  Many lines of this function are elided from this view;
 * comments mark the visible phases. */
void collect(struct garbagelist * stackptr) {
#if defined(THREADS)||defined(DSTM)||defined(STM)
/* Wait until every other thread has parked in stopforgc(). */
pthread_mutex_lock(&gclistlock);
if ((listcount+1)==threadcount) {
break; /* Have all other threads stopped */
pthread_cond_wait(&gccond, &gclistlock);
/* Fresh (empty) work queue and tag queue for this collection. */
head=tail=malloc(sizeof(struct pointerblock));
taghead=malloc(sizeof(struct pointerblock));
/* Check current stack */
#if defined(THREADS)||defined(DSTM)||defined(STM)
struct listitem *listptr=list;
while(stackptr!=NULL) {
for(i=0; i<stackptr->size; i++) {
void * orig=stackptr->array[i];
ENQUEUE(orig, stackptr->array[i]);
stackptr=stackptr->next;
#if defined(THREADS)||defined(DSTM)||defined(STM)
/* Go to next thread */
void * orig=listptr->locklist;
ENQUEUE(orig, listptr->locklist);
if ((*listptr->tc_table)!=NULL)
fixtable(listptr->tc_table, listptr->tc_size);
stackptr=listptr->stackptr;
listptr=listptr->next;
ENQUEUE(___fcrevert___, ___fcrevert___);
/* Update objectsets */
for(i=0; i<NUMCLASSES; i++) {
#if !defined(MULTICORE)
struct parameterwrapper * p=objectqueues[i];
struct ObjectHash * set=p->objectset;
struct ObjectNode * ptr=set->listhead;
void *orig=(void *)ptr->key;
ENQUEUE(orig, *((void **)(&ptr->key)));
ObjectHashrehash(set); /* Rehash the table */
/* Forward table keys, reverse table values, fd map data. */
struct cnode * ptr=forward->listhead;
void * orig=(void *)ptr->key;
ENQUEUE(orig, *((void **)(&ptr->key)));
crehash(forward); /* Rehash the table */
struct cnode * ptr=reverse->listhead;
void *orig=(void *)ptr->val;
ENQUEUE(orig, *((void**)(&ptr->val)));
struct RuntimeNode * ptr=fdtoobject->listhead;
void *orig=(void *)ptr->data;
ENQUEUE(orig, *((void**)(&ptr->data)));
/* Update current task descriptor */
for(i=0; i<currtpd->numParameters; i++) {
void *orig=currtpd->parameterArray[i];
ENQUEUE(orig, currtpd->parameterArray[i]);
/* Update active tasks */
struct genpointerlist * ptr=activetasks->list;
struct taskparamdescriptor *tpd=ptr->src;
for(i=0; i<tpd->numParameters; i++) {
void * orig=tpd->parameterArray[i];
ENQUEUE(orig, tpd->parameterArray[i]);
genrehash(activetasks);
/* Update failed tasks */
struct genpointerlist * ptr=failedtasks->list;
struct taskparamdescriptor *tpd=ptr->src;
for(i=0; i<tpd->numParameters; i++) {
void * orig=tpd->parameterArray[i];
ENQUEUE(orig, tpd->parameterArray[i]);
genrehash(failedtasks);
/* Breadth-first scan: each dequeued from-space object has its to-space
 * copy in slot [1] (installed by gc_createcopy); type id is word 0. */
void * ptr=dequeue();
void *cpy=((void **)ptr)[1];
int type=((int *)cpy)[0];
unsigned int * pointer;
/* Nothing is inside */
pointer=pointerarray[type];
/* Array of primitives */
#if defined(DSTM)||defined(FASTCHECK)
struct ArrayObject *ao=(struct ArrayObject *) ptr;
struct ArrayObject *ao_cpy=(struct ArrayObject *) cpy;
ENQUEUE((void *)ao->___nextobject___, *((void **)&ao_cpy->___nextobject___));
ENQUEUE((void *)ao->___localcopy___, *((void **)&ao_cpy->___localcopy___));
} else if (((int)pointer)==1) {
/* Array of pointers */
struct ArrayObject *ao=(struct ArrayObject *) ptr;
struct ArrayObject *ao_cpy=(struct ArrayObject *) cpy;
#if (defined(DSTM)||defined(FASTCHECK))
ENQUEUE((void *)ao->___nextobject___, *((void **)&ao_cpy->___nextobject___));
ENQUEUE((void *)ao->___localcopy___, *((void **)&ao_cpy->___localcopy___));
int length=ao->___length___;
for(i=0; i<length; i++) {
void *objptr=((void **)(((char *)&ao->___length___)+sizeof(int)))[i];
ENQUEUE(objptr, ((void **)(((char *)&ao_cpy->___length___)+sizeof(int)))[i]);
/* Normal object: pointer[1..size] are pointer-field byte offsets;
 * read from the old object, write forwarded values into the copy. */
for(i=1; i<=size; i++) {
unsigned int offset=pointer[i];
void * objptr=*((void **)(((int)ptr)+offset));
ENQUEUE(objptr, *((void **)(((int)cpy)+offset)));
#if defined(THREADS)||defined(DSTM)||defined(STM)
pthread_mutex_unlock(&gclistlock);
/* Fix up the references from tags. This can't be done earlier,
because we don't want tags to keep objects alive */
while(taghead!=NULL) {
struct pointerblock *tmp=taghead->next;
for(i=0; i<tagindex; i++) {
struct ___TagDescriptor___ *tagd=taghead->ptrs[i];
struct ___Object___ *obj=tagd->flagptr;
struct ___TagDescriptor___ *copy=((struct ___TagDescriptor___**)tagd)[1];
/* Zero object case */
} else if (obj->type==-1) {
/* Single object case */
copy->flagptr=((struct ___Object___**)obj)[1];
} else if (obj->type==OBJECTARRAYTYPE) {
/* Tag bound to several objects: compact the survivors into a new
 * to-space array rounded up to OBJECTARRAYINTERVAL slots.
 * (type==-1 marks a forwarded object; slot [1] is its copy.) */
struct ArrayObject *ao=(struct ArrayObject *) obj;
struct ArrayObject *aonew;
/* Count live objects */
for(j=0; j<ao->___cachedCode___; j++) {
struct ___Object___ * tobj=ARRAYGET(ao, struct ___Object___ *, j);
livecount=((livecount-1)/OBJECTARRAYINTERVAL+1)*OBJECTARRAYINTERVAL;
aonew=(struct ArrayObject *) tomalloc(sizeof(struct ArrayObject)+sizeof(struct ___Object___*)*livecount);
memcpy(aonew, ao, sizeof(struct ArrayObject));
aonew->type=OBJECTARRAYTYPE;
aonew->___length___=livecount;
for(j=0; j<ao->___cachedCode___; j++) {
struct ___Object___ * tobj=ARRAYGET(ao, struct ___Object___ *, j);
if (tobj->type==-1) {
struct ___Object___ * tobjcpy=((struct ___Object___**)tobj)[1];
ARRAYSET(aonew, struct ___Object___*, k++,tobjcpy);
aonew->___cachedCode___=k;
for(; k<livecount; k++) {
ARRAYSET(aonew, struct ___Object___*, k, NULL);
/* No object live anymore */
/* Bump-allocate size bytes in to-space during collection.
 * (The remainder of the function is elided from this view.) */
void * tomalloc(int size) {
void * ptr=to_heapptr;
#if defined(THREADS)||defined(DSTM)||defined(STM)
/* GC safe point: publish this thread's roots via stopforgc(), then
 * block on gclock until any in-progress collection finishes.
 * (The restart call is on a line elided from this view.) */
void checkcollect(void * ptr) {
struct listitem * tmp=stopforgc((struct garbagelist *)ptr);
pthread_mutex_lock(&gclock); // Wait for GC
pthread_mutex_unlock(&gclock);
/* Safe-point variant that also protects revertlist: both pointers are
 * published to the collector through a fake one-entry garbagelist built
 * on the stack (leading 1 = element count), and revertlist is read back
 * afterwards since the collector may have forwarded it. */
void checkcollect2(void * ptr) {
int ptrarray[]={1, (int)ptr, (int) revertlist};
struct listitem * tmp=stopforgc((struct garbagelist *)ptrarray);
pthread_mutex_lock(&gclock); // Wait for GC
pthread_mutex_unlock(&gclock);
revertlist=(struct ___Object___*)ptrarray[2];
/* Register this thread as stopped for GC: build a listitem carrying its
 * roots (stack list, thread locks, transaction cache) and link it onto
 * the global list under gclistlock, signalling the collector.  The
 * caller later passes the returned item to restartaftergc().
 * (Several lines are elided from this view.) */
struct listitem * stopforgc(struct garbagelist * ptr) {
struct listitem * litem=malloc(sizeof(struct listitem));
litem->locklist=pthread_getspecific(threadlocks);
litem->tc_size=c_size;
litem->tc_table=&c_table;
pthread_mutex_lock(&gclistlock);
pthread_cond_signal(&gccond);
pthread_mutex_unlock(&gclistlock);
/* Undo stopforgc(): restore the (possibly forwarded) lock list into
 * thread-local storage and unlink litem from the doubly linked global
 * list under gclistlock.  (The head-of-list branch body and the free of
 * litem are on lines elided from this view.) */
void restartaftergc(struct listitem * litem) {
pthread_mutex_lock(&gclistlock);
pthread_setspecific(threadlocks, litem->locklist);
if (litem->prev==NULL) {
litem->prev->next=litem->next;
if (litem->next!=NULL) {
litem->next->prev=litem->prev;
pthread_mutex_unlock(&gclistlock);
/* Allocate size bytes from the current heap, lazily creating the two
 * semispaces on first use and running a collection (growing to-space if
 * needed) once curr_heapgcpoint is crossed.  stackptr is the caller's
 * shadow stack, passed through to collect().  Many lines of this
 * function are elided from this view; comments mark the visible phases. */
void * mygcmalloc(struct garbagelist * stackptr, int size) {
#if defined(THREADS)||defined(DSTM)||defined(STM)
/* If another thread holds gclock it is collecting: park this thread's
 * roots and wait for the lock instead of spinning. */
if (pthread_mutex_trylock(&gclock)!=0) {
struct listitem *tmp=stopforgc(stackptr);
pthread_mutex_lock(&gclock);
if (curr_heapptr>curr_heapgcpoint) {
if (curr_heapbase==0) {
/* Need to allocate base heap */
curr_heapbase=malloc(INITIALHEAPSIZE);
bzero(curr_heapbase, INITIALHEAPSIZE);
curr_heaptop=curr_heapbase+INITIALHEAPSIZE;
curr_heapgcpoint=((char *) curr_heapbase)+GCPOINT(INITIALHEAPSIZE);
curr_heapptr=curr_heapbase+size;
to_heapbase=malloc(INITIALHEAPSIZE);
to_heaptop=to_heapbase+INITIALHEAPSIZE;
to_heapptr=to_heapbase;
#if defined(THREADS)||defined(DSTM)||defined(STM)
pthread_mutex_unlock(&gclock);
/* Grow the to heap if necessary */
int curr_heapsize=curr_heaptop-curr_heapbase;
int to_heapsize=to_heaptop-to_heapbase;
/* New size from last live size + request, rounded up to 8 bytes and
 * never smaller than the current heap. */
last_heapsize=HEAPSIZE(lastgcsize, size);
if ((last_heapsize&7)!=0)
last_heapsize+=(8-(last_heapsize%8));
if (curr_heapsize>last_heapsize)
last_heapsize=curr_heapsize;
if (last_heapsize>to_heapsize) {
to_heapbase=malloc(last_heapsize);
if (to_heapbase==NULL) {
printf("Error Allocating enough memory\n");
to_heaptop=to_heapbase+last_heapsize;
to_heapptr=to_heapbase;
/* Do our collection */
/* Update stat on previous gc size */
lastgcsize=(to_heapptr-to_heapbase)+size;
printf("Garbage collected: Old bytes: %u\n", curr_heapptr-curr_heapbase);
printf("New space: %u\n", to_heapptr-to_heapbase);
printf("Total space: %u\n", to_heaptop-to_heapbase);
/* Flip to/curr heaps */
void * tmp=to_heapbase;
to_heapbase=curr_heapbase;
to_heaptop=curr_heaptop;
/* Reserve the requested bytes at the top of the survivors. */
curr_heapptr=to_heapptr+size;
curr_heapgcpoint=((char *) curr_heapbase)+GCPOINT(curr_heaptop-curr_heapbase);
to_heapptr=to_heapbase;
/* Not enough room :(, redo gc */
if (curr_heapptr>curr_heapgcpoint) {
#if defined(THREADS)||defined(DSTM)||defined(STM)
pthread_mutex_unlock(&gclock);
return mygcmalloc(stackptr, size);
/* Zero the unused tail of the flipped heap for the next cycle. */
bzero(tmp, curr_heaptop-tmp);
#if defined(THREADS)||defined(DSTM)||defined(STM)
pthread_mutex_unlock(&gclock);
#if defined(THREADS)||defined(DSTM)||defined(STM)
pthread_mutex_unlock(&gclock);
683 int gc_createcopy(void * orig, void ** copy_ptr) {
688 int type=((int *)orig)[0];
690 *copy_ptr=((void **)orig)[1];
693 if (type<NUMCLASSES) {
694 /* We have a normal object */
695 int size=classsize[type];
696 void *newobj=tomalloc(size);
697 memcpy(newobj, orig, size);
699 ((void **)orig)[1]=newobj;
703 /* We have an array */
704 struct ArrayObject *ao=(struct ArrayObject *)orig;
705 int elementsize=classsize[type];
706 int length=ao->___length___;
707 int size=sizeof(struct ArrayObject)+length*elementsize;
708 void *newobj=tomalloc(size);
709 memcpy(newobj, orig, size);
711 ((void **)orig)[1]=newobj;