3 #include "structdefs.h"
5 #include "SimpleHash.h"
7 #include "GenericHashtable.h"
#if defined(THREADS) || defined(DSTM) || defined(STM)
/* Initial size of the semispace heaps allocated on first use. */
#define INITIALHEAPSIZE 32*1024*1024
/* Heap fill level (90%) at which a collection is triggered. */
#define GCPOINT(x) ((int)((x)*0.9))
/* This define takes in how full the heap is initially and returns a new heap size to use */
#define HEAPSIZE(x,y) (((int)((x)/0.6))+y)
/* GC roots owned by the task runtime (defined elsewhere): active/failed
 * task tables, per-class object queues, the task descriptor currently
 * executing, the forward/reverse ctables, and the fd->object map. */
extern struct genhashtable * activetasks;
extern struct parameterwrapper * objectqueues[NUMCLASSES];
extern struct genhashtable * failedtasks;
extern struct taskparamdescriptor *currtpd;
extern struct ctable *forward;
extern struct ctable *reverse;
extern struct RuntimeHash *fdtoobject;
#if defined(THREADS) || defined(DSTM) || defined(STM)
/* List of stopped threads' garbage lists, linked by stopforgc(). */
struct listitem * list=NULL;
//Need to check if pointers are transaction pointers
//this also catches the special flag value of 1 for local copies
/* ENQUEUE(orig, dst): if 'orig' is a real heap pointer (low bit clear,
 * inside the current semispace), copy it via gc_createcopy and store the
 * forwarding pointer into 'dst'.
 * NOTE(review): "©" below looks like mojibake for "&copy" (HTML entity
 * &copy; mangled in transit) — confirm against the original source.
 * NOTE(review): the tail of this macro (enqueue call, dst assignment,
 * closing braces) is not visible here. */
#define ENQUEUE(orig, dst) \
if ((!(((unsigned int)orig)&0x1))) { \
if (orig>=curr_heapbase&&orig<curr_heaptop) { \
if (gc_createcopy(orig,©)) \
#elif defined(FASTCHECK)
/* FASTCHECK build: only the literal flag value 1 marks a non-pointer. */
#define ENQUEUE(orig, dst) \
if (((unsigned int)orig)!=1) { \
if (gc_createcopy(orig,©)) \
/* Default build: every value is assumed to be a real object pointer. */
#define ENQUEUE(orig, dst) \
if (gc_createcopy(orig,©)) \
/* Link to the next block in a pointerblock chain.
 * NOTE(review): this is a member of struct pointerblock; the enclosing
 * struct definition is not visible here. */
struct pointerblock *next;
/* Bounds of the current (from-space) heap: objects live in
 * [curr_heapbase, curr_heaptop); curr_heapptr is the bump-allocation
 * cursor and curr_heapgcpoint the fill level that triggers collection. */
void * curr_heapbase=0;
void * curr_heapptr=0;
void * curr_heapgcpoint=0;
void * curr_heaptop=0;
/* Work queue of gray objects awaiting scanning: a chain of pointerblocks
 * from head (enqueue side) to tail (dequeue side), with one cached spare
 * block to avoid a malloc on every wrap-around. */
struct pointerblock *head=NULL;
struct pointerblock *tail=NULL;
struct pointerblock *spare=NULL;
/* Append a pointer to the gray-object work queue, allocating a new
 * pointerblock when the current head block is full.
 * NOTE(review): lines are elided here — the branch that reuses 'spare'
 * and the chain-linking code are not visible. */
void enqueue(void *ptr) {
if (headindex==NUMPTRS) {
struct pointerblock * tmp;
/* No spare block available: allocate a fresh one. */
tmp=malloc(sizeof(struct pointerblock));
head->ptrs[headindex++]=ptr;
/* NOTE(review): the fragments below belong to dequeue() and to the
 * queue-empty predicate; their enclosing definitions are elided here. */
/* Tail block exhausted: advance to the next block in the chain
 * (the old tail presumably becomes 'spare' or is freed — not visible). */
if (tailindex==NUMPTRS) {
struct pointerblock *tmp=tail;
return tail->ptrs[tailindex++];
/* Queue is empty when both cursors meet in the same block. */
if ((head==tail)&&(tailindex==headindex))
/* Separate queue for tag descriptors; tags are fixed up after the main
 * trace so they do not keep their target objects alive. */
struct pointerblock *taghead=NULL;
/* Append a tag descriptor to the tag queue, growing the block chain
 * when the current block is full (chain-link code elided here). */
void enqueuetag(struct ___TagDescriptor___ *ptr) {
if (tagindex==NUMPTRS) {
struct pointerblock * tmp=malloc(sizeof(struct pointerblock));
taghead->ptrs[tagindex++]=ptr;
/* Perform one stop-the-world copying collection.
 *
 * Phases visible below:
 *   1. (THREADS/DSTM) wait until every other thread has parked itself
 *      via stopforgc().
 *   2. Seed the gray queue from the roots: the caller's shadow stack,
 *      each stopped thread's stack and lock list, the per-class object
 *      sets, the forward/reverse ctables, the fd->object map, the
 *      current task descriptor, and the active/failed task tables.
 *   3. Drain the queue, scanning each copied object for pointer fields.
 *   4. Fix up tag descriptors last so tags never keep objects alive.
 *
 * NOTE(review): many interior lines (loop headers, closing braces,
 * #else/#endif directives) are elided in this view. */
void collect(struct garbagelist * stackptr) {
#if defined(THREADS)||defined(DSTM)
pthread_mutex_lock(&gclistlock);
/* All mutators are stopped when every other thread is on 'list'. */
if ((listcount+1)==threadcount) {
break; /* Have all other threads stopped */
pthread_cond_wait(&gccond, &gclistlock);
/* Fresh work queues for this collection cycle. */
head=tail=malloc(sizeof(struct pointerblock));
taghead=malloc(sizeof(struct pointerblock));
/* Check current stack */
#if defined(THREADS)||defined(DSTM)
struct listitem *listptr=list;
/* Walk the chain of garbagelist frames for the current thread's stack. */
while(stackptr!=NULL) {
for(i=0; i<stackptr->size; i++) {
void * orig=stackptr->array[i];
ENQUEUE(orig, stackptr->array[i]);
stackptr=stackptr->next;
#if defined(THREADS)||defined(DSTM)
/* Go to next thread */
/* Each stopped thread contributes its lock list and its own stack. */
void * orig=listptr->locklist;
ENQUEUE(orig, listptr->locklist);
stackptr=listptr->stackptr;
listptr=listptr->next;
/* Revert list for transactional aborts is also a root. */
ENQUEUE(___fcrevert___, ___fcrevert___);
/* Update objectsets */
for(i=0; i<NUMCLASSES; i++) {
struct parameterwrapper * p=objectqueues[i];
struct ObjectHash * set=p->objectset;
struct ObjectNode * ptr=set->listhead;
void *orig=(void *)ptr->key;
/* Forward the key in place, then rehash since keys changed. */
ENQUEUE(orig, *((void **)(&ptr->key)));
ObjectHashrehash(set); /* Rehash the table */
/* Forward keys of the forward ctable. */
struct cnode * ptr=forward->listhead;
void * orig=(void *)ptr->key;
ENQUEUE(orig, *((void **)(&ptr->key)));
crehash(forward); /* Rehash the table */
/* Forward values of the reverse ctable. */
struct cnode * ptr=reverse->listhead;
void *orig=(void *)ptr->val;
ENQUEUE(orig, *((void**)(&ptr->val)));
/* Forward objects owned by file descriptors. */
struct RuntimeNode * ptr=fdtoobject->listhead;
void *orig=(void *)ptr->data;
ENQUEUE(orig, *((void**)(&ptr->data)));
/* Update current task descriptor */
for(i=0; i<currtpd->numParameters; i++) {
void *orig=currtpd->parameterArray[i];
ENQUEUE(orig, currtpd->parameterArray[i]);
/* Update active tasks */
struct genpointerlist * ptr=activetasks->list;
struct taskparamdescriptor *tpd=ptr->src;
for(i=0; i<tpd->numParameters; i++) {
void * orig=tpd->parameterArray[i];
ENQUEUE(orig, tpd->parameterArray[i]);
genrehash(activetasks);
/* Update failed tasks */
struct genpointerlist * ptr=failedtasks->list;
struct taskparamdescriptor *tpd=ptr->src;
for(i=0; i<tpd->numParameters; i++) {
void * orig=tpd->parameterArray[i];
ENQUEUE(orig, tpd->parameterArray[i]);
genrehash(failedtasks);
/* Drain the gray queue: each entry is an original object whose second
 * word holds the forwarding pointer to its to-space copy. */
void * ptr=dequeue();
void *cpy=((void **)ptr)[1];
int type=((int *)cpy)[0];
unsigned int * pointer;
/* Nothing is inside */
/* Per-type table: offsets of pointer fields within an object. */
pointer=pointerarray[type];
/* Array of primitives */
#if defined(DSTM)||defined(FASTCHECK)
/* Even primitive arrays carry runtime header pointers in these builds. */
struct ArrayObject *ao=(struct ArrayObject *) ptr;
struct ArrayObject *ao_cpy=(struct ArrayObject *) cpy;
ENQUEUE((void *)ao->___nextobject___, *((void **)&ao_cpy->___nextobject___));
ENQUEUE((void *)ao->___localcopy___, *((void **)&ao_cpy->___localcopy___));
} else if (((int)pointer)==1) {
/* Array of pointers */
struct ArrayObject *ao=(struct ArrayObject *) ptr;
struct ArrayObject *ao_cpy=(struct ArrayObject *) cpy;
#if (defined(DSTM)||defined(FASTCHECK))
ENQUEUE((void *)ao->___nextobject___, *((void **)&ao_cpy->___nextobject___));
ENQUEUE((void *)ao->___localcopy___, *((void **)&ao_cpy->___localcopy___));
int length=ao->___length___;
/* Elements start immediately after the ___length___ word. */
for(i=0; i<length; i++) {
void *objptr=((void **)(((char *)&ao->___length___)+sizeof(int)))[i];
ENQUEUE(objptr, ((void **)(((char *)&ao_cpy->___length___)+sizeof(int)))[i]);
/* Normal object: pointer[0] is presumably the field count, entries
 * 1..size are byte offsets of pointer fields — confirm in typedefs. */
for(i=1; i<=size; i++) {
unsigned int offset=pointer[i];
void * objptr=*((void **)(((int)ptr)+offset));
ENQUEUE(objptr, *((void **)(((int)cpy)+offset)));
#if defined(THREADS)||defined(DSTM)
pthread_mutex_unlock(&gclistlock);
/* Fix up the references from tags. This can't be done earlier,
because we don't want tags to keep objects alive */
while(taghead!=NULL) {
struct pointerblock *tmp=taghead->next;
for(i=0; i<tagindex; i++) {
struct ___TagDescriptor___ *tagd=taghead->ptrs[i];
struct ___Object___ *obj=tagd->flagptr;
/* The tag's own copy lives behind its forwarding pointer. */
struct ___TagDescriptor___ *copy=((struct ___TagDescriptor___**)tagd)[1];
/* Zero object case */
} else if (obj->type==-1) {
/* Single object case */
/* type==-1 marks a forwarded object; word 1 is the new address. */
copy->flagptr=((struct ___Object___**)obj)[1];
} else if (obj->type==OBJECTARRAYTYPE) {
/* Tag bound to several objects: rebuild the array keeping only the
 * objects that survived (i.e. were forwarded). */
struct ArrayObject *ao=(struct ArrayObject *) obj;
struct ArrayObject *aonew;
/* Count live objects */
for(j=0; j<ao->___cachedCode___; j++) {
struct ___Object___ * tobj=ARRAYGET(ao, struct ___Object___ *, j);
/* Round capacity up to a multiple of OBJECTARRAYINTERVAL. */
livecount=((livecount-1)/OBJECTARRAYINTERVAL+1)*OBJECTARRAYINTERVAL;
aonew=(struct ArrayObject *) tomalloc(sizeof(struct ArrayObject)+sizeof(struct ___Object___*)*livecount);
memcpy(aonew, ao, sizeof(struct ArrayObject));
aonew->type=OBJECTARRAYTYPE;
aonew->___length___=livecount;
/* Copy forwarded pointers of the surviving objects. */
for(j=0; j<ao->___cachedCode___; j++) {
struct ___Object___ * tobj=ARRAYGET(ao, struct ___Object___ *, j);
if (tobj->type==-1) {
struct ___Object___ * tobjcpy=((struct ___Object___**)tobj)[1];
ARRAYSET(aonew, struct ___Object___*, k++,tobjcpy);
aonew->___cachedCode___=k;
/* NULL out the unused tail slots. */
for(; k<livecount; k++) {
ARRAYSET(aonew, struct ___Object___*, k, NULL);
/* No object live anymore */
/* Bump-allocate 'size' bytes from the to-space heap during collection.
 * NOTE(review): the cursor advance/alignment lines are elided here. */
void * tomalloc(int size) {
void * ptr=to_heapptr;
#if defined(THREADS)||defined(DSTM)||defined(STM)
/* GC safepoint: park this thread (publishing its shadow stack 'ptr' as
 * a root) and block on gclock until any in-progress collection ends. */
void checkcollect(void * ptr) {
struct listitem * tmp=stopforgc((struct garbagelist *)ptr);
pthread_mutex_lock(&gclock); // Wait for GC
pthread_mutex_unlock(&gclock);
/* Safepoint variant that also protects 'ptr' and the revert list by
 * wrapping them in a fake one-entry garbagelist on the stack; the GC may
 * forward the slots, so both are re-read after the collection. */
void checkcollect2(void * ptr) {
/* {size=1, array[0]=ptr, next-ish slot=revertlist} — mirrors the
 * garbagelist layout so stopforgc can treat it as a root frame. */
int ptrarray[]={1, (int)ptr, (int) revertlist};
struct listitem * tmp=stopforgc((struct garbagelist *)ptrarray);
pthread_mutex_lock(&gclock); // Wait for GC
pthread_mutex_unlock(&gclock);
/* Pick up the (possibly forwarded) revert list written back by the GC. */
revertlist=(struct ___Object___*)ptrarray[2];
/* Register this thread as stopped for GC: record its shadow stack and
 * lock list in a listitem, link it onto the global 'list', and wake the
 * collector waiting on gccond. Returns the listitem so the caller can
 * unregister via restartaftergc(). (List-linking lines elided here.) */
struct listitem * stopforgc(struct garbagelist * ptr) {
struct listitem * litem=malloc(sizeof(struct listitem));
litem->locklist=pthread_getspecific(threadlocks);
pthread_mutex_lock(&gclistlock);
pthread_cond_signal(&gccond);
pthread_mutex_unlock(&gclistlock);
/* Unregister a thread after GC: restore its (possibly forwarded) lock
 * list into thread-local storage and unlink its listitem from the global
 * stopped-thread list. (The head-of-list branch body is elided here.) */
void restartaftergc(struct listitem * litem) {
pthread_mutex_lock(&gclistlock);
pthread_setspecific(threadlocks, litem->locklist);
if (litem->prev==NULL) {
litem->prev->next=litem->next;
if (litem->next!=NULL) {
litem->next->prev=litem->prev;
pthread_mutex_unlock(&gclistlock);
/* Allocate 'size' bytes from the current heap, running a collection (and
 * growing the to-space) when the heap passes its GC trigger point.
 *
 * Slow path only is visible here: take gclock (parking for a concurrent
 * GC if another thread holds it), lazily create both semispaces on first
 * use, otherwise collect, flip the semispaces, and retry recursively if
 * even a fresh collection leaves no room.
 * NOTE(review): the fast bump-allocation path and several closing braces
 * are elided in this view. */
void * mygcmalloc(struct garbagelist * stackptr, int size) {
#if defined(THREADS)||defined(DSTM)||defined(STM)
/* If another thread is collecting, park so it can scan our stack. */
if (pthread_mutex_trylock(&gclock)!=0) {
struct listitem *tmp=stopforgc(stackptr);
pthread_mutex_lock(&gclock);
if (curr_heapptr>curr_heapgcpoint) {
if (curr_heapbase==0) {
/* Need to allocate base heap */
curr_heapbase=malloc(INITIALHEAPSIZE);
bzero(curr_heapbase, INITIALHEAPSIZE);
curr_heaptop=curr_heapbase+INITIALHEAPSIZE;
curr_heapgcpoint=((char *) curr_heapbase)+GCPOINT(INITIALHEAPSIZE);
/* First allocation is served straight from the new heap base. */
curr_heapptr=curr_heapbase+size;
to_heapbase=malloc(INITIALHEAPSIZE);
to_heaptop=to_heapbase+INITIALHEAPSIZE;
to_heapptr=to_heapbase;
#if defined(THREADS)||defined(DSTM)||defined(STM)
pthread_mutex_unlock(&gclock);
/* Grow the to heap if necessary */
int curr_heapsize=curr_heaptop-curr_heapbase;
int to_heapsize=to_heaptop-to_heapbase;
/* Size the next heap from the last live size plus this request. */
last_heapsize=HEAPSIZE(lastgcsize, size);
/* Round up to an 8-byte multiple. */
if ((last_heapsize&7)!=0)
last_heapsize+=(8-(last_heapsize%8));
/* Never shrink below the current heap. */
if (curr_heapsize>last_heapsize)
last_heapsize=curr_heapsize;
if (last_heapsize>to_heapsize) {
/* NOTE(review): the old to-space is presumably freed on an elided
 * line before this malloc — confirm, else this leaks. */
to_heapbase=malloc(last_heapsize);
if (to_heapbase==NULL) {
printf("Error Allocating enough memory\n");
to_heaptop=to_heapbase+last_heapsize;
to_heapptr=to_heapbase;
/* Do our collection */
/* Update stat on previous gc size */
lastgcsize=(to_heapptr-to_heapbase)+size;
/* Flip to/curr heaps */
void * tmp=to_heapbase;
to_heapbase=curr_heapbase;
to_heaptop=curr_heaptop;
/* Reserve this request at the end of the surviving data. */
curr_heapptr=to_heapptr+size;
curr_heapgcpoint=((char *) curr_heapbase)+GCPOINT(curr_heaptop-curr_heapbase);
to_heapptr=to_heapbase;
/* Not enough room :(, redo gc */
if (curr_heapptr>curr_heapgcpoint) {
#if defined(THREADS)||defined(DSTM)||defined(STM)
pthread_mutex_unlock(&gclock);
/* Retry: the recursive call will grow the heaps and collect again. */
return mygcmalloc(stackptr, size);
/* Zero the reclaimed old semispace for the next flip. */
bzero(tmp, curr_heaptop-tmp);
#if defined(THREADS)||defined(DSTM)||defined(STM)
pthread_mutex_unlock(&gclock);
#if defined(THREADS)||defined(DSTM)||defined(STM)
pthread_mutex_unlock(&gclock);
576 int gc_createcopy(void * orig, void ** copy_ptr) {
581 int type=((int *)orig)[0];
583 *copy_ptr=((void **)orig)[1];
586 if (type<NUMCLASSES) {
587 /* We have a normal object */
588 int size=classsize[type];
589 void *newobj=tomalloc(size);
590 memcpy(newobj, orig, size);
592 ((void **)orig)[1]=newobj;
596 /* We have an array */
597 struct ArrayObject *ao=(struct ArrayObject *)orig;
598 int elementsize=classsize[type];
599 int length=ao->___length___;
600 int size=sizeof(struct ArrayObject)+length*elementsize;
601 void *newobj=tomalloc(size);
602 memcpy(newobj, orig, size);
604 ((void **)orig)[1]=newobj;