3 #include "structdefs.h"
5 #include "SimpleHash.h"
6 #include "GenericHashtable.h"
8 #if defined(THREADS) || defined(DSTM)
/* NOTE(review): the matching #endif for this #if is in a gap of this listing. */
/* Heap sizing policy for the copying collector. */
19 #define INITIALHEAPSIZE 10*1024
/* Trigger point: collection starts once allocation passes 90% of the heap. */
20 #define GCPOINT(x) ((int)((x)*0.9))
21 /* This define takes in how full the heap is initially and returns a new heap size to use */
/* Grows the heap so the surviving data (x) plus the pending request (y)
 * occupies roughly 60% of the new size. */
22 #define HEAPSIZE(x,y) (((int)((x)/0.6))+y)
/* Runtime tables defined elsewhere.  The collector must trace the object
 * pointers stored in each of them and rehash the tables afterwards, because
 * a copying collection changes object addresses (see collect()). */
25 extern struct genhashtable * activetasks;
26 extern struct parameterwrapper * objectqueues[NUMCLASSES];
27 extern struct genhashtable * failedtasks;
28 extern struct taskparamdescriptor *currtpd;
29 extern struct RuntimeHash *forward;
30 extern struct RuntimeHash *reverse;
/* presumably maps file descriptors to runtime objects — verify against caller */
31 extern struct RuntimeHash *fdtoobject;
34 #if defined(THREADS) || defined(DSTM)
/* Global list of threads currently stopped for GC; each entry carries a
 * thread's stack-root list (see stopforgc()/restartaftergc()). */
36 struct listitem * list=NULL;
/* ENQUEUE(orig, dst): forward a root slot.  Skips tagged non-pointers
 * (low bit set), skips pointers already in to-space, and for from-space
 * pointers copies the object (gc_createcopy) and stores the new address
 * into dst.  Threaded variant; several continuation lines are missing
 * from this listing.
 * NOTE(review): the two "©" characters below are a mis-encoded "&copy"
 * (address of the local `copy` variable) — restore from the original
 * source before compiling. */
41 #define ENQUEUE(orig, dst) \
42 if ((!(((unsigned int)orig)&0x1))) {\
43 if (orig>to_heapbase&&orig<to_heaptop) {\
45 } else if (orig>curr_heapbase&&orig<curr_heaptop) {\
47 if (gc_createcopy(orig,©))\
/* Single-threaded variant of ENQUEUE (the #else separating the two
 * definitions is in a gap of this listing). */
53 #define ENQUEUE(orig, dst) \
55 if (gc_createcopy(orig,©))\
/* Fragment of struct pointerblock — the ptrs[NUMPTRS] array member is in a
 * gap of this listing; `next` chains blocks into a queue. */
62 struct pointerblock *next;
/* Current (from-space) heap: base/top bounds, `curr_heapptr` is the bump
 * allocation pointer, `curr_heapgcpoint` the threshold that triggers a
 * collection (set via GCPOINT in mygcmalloc()). */
65 void * curr_heapbase=0;
66 void * curr_heapptr=0;
67 void * curr_heapgcpoint=0;
68 void * curr_heaptop=0;
/* Work queue of gray objects during tracing: enqueue() pushes at `head`,
 * dequeue() pops at `tail`. */
75 struct pointerblock *head=NULL;
77 struct pointerblock *tail=NULL;
/* presumably a retained block for reuse to avoid malloc churn — TODO confirm */
79 struct pointerblock *spare=NULL;
/* Push ptr onto the trace work queue.  When the head block is full
 * (headindex==NUMPTRS) a fresh pointerblock is linked in; the linking
 * statements (original lines 84-92) are in a gap of this listing. */
81 void enqueue(void *ptr) {
82 if (headindex==NUMPTRS) {
83 struct pointerblock * tmp;
88 tmp=malloc(sizeof(struct pointerblock));
93 head->ptrs[headindex++]=ptr;
/* Fragment of dequeue(): pops the next traced pointer from the tail block,
 * advancing to the following pointerblock when the current one is exhausted.
 * The function header (original line ~96) is in a gap of this listing. */
97 if (tailindex==NUMPTRS) {
98 struct pointerblock *tmp=tail;
106 return tail->ptrs[tailindex++];
/* Fragment of the queue-empty test: empty when head and tail are the same
 * block and the indexes have met. */
110 if ((head==tail)&&(tailindex==headindex))
/* Separate queue of tag descriptors; processed after the main trace so that
 * tags do not keep their referenced objects alive (see end of collect()). */
116 struct pointerblock *taghead=NULL;
/* Record a tag descriptor for post-trace fixup.  Grows the tag queue when the
 * head block fills; the linking statements (original lines 122-125) are in a
 * gap of this listing. */
119 void enqueuetag(struct ___TagDescriptor___ *ptr) {
120 if (tagindex==NUMPTRS) {
121 struct pointerblock * tmp=malloc(sizeof(struct pointerblock));
126 taghead->ptrs[tagindex++]=ptr;
/* Perform a copying collection.  Roots: the calling thread's shadow stack
 * (stackptr), other stopped threads' stacks and lock lists (THREADS/DSTM),
 * the per-class object sets, the forward/reverse/fdtoobject hash tables, and
 * the current/active/failed task descriptors.  After the transitive trace,
 * tag->object references are fixed up last so tags alone do not keep objects
 * alive.  This listing has many interior gaps (missing original lines). */
131 void collect(struct garbagelist * stackptr) {
132 #if defined(THREADS)||defined(DSTM)
/* Wait until every other thread has parked itself via stopforgc(). */
134 pthread_mutex_lock(&gclistlock);
136 if ((listcount+1)==threadcount) {
137 break; /* Have all other threads stopped */
139 pthread_cond_wait(&gccond, &gclistlock);
/* Fresh (empty) trace queue and tag queue for this collection. */
146 head=tail=malloc(sizeof(struct pointerblock));
152 taghead=malloc(sizeof(struct pointerblock));
157 /* Check current stack */
158 #if defined(THREADS)||defined(DSTM)
160 struct listitem *listptr=list;
/* Walk each garbagelist frame and forward every root slot. */
164 while(stackptr!=NULL) {
166 for(i=0;i<stackptr->size;i++) {
167 void * orig=stackptr->array[i];
168 ENQUEUE(orig, stackptr->array[i]);
170 stackptr=stackptr->next;
172 #if defined(THREADS)||defined(DSTM)
173 /* Go to next thread */
/* Forward the stopped thread's lock list, then continue the stack walk
 * with that thread's shadow stack. */
175 void * orig=listptr->locklist;
176 ENQUEUE(orig, listptr->locklist);
177 stackptr=listptr->stackptr;
178 listptr=listptr->next;
187 /* Update objectsets */
189 for(i=0;i<NUMCLASSES;i++) {
190 struct parameterwrapper * p=objectqueues[i];
192 struct ObjectHash * set=p->objectset;
193 struct ObjectNode * ptr=set->listhead;
/* Keys are object pointers; forward them in place, then rehash since the
 * addresses (hash inputs) changed. */
195 void *orig=(void *)ptr->key;
196 ENQUEUE(orig, *((void **)(&ptr->key)));
199 ObjectHashrehash(set); /* Rehash the table */
/* Forward keys of the `forward` lock table. */
206 struct RuntimeNode * ptr=forward->listhead;
208 void * orig=(void *)ptr->key;
209 ENQUEUE(orig, *((void **)(&ptr->key)));
212 RuntimeHashrehash(forward); /* Rehash the table */
/* Forward data values of the `reverse` table. */
216 struct RuntimeNode * ptr=reverse->listhead;
218 void *orig=(void *)ptr->data;
219 ENQUEUE(orig, *((void**)(&ptr->data)));
/* Forward data values of the fd->object table. */
225 struct RuntimeNode * ptr=fdtoobject->listhead;
227 void *orig=(void *)ptr->data;
228 ENQUEUE(orig, *((void**)(&ptr->data)));
234 /* Update current task descriptor */
236 for(i=0;i<currtpd->numParameters;i++) {
237 void *orig=currtpd->parameterArray[i];
238 ENQUEUE(orig, currtpd->parameterArray[i]);
243 /* Update active tasks */
245 struct genpointerlist * ptr=activetasks->list;
247 struct taskparamdescriptor *tpd=ptr->src;
249 for(i=0;i<tpd->numParameters;i++) {
250 void * orig=tpd->parameterArray[i];
251 ENQUEUE(orig, tpd->parameterArray[i]);
255 genrehash(activetasks);
258 /* Update failed tasks */
260 struct genpointerlist * ptr=failedtasks->list;
262 struct taskparamdescriptor *tpd=ptr->src;
264 for(i=0;i<tpd->numParameters;i++) {
265 void * orig=tpd->parameterArray[i];
266 ENQUEUE(orig, tpd->parameterArray[i]);
270 genrehash(failedtasks);
/* Transitive closure: scan each copied object's pointer fields, guided by
 * pointerarray[type] (0 => no pointers / primitive array, 1 => pointer
 * array, otherwise an offset table whose [0] entry is its length). */
275 void * ptr=dequeue();
276 void *cpy=((void **)ptr)[1];
277 int type=((int *)cpy)[0];
278 unsigned int * pointer;
282 /* Nothing is inside */
287 pointer=pointerarray[type];
289 /* Array of primitives */
291 } else if (((int)pointer)==1) {
292 /* Array of pointers */
293 struct ArrayObject *ao=(struct ArrayObject *) ptr;
294 struct ArrayObject *ao_cpy=(struct ArrayObject *) cpy;
295 int length=ao->___length___;
/* Elements start one int past ___length___ in the array layout. */
297 for(i=0;i<length;i++) {
298 void *objptr=((void **)(((char *)& ao->___length___)+sizeof(int)))[i];
299 ENQUEUE(objptr, ((void **)(((char *)& ao_cpy->___length___)+sizeof(int)))[i]);
/* Normal object: forward each field at the recorded offsets. */
304 for(i=1;i<=size;i++) {
305 unsigned int offset=pointer[i];
306 void * objptr=*((void **)(((int)ptr)+offset));
307 ENQUEUE(objptr, *((void **) (((int)cpy)+offset)));
315 #if defined(THREADS)||defined(DSTM)
317 pthread_mutex_unlock(&gclistlock);
323 /* Fix up the references from tags. This can't be done earlier,
324 because we don't want tags to keep objects alive */
326 while(taghead!=NULL) {
328 struct pointerblock *tmp=taghead->next;
329 for(i=0;i<tagindex;i++) {
330 struct ___TagDescriptor___ *tagd=taghead->ptrs[i];
331 struct ___Object___ *obj=tagd->flagptr;
/* The copied tag's forwarding pointer was stored in word [1]. */
332 struct ___TagDescriptor___ *copy=((struct ___TagDescriptor___**)tagd)[1];
334 /* Zero object case */
335 } else if (obj->type==-1) {
336 /* Single object case */
/* type==-1 marks a forwarded object; word [1] holds its new address. */
337 copy->flagptr=((struct ___Object___**)obj)[1];
338 } else if (obj->type==OBJECTARRAYTYPE) {
/* Tag bound to several objects: rebuild the object array keeping only
 * the members that survived the collection. */
340 struct ArrayObject *ao=(struct ArrayObject *) obj;
344 struct ArrayObject *aonew;
346 /* Count live objects */
347 for(j=0;j<ao->___cachedCode___;j++) {
348 struct ___Object___ * tobj=ARRAYGET(ao, struct ___Object___ *, j);
/* Round the new capacity up to a multiple of OBJECTARRAYINTERVAL. */
353 livecount=((livecount-1)/OBJECTARRAYINTERVAL+1)*OBJECTARRAYINTERVAL;
354 aonew=(struct ArrayObject *) tomalloc(sizeof(struct ArrayObject)+sizeof(struct ___Object___*)*livecount);
355 memcpy(aonew, ao, sizeof(struct ArrayObject));
356 aonew->type=OBJECTARRAYTYPE;
357 aonew->___length___=livecount;
/* Copy forwarded members (type==-1 => survived; word [1] is new addr). */
359 for(j=0;j<ao->___cachedCode___;j++) {
360 struct ___Object___ * tobj=ARRAYGET(ao, struct ___Object___ *, j);
361 if (tobj->type==-1) {
362 struct ___Object___ * tobjcpy=((struct ___Object___**)tobj)[1];
363 ARRAYSET(aonew, struct ___Object___*, k++,tobjcpy);
366 aonew->___cachedCode___=k;
/* NULL out the unused tail slots. */
367 for(;k<livecount;k++) {
368 ARRAYSET(aonew, struct ___Object___*, k, NULL);
371 /* No object live anymore */
/* Bump-allocate `size` bytes from the to-space heap during a collection;
 * the pointer advance and return (original lines 384+) are in a gap of
 * this listing. */
382 void * tomalloc(int size) {
383 void * ptr=to_heapptr;
390 #if defined(THREADS)||defined(DSTM)
/* GC safe point for a running thread: register this thread's shadow stack
 * with the collector (stopforgc), then block on gclock until any in-progress
 * collection finishes.  The restartaftergc(tmp) call that deregisters is in
 * a gap of this listing — confirm against the original source. */
392 void checkcollect(void * ptr) {
394 struct listitem * tmp=stopforgc((struct garbagelist *)ptr);
395 pthread_mutex_lock(&gclock); // Wait for GC
397 pthread_mutex_unlock(&gclock);
/* Publish this thread's GC roots (shadow stack + thread-local lock list) on
 * the global stopped-thread list under gclistlock, and signal gccond so the
 * collector's wait for all threads to stop can re-check listcount.  Returns
 * the list node for later removal by restartaftergc().  The list-insertion
 * statements (original lines 408-412) are in a gap of this listing. */
402 struct listitem * stopforgc(struct garbagelist * ptr) {
403 struct listitem * litem=malloc(sizeof(struct listitem));
405 litem->locklist=pthread_getspecific(threadlocks);
407 pthread_mutex_lock(&gclistlock);
413 pthread_cond_signal(&gccond);
414 pthread_mutex_unlock(&gclistlock);
/* Undo stopforgc(): restore the (possibly GC-updated) thread-local lock
 * list and unlink litem from the global stopped-thread list under
 * gclistlock.  The head-of-list branch body (original lines 422-423) and
 * the free/decrement statements are in gaps of this listing. */
418 void restartaftergc(struct listitem * litem) {
419 pthread_mutex_lock(&gclistlock);
420 pthread_setspecific(threadlocks, litem->locklist);
421 if (litem->prev==NULL) {
424 litem->prev->next=litem->next;
426 if (litem->next!=NULL) {
427 litem->next->prev=litem->prev;
430 pthread_mutex_unlock(&gclistlock);
/* Allocator entry point.  Fast path (in a gap of this listing) bumps
 * curr_heapptr; when allocation passes curr_heapgcpoint this slow path
 * either creates the initial semispaces or runs a collection, grows the
 * to-space per HEAPSIZE, flips the semispaces, and retries via recursion
 * if the survivors still leave insufficient room. */
435 void * mygcmalloc(struct garbagelist * stackptr, int size) {
437 #if defined(THREADS)||defined(DSTM)
/* If another thread holds gclock it may be collecting: park this thread's
 * roots (stopforgc) before blocking so the collector can finish. */
438 if (pthread_mutex_trylock(&gclock)!=0) {
439 struct listitem *tmp=stopforgc(stackptr);
440 pthread_mutex_lock(&gclock);
448 if (curr_heapptr>curr_heapgcpoint) {
449 if (curr_heapbase==0) {
450 /* Need to allocate base heap */
451 curr_heapbase=malloc(INITIALHEAPSIZE);
452 bzero(curr_heapbase, INITIALHEAPSIZE);
453 curr_heaptop=curr_heapbase+INITIALHEAPSIZE;
454 curr_heapgcpoint=((char *) curr_heapbase)+GCPOINT(INITIALHEAPSIZE);
455 curr_heapptr=curr_heapbase+size;
/* To-space is allocated lazily alongside the first from-space. */
457 to_heapbase=malloc(INITIALHEAPSIZE);
458 to_heaptop=to_heapbase+INITIALHEAPSIZE;
459 to_heapptr=to_heapbase;
461 #if defined(THREADS)||defined(DSTM)
462 pthread_mutex_unlock(&gclock);
467 /* Grow the to heap if necessary */
469 int curr_heapsize=curr_heaptop-curr_heapbase;
470 int to_heapsize=to_heaptop-to_heapbase;
/* Size estimate from last survivor volume plus this request ... */
473 last_heapsize=HEAPSIZE(lastgcsize, size);
/* ... rounded up to a 4-byte multiple ... */
474 if ((last_heapsize%4)!=0)
475 last_heapsize+=(4-(last_heapsize%4));
/* ... and never smaller than the current heap. */
477 if (curr_heapsize>last_heapsize)
478 last_heapsize=curr_heapsize;
479 if (last_heapsize>to_heapsize) {
/* NOTE(review): old to_heapbase appears to be freed in a line missing
 * from this listing — confirm against the original source. */
481 to_heapbase=malloc(last_heapsize);
482 to_heaptop=to_heapbase+last_heapsize;
483 to_heapptr=to_heapbase;
487 /* Do our collection */
490 /* Update stat on previous gc size */
491 lastgcsize=(to_heapptr-to_heapbase)+size;
493 /* Flip to/curr heaps */
495 void * tmp=to_heapbase;
496 to_heapbase=curr_heapbase;
500 to_heaptop=curr_heaptop;
/* Reserve the pending request at the start of the new from-space. */
504 curr_heapptr=to_heapptr+size;
505 curr_heapgcpoint=((char *) curr_heapbase)+GCPOINT(curr_heaptop-curr_heapbase);
506 to_heapptr=to_heapbase;
508 /* Not enough room :(, redo gc */
509 if (curr_heapptr>curr_heapgcpoint) {
510 #if defined(THREADS)||defined(DSTM)
511 pthread_mutex_unlock(&gclock);
/* Recurse: the retry path will grow the heap per HEAPSIZE above. */
513 return mygcmalloc(stackptr, size);
/* Zero the unused tail of the new from-space for fresh allocations. */
516 bzero(tmp, curr_heaptop-tmp);
517 #if defined(THREADS)||defined(DSTM)
518 pthread_mutex_unlock(&gclock);
523 #if defined(THREADS)||defined(DSTM)
524 pthread_mutex_unlock(&gclock);
531 int gc_createcopy(void * orig, void ** copy_ptr) {
536 int type=((int *)orig)[0];
538 *copy_ptr=((void **)orig)[1];
540 } if (type<NUMCLASSES) {
541 /* We have a normal object */
542 int size=classsize[type];
543 void *newobj=tomalloc(size);
544 memcpy(newobj, orig, size);
546 ((void **)orig)[1]=newobj;
550 /* We have an array */
551 struct ArrayObject *ao=(struct ArrayObject *)orig;
552 int elementsize=classsize[type];
553 int length=ao->___length___;
554 int size=sizeof(struct ArrayObject)+length*elementsize;
555 void *newobj=tomalloc(size);
556 memcpy(newobj, orig, size);
558 ((void **)orig)[1]=newobj;