3 #include "structdefs.h"
5 #include "SimpleHash.h"
6 #include "GenericHashtable.h"
8 #if defined(THREADS) || defined(DSTM)
/* Heap sizing policy: start with a 10KB semispace, trigger collection
   when the bump pointer passes 90% of the heap, and size the next heap
   so the data surviving the last GC occupies ~60% of it.
   NOTE(review): GCPOINT/HEAPSIZE round through int — confirm 64-bit
   safety in the full source. */
19 #define INITIALHEAPSIZE 10*1024
20 #define GCPOINT(x) ((int)((x)*0.9))
21 /* This define takes in how full the heap is initially and returns a new heap size to use */
22 #define HEAPSIZE(x,y) (((int)((x)/0.6))+y)
/* Runtime root sets defined elsewhere in the runtime; everything below
   is traced (and rehashed where keys move) during collect(). */
25 extern struct genhashtable * activetasks;
26 extern struct parameterwrapper * objectqueues[NUMCLASSES];
27 extern struct genhashtable * failedtasks;
28 extern struct taskparamdescriptor *currtpd;
29 extern struct RuntimeHash *forward;
30 extern struct RuntimeHash *reverse;
31 extern struct RuntimeHash *fdtoobject;
34 #if defined(THREADS) || defined(DSTM)
/* Global list of threads currently parked for GC (built by stopforgc,
   unlinked by restartaftergc); the collector walks it for roots. */
36 struct listitem * list=NULL;
/* ENQUEUE(orig, dst): if 'orig' is an untagged pointer into from-space,
   copy it with gc_createcopy and store the forwarded address into the
   lvalue 'dst'; pointers already inside to-space are left as-is.
   NOTE(review): interior lines of this macro are elided in this view,
   and the '©' on the gc_createcopy line is mojibake for '&copy' —
   repair against the authoritative source. */
41 #define ENQUEUE(orig, dst) \
42 if ((!(((unsigned int)orig)&0x1))) {\
43 if (orig>to_heapbase&&orig<to_heaptop) {\
45 } else if (orig>curr_heapbase&&orig<curr_heaptop) {\
47 if (gc_createcopy(orig,©))\
/* Single-threaded variant of ENQUEUE: no tag/range filtering, always
   attempt the copy.  NOTE(review): '©' here is mojibake for '&copy';
   interior macro lines are elided in this view. */
53 #define ENQUEUE(orig, dst) \
55 if (gc_createcopy(orig,©))\
62 struct pointerblock *next;
/* Current (from-space) semispace: base..top bounds, the bump allocation
   pointer, and the watermark past which mygcmalloc triggers a GC. */
65 void * curr_heapbase=0;
66 void * curr_heapptr=0;
67 void * curr_heapgcpoint=0;
68 void * curr_heaptop=0;
/* Gray-object work queue: a linked chain of pointerblocks consumed from
   'tail' and filled at 'head'; 'spare' apparently caches one retired
   block to avoid a malloc per refill — confirm in full source. */
75 struct pointerblock *head=NULL;
77 struct pointerblock *tail=NULL;
79 struct pointerblock *spare=NULL;
/* enqueue: push a gray (copied but not yet scanned) object pointer onto
   the work queue, chaining a fresh pointerblock when the head block is
   full.  NOTE(review): the spare-block reuse path and list-linking
   lines are elided in this view. */
81 void enqueue(void *ptr) {
82 if (headindex==NUMPTRS) {
83 struct pointerblock * tmp;
88 tmp=malloc(sizeof(struct pointerblock));
93 head->ptrs[headindex++]=ptr;
97 if (tailindex==NUMPTRS) {
98 struct pointerblock *tmp=tail;
106 return tail->ptrs[tailindex++];
110 if ((head==tail)&&(tailindex==headindex))
/* Separate queue for tag descriptors; traced only after the main scan
   so that tags do not keep otherwise-dead objects alive. */
116 struct pointerblock *taghead=NULL;
/* enqueuetag: record a live tag descriptor for the post-scan fixup pass
   in collect(), growing the tag queue by a pointerblock when the head
   block fills.  NOTE(review): list-linking lines are elided here. */
119 void enqueuetag(struct ___TagDescriptor___ *ptr) {
120 if (tagindex==NUMPTRS) {
121 struct pointerblock * tmp=malloc(sizeof(struct pointerblock));
126 taghead->ptrs[tagindex++]=ptr;
/* collect: stop-the-world copying collection.  Traces all roots
   (thread stacks + held locks, per-class object sets, the runtime hash
   maps, and current/active/failed task descriptors) via ENQUEUE, runs a
   Cheney-style scan of the gray queue, then fixes up tag references.
   Caller (mygcmalloc) holds gclock for the duration.
   NOTE(review): many lines of this function are elided in this view;
   the comments below describe only the visible code. */
131 void collect(struct garbagelist * stackptr) {
132 #if defined(THREADS)||defined(DSTM)
/* Wait until every other registered thread has parked in stopforgc(). */
134 pthread_mutex_lock(&gclistlock);
136 if ((listcount+1)==threadcount) {
137 break; /* Have all other threads stopped */
139 pthread_cond_wait(&gccond, &gclistlock);
/* Fresh gray-object work queue and a separate tag queue. */
146 head=tail=malloc(sizeof(struct pointerblock));
152 taghead=malloc(sizeof(struct pointerblock));
157 /* Check current stack */
158 #if defined(THREADS)||defined(DSTM)
160 struct listitem *listptr=list;
164 while(stackptr!=NULL) {
166 for(i=0;i<stackptr->size;i++) {
167 void * orig=stackptr->array[i];
168 ENQUEUE(orig, stackptr->array[i]);
170 stackptr=stackptr->next;
172 #if defined(THREADS)||defined(DSTM)
173 /* Go to next thread */
/* Each parked thread contributed its lock list and stack root list. */
175 void * orig=listptr->locklist;
176 ENQUEUE(orig, listptr->locklist);
177 stackptr=listptr->stackptr;
178 listptr=listptr->next;
187 /* Update objectsets */
189 for(i=0;i<NUMCLASSES;i++) {
190 struct parameterwrapper * p=objectqueues[i];
192 struct ObjectHash * set=p->objectset;
193 struct ObjectNode * ptr=set->listhead;
195 void *orig=(void *)ptr->key;
196 ENQUEUE(orig, *((void **)(&ptr->key)));
/* Keys were relocated, so every entry's hash changed. */
199 ObjectHashrehash(set); /* Rehash the table */
/* The forward/reverse maps and fd->object map are roots too. */
206 struct RuntimeNode * ptr=forward->listhead;
208 void * orig=(void *)ptr->key;
209 ENQUEUE(orig, *((void **)(&ptr->key)));
212 RuntimeHashrehash(forward); /* Rehash the table */
216 struct RuntimeNode * ptr=reverse->listhead;
218 void *orig=(void *)ptr->data;
219 ENQUEUE(orig, *((void**)(&ptr->data)));
225 struct RuntimeNode * ptr=fdtoobject->listhead;
227 void *orig=(void *)ptr->data;
228 ENQUEUE(orig, *((void**)(&ptr->data)));
234 /* Update current task descriptor */
236 for(i=0;i<currtpd->numParameters;i++) {
237 void *orig=currtpd->parameterArray[i];
238 ENQUEUE(orig, currtpd->parameterArray[i]);
243 /* Update active tasks */
245 struct genpointerlist * ptr=activetasks->list;
247 struct taskparamdescriptor *tpd=ptr->src;
249 for(i=0;i<tpd->numParameters;i++) {
250 void * orig=tpd->parameterArray[i];
251 ENQUEUE(orig, tpd->parameterArray[i]);
255 genrehash(activetasks);
258 /* Update failed tasks */
260 struct genpointerlist * ptr=failedtasks->list;
262 struct taskparamdescriptor *tpd=ptr->src;
264 for(i=0;i<tpd->numParameters;i++) {
265 void * orig=tpd->parameterArray[i];
266 ENQUEUE(orig, tpd->parameterArray[i]);
270 genrehash(failedtasks);
/* Scan loop: pop gray objects and trace their pointer fields.  A copied
   original stores its forwarding (to-space) pointer at word 1. */
275 void * ptr=dequeue();
276 void *cpy=((void **)ptr)[1];
277 int type=((int *)cpy)[0];
278 unsigned int * pointer;
282 /* Nothing is inside */
/* pointerarray[type]: 0 = no pointer fields, 1 = array of pointers,
   otherwise a layout table whose later slots are field offsets. */
287 pointer=pointerarray[type];
289 /* Array of primitives */
291 } else if (((int)pointer)==1) {
292 /* Array of pointers */
293 struct ArrayObject *ao=(struct ArrayObject *) ptr;
294 struct ArrayObject *ao_cpy=(struct ArrayObject *) cpy;
295 int length=ao->___length___;
297 for(i=0;i<length;i++) {
/* Element storage begins one int past the ___length___ field. */
298 void *objptr=((void **)(((char *)& ao->___length___)+sizeof(int)))[i];
299 ENQUEUE(objptr, ((void **)(((char *)& ao_cpy->___length___)+sizeof(int)))[i]);
304 for(i=1;i<=size;i++) {
305 unsigned int offset=pointer[i];
306 void * objptr=*((void **)(((int)ptr)+offset));
307 ENQUEUE(objptr, *((void **) (((int)cpy)+offset)));
315 #if defined(THREADS)||defined(DSTM)
/* Object graph is fully copied; let parked threads resume. */
317 pthread_mutex_unlock(&gclistlock);
323 /* Fix up the references from tags. This can't be done earlier,
324 because we don't want tags to keep objects alive */
326 while(taghead!=NULL) {
328 struct pointerblock *tmp=taghead->next;
329 for(i=0;i<tagindex;i++) {
330 struct ___TagDescriptor___ *tagd=taghead->ptrs[i];
331 struct ___Object___ *obj=tagd->flagptr;
332 struct ___TagDescriptor___ *copy=((struct ___TagDescriptor___**)tagd)[1];
334 /* Zero object case */
335 } else if (obj->type==-1) {
336 /* Single object case */
/* type==-1 marks a forwarded object: forwarding pointer is word 1. */
337 copy->flagptr=((struct ___Object___**)obj)[1];
338 } else if (obj->type==OBJECTARRAYTYPE) {
/* Tag bound to multiple objects: rebuild the flag array keeping only
   objects that survived this collection. */
340 struct ArrayObject *ao=(struct ArrayObject *) obj;
344 struct ArrayObject *aonew;
346 /* Count live objects */
347 for(j=0;j<ao->___cachedCode___;j++) {
348 struct ___Object___ * tobj=ARRAYGET(ao, struct ___Object___ *, j);
/* Round capacity up to a multiple of OBJECTARRAYINTERVAL. */
353 livecount=((livecount-1)/OBJECTARRAYINTERVAL+1)*OBJECTARRAYINTERVAL;
354 aonew=(struct ArrayObject *) tomalloc(sizeof(struct ArrayObject)+sizeof(struct ___Object___*)*livecount);
355 aonew->type=OBJECTARRAYTYPE;
356 aonew->___length___=livecount;
358 for(j=0;j<ao->___cachedCode___;j++) {
359 struct ___Object___ * tobj=ARRAYGET(ao, struct ___Object___ *, j);
360 if (tobj->type==-1) {
361 struct ___Object___ * tobjcpy=((struct ___Object___**)tobj)[1];
362 ARRAYSET(aonew, struct ___Object___*, k++,tobjcpy);
365 aonew->___cachedCode___=k;
368 /* No object live anymore */
/* tomalloc: bump-pointer allocation of 'size' bytes out of to-space,
   used only while copying during a collection (no bounds check visible
   here — presumably the heap was pre-sized; confirm in full source).
   NOTE(review): the alignment/pointer-advance lines are elided. */
379 void * tomalloc(int size) {
380 void * ptr=to_heapptr;
387 #if defined(THREADS)||defined(DSTM)
/* checkcollect: GC safe point for mutator threads.  If a collection is
   in progress (gclock held by the collector), publish this thread's
   roots via stopforgc() and block on gclock until the GC finishes. */
389 void checkcollect(void * ptr) {
391 struct listitem * tmp=stopforgc((struct garbagelist *)ptr);
392 pthread_mutex_lock(&gclock); // Wait for GC
394 pthread_mutex_unlock(&gclock);
/* stopforgc: register this thread as parked for GC — snapshot its stack
   root list and thread-local lock list into a listitem, link it onto
   the global 'list' under gclistlock, and signal the collector that one
   more thread has stopped.  Returns the node for restartaftergc().
   NOTE(review): the list-link and counter lines are elided here. */
399 struct listitem * stopforgc(struct garbagelist * ptr) {
400 struct listitem * litem=malloc(sizeof(struct listitem));
402 litem->locklist=pthread_getspecific(threadlocks);
404 pthread_mutex_lock(&gclistlock);
410 pthread_cond_signal(&gccond);
411 pthread_mutex_unlock(&gclistlock);
/* restartaftergc: undo stopforgc after a collection — restore the
   (possibly relocated) lock list into thread-local storage and unlink
   this thread's node from the global GC list under gclistlock.
   NOTE(review): the head-of-list branch body and counter update are
   elided in this view; 'litem' is presumably freed or reused by the
   caller — confirm ownership in the full source. */
415 void restartaftergc(struct listitem * litem) {
416 pthread_mutex_lock(&gclistlock);
417 pthread_setspecific(threadlocks, litem->locklist);
418 if (litem->prev==NULL) {
421 litem->prev->next=litem->next;
423 if (litem->next!=NULL) {
424 litem->next->prev=litem->prev;
427 pthread_mutex_unlock(&gclistlock);
/* mygcmalloc: bump-pointer allocation of 'size' bytes from the current
   heap; when the allocation pointer crosses curr_heapgcpoint, grows the
   to-space if needed, runs collect(), flips the semispaces, and retries.
   Returns zeroed storage (heaps are bzero'd on allocation/flip).
   NOTE(review): many lines are elided in this view; the void*/int
   pointer arithmetic below relies on GCC extensions and 32-bit
   pointers in the original source. */
432 void * mygcmalloc(struct garbagelist * stackptr, int size) {
434 #if defined(THREADS)||defined(DSTM)
/* Another thread holds gclock (collecting): park this thread as a GC
   root and wait for the collector to release the lock. */
435 if (pthread_mutex_trylock(&gclock)!=0) {
436 struct listitem *tmp=stopforgc(stackptr);
437 pthread_mutex_lock(&gclock);
445 if (curr_heapptr>curr_heapgcpoint) {
446 if (curr_heapbase==0) {
447 /* Need to allocate base heap */
448 curr_heapbase=malloc(INITIALHEAPSIZE);
449 bzero(curr_heapbase, INITIALHEAPSIZE);
450 curr_heaptop=curr_heapbase+INITIALHEAPSIZE;
451 curr_heapgcpoint=((char *) curr_heapbase)+GCPOINT(INITIALHEAPSIZE);
452 curr_heapptr=curr_heapbase+size;
454 to_heapbase=malloc(INITIALHEAPSIZE);
455 to_heaptop=to_heapbase+INITIALHEAPSIZE;
456 to_heapptr=to_heapbase;
458 #if defined(THREADS)||defined(DSTM)
459 pthread_mutex_unlock(&gclock);
464 /* Grow the to heap if necessary */
466 int curr_heapsize=curr_heaptop-curr_heapbase;
467 int to_heapsize=to_heaptop-to_heapbase;
/* Size the next heap from the last surviving data size (HEAPSIZE
   targets ~60% occupancy), keep it 4-byte aligned, and never shrink
   below the current heap size. */
470 last_heapsize=HEAPSIZE(lastgcsize, size);
471 if ((last_heapsize%4)!=0)
472 last_heapsize+=(4-(last_heapsize%4));
474 if (curr_heapsize>last_heapsize)
475 last_heapsize=curr_heapsize;
476 if (last_heapsize>to_heapsize) {
478 to_heapbase=malloc(last_heapsize);
479 to_heaptop=to_heapbase+last_heapsize;
480 to_heapptr=to_heapbase;
484 /* Do our collection */
487 /* Update stat on previous gc size */
488 lastgcsize=(to_heapptr-to_heapbase)+size;
490 /* Flip to/curr heaps */
492 void * tmp=to_heapbase;
493 to_heapbase=curr_heapbase;
497 to_heaptop=curr_heaptop;
501 curr_heapptr=to_heapptr+size;
502 curr_heapgcpoint=((char *) curr_heapbase)+GCPOINT(curr_heaptop-curr_heapbase);
503 to_heapptr=to_heapbase;
505 /* Not enough room :(, redo gc */
506 if (curr_heapptr>curr_heapgcpoint) {
507 #if defined(THREADS)||defined(DSTM)
508 pthread_mutex_unlock(&gclock);
510 return mygcmalloc(stackptr, size);
/* Zero the still-unused remainder of the flipped heap so allocations
   continue to hand out zero-initialized memory. */
513 bzero(tmp, curr_heaptop-tmp);
514 #if defined(THREADS)||defined(DSTM)
515 pthread_mutex_unlock(&gclock);
520 #if defined(THREADS)||defined(DSTM)
521 pthread_mutex_unlock(&gclock);
/* gc_createcopy: copy 'orig' into to-space (or reuse an existing copy)
   and store the copy's address through copy_ptr.  Word 0 of the object
   header is its type (tagged once forwarded) and word 1 of a forwarded
   original holds the forwarding pointer.  Return value apparently
   distinguishes "newly copied" from "already copied" — confirm against
   the full source.  NOTE(review): the tail of this function (including
   its return statements) is past the end of this view. */
528 int gc_createcopy(void * orig, void ** copy_ptr) {
533 int type=((int *)orig)[0];
/* Already forwarded: word 1 is the to-space address. */
535 *copy_ptr=((void **)orig)[1];
537 } if (type<NUMCLASSES) {
538 /* We have a normal object */
539 int size=classsize[type];
540 void *newobj=tomalloc(size);
541 memcpy(newobj, orig, size);
/* Leave the forwarding pointer behind in the original object. */
543 ((void **)orig)[1]=newobj;
547 /* We have an array */
548 struct ArrayObject *ao=(struct ArrayObject *)orig;
549 int elementsize=classsize[type];
550 int length=ao->___length___;
551 int size=sizeof(struct ArrayObject)+length*elementsize;
552 void *newobj=tomalloc(size);
553 memcpy(newobj, orig, size);
555 ((void **)orig)[1]=newobj;