3 #include "structdefs.h"
5 #include "SimpleHash.h"
7 #include "GenericHashtable.h"
/*
 * NOTE(review): this file is a sampled excerpt of a copying-collector
 * runtime. Every line carries a stray leading number (original file line
 * numbers fused into the text by extraction) and many intermediate lines
 * are missing; comments below annotate only what the visible lines show.
 */
9 #if defined(THREADS) || defined(DSTM) || defined(STM)
/* Initial size of each semispace: 32 MB. */
25 #define INITIALHEAPSIZE 32*1024*1024
/* GC trigger watermark: collect once the heap is 90% full. */
26 #define GCPOINT(x) ((int)((x)*0.9))
27 /* This define takes in how full the heap is initially and returns a new heap size to use */
28 #define HEAPSIZE(x,y) (((int)((x)/0.6))+y)
/* GC roots owned by other modules: task scheduling tables and the
   forward/reverse conversion tables scanned in collect() below. */
31 extern struct genhashtable * activetasks;
33 extern struct parameterwrapper * objectqueues[NUMCLASSES];
35 extern struct genhashtable * failedtasks;
36 extern struct taskparamdescriptor *currtpd;
37 extern struct ctable *forward;
38 extern struct ctable *reverse;
/* File-descriptor -> object map; its data pointers are also GC roots. */
39 extern struct RuntimeHash *fdtoobject;
/* List of threads currently stopped for GC (built by stopforgc()). */
42 #if defined(THREADS) || defined(DSTM) || defined(STM)
44 struct listitem * list=NULL;
48 //Need to check if pointers are transaction pointers
49 //this also catches the special flag value of 1 for local copies
/*
 * ENQUEUE(orig, dst): if `orig` is a real in-heap pointer (low bit clear,
 * inside [curr_heapbase, curr_heaptop)), copy it to to-space via
 * gc_createcopy and overwrite `dst` with the forwarding copy. Several
 * config-specific variants follow (#else / #elif branches; intervening
 * lines not shown).
 *
 * NOTE(review): "©" in the gc_createcopy calls below looks like mojibake
 * for "&copy" (address of a local `copy` variable declared on a line not
 * shown) -- verify against the original source before building.
 */
51 #define ENQUEUE(orig, dst) \
52 if ((!(((unsigned int)orig)&0x1))) { \
53 if (orig>=curr_heapbase&&orig<curr_heaptop) { \
55 if (gc_createcopy(orig,©)) \
61 #define ENQUEUE(orig, dst) \
62 if ((!(((unsigned int)orig)&0x1))) { \
63 if (orig>=curr_heapbase&&orig<curr_heaptop) { \
65 if (gc_createcopy(orig,©)) \
70 #elif defined(FASTCHECK)
71 #define ENQUEUE(orig, dst) \
72 if (((unsigned int)orig)!=1) { \
74 if (gc_createcopy(orig,©)) \
78 #define ENQUEUE(orig, dst) \
80 if (gc_createcopy(orig,©)) \
/* Link field of struct pointerblock (definition opens on lines not shown);
   pointerblocks chain together to form the GC work queues. */
87 struct pointerblock *next;
/* Current (from-space) heap: objects live in [curr_heapbase, curr_heaptop).
   curr_heapptr is the bump-allocation cursor; curr_heapgcpoint is the fill
   level that triggers a collection (see GCPOINT / mygcmalloc). */
90 void * curr_heapbase=0;
91 void * curr_heapptr=0;
92 void * curr_heapgcpoint=0;
93 void * curr_heaptop=0;
/* Work queue of copied-but-unscanned objects: enqueue() writes into the
   `head` block, dequeue() reads from the `tail` block; `spare` is
   presumably a cached free block for reuse -- the reuse logic is on lines
   not shown. */
100 struct pointerblock *head=NULL;
102 struct pointerblock *tail=NULL;
104 struct pointerblock *spare=NULL;
/* Append ptr to the head pointerblock of the GC work queue; when the
   current block is full (headindex==NUMPTRS), malloc a fresh block and
   link it in (linking lines, and any `spare` reuse, are not shown). */
106 void enqueue(void *ptr) {
107 if (headindex==NUMPTRS) {
108 struct pointerblock * tmp;
113 tmp=malloc(sizeof(struct pointerblock));
118 head->ptrs[headindex++]=ptr;
/* (dequeue -- its function header is on a line not shown) when the tail
   block is exhausted, advance to the next pointerblock in the chain, then
   return the next queued pointer. */
122 if (tailindex==NUMPTRS) {
123 struct pointerblock *tmp=tail;
131 return tail->ptrs[tailindex++];
/* (queue-emptiness test -- enclosing function header not shown): empty
   when head and tail are the same block and the read index has caught up
   with the write index. */
135 if ((head==tail)&&(tailindex==headindex))
/* Separate queue of tag descriptors, fixed up after the main scan so tags
   do not keep their objects alive (see end of collect()). */
141 struct pointerblock *taghead=NULL;
/* Append a tag descriptor to the taghead chain, allocating a new
   pointerblock when the current one is full (linking lines not shown). */
144 void enqueuetag(struct ___TagDescriptor___ *ptr) {
145 if (tagindex==NUMPTRS) {
146 struct pointerblock * tmp=malloc(sizeof(struct pointerblock));
151 taghead->ptrs[tagindex++]=ptr;
/*
 * collect: stop-the-world copying collection.  Scans all roots (shadow
 * stacks, object sets, conversion tables, task descriptors), then drains
 * the work queue copying reachable objects into to-space, and finally
 * fixes up tag references.  `stackptr` is the calling thread's shadow
 * stack (a garbagelist chain of pointer arrays).
 */
156 void collect(struct garbagelist * stackptr) {
/* Phase 1: wait until every other thread has parked itself via
   stopforgc(); they signal gccond as they arrive. */
157 #if defined(THREADS)||defined(DSTM)||defined(STM)
159 pthread_mutex_lock(&gclistlock);
161 if ((listcount+1)==threadcount) {
162 break; /* Have all other threads stopped */
164 pthread_cond_wait(&gccond, &gclistlock);
/* Phase 2: set up empty work queues for objects and tags. */
171 head=tail=malloc(sizeof(struct pointerblock));
177 taghead=malloc(sizeof(struct pointerblock));
182 /* Check current stack */
183 #if defined(THREADS)||defined(DSTM)||defined(STM)
185 struct listitem *listptr=list;
/* Walk each garbagelist frame and ENQUEUE every stack slot; the slot
   itself is updated with the object's forwarding copy. */
189 while(stackptr!=NULL) {
191 for(i=0; i<stackptr->size; i++) {
192 void * orig=stackptr->array[i];
193 ENQUEUE(orig, stackptr->array[i]);
195 stackptr=stackptr->next;
197 #if defined(THREADS)||defined(DSTM)||defined(STM)
198 /* Go to next thread */
/* Each stopped thread's held-lock list and shadow stack are roots too. */
201 void * orig=listptr->locklist;
202 ENQUEUE(orig, listptr->locklist);
204 stackptr=listptr->stackptr;
205 listptr=listptr->next;
/* FASTCHECK revert list root -- presumably guarded by an #ifdef on lines
   not shown. */
213 ENQUEUE(___fcrevert___, ___fcrevert___);
218 /* Update objectsets */
220 for(i=0; i<NUMCLASSES; i++) {
221 #if !defined(MULTICORE)
222 struct parameterwrapper * p=objectqueues[i];
224 struct ObjectHash * set=p->objectset;
225 struct ObjectNode * ptr=set->listhead;
/* Keys are object pointers: forward them in place, then rehash since the
   hash depends on the (now changed) addresses. */
227 void *orig=(void *)ptr->key;
228 ENQUEUE(orig, *((void **)(&ptr->key)));
231 ObjectHashrehash(set); /* Rehash the table */
/* Forward-conversion table: keys are objects. */
240 struct cnode * ptr=forward->listhead;
242 void * orig=(void *)ptr->key;
243 ENQUEUE(orig, *((void **)(&ptr->key)));
246 crehash(forward); /* Rehash the table */
/* Reverse-conversion table: the visible lines forward the values. */
250 struct cnode * ptr=reverse->listhead;
252 void *orig=(void *)ptr->val;
253 ENQUEUE(orig, *((void**)(&ptr->val)));
/* File-descriptor map: data pointers are objects. */
260 struct RuntimeNode * ptr=fdtoobject->listhead;
262 void *orig=(void *)ptr->data;
263 ENQUEUE(orig, *((void**)(&ptr->data)));
269 /* Update current task descriptor */
271 for(i=0; i<currtpd->numParameters; i++) {
272 void *orig=currtpd->parameterArray[i];
273 ENQUEUE(orig, currtpd->parameterArray[i]);
278 /* Update active tasks */
280 struct genpointerlist * ptr=activetasks->list;
282 struct taskparamdescriptor *tpd=ptr->src;
284 for(i=0; i<tpd->numParameters; i++) {
285 void * orig=tpd->parameterArray[i];
286 ENQUEUE(orig, tpd->parameterArray[i]);
290 genrehash(activetasks);
293 /* Update failed tasks */
295 struct genpointerlist * ptr=failedtasks->list;
297 struct taskparamdescriptor *tpd=ptr->src;
299 for(i=0; i<tpd->numParameters; i++) {
300 void * orig=tpd->parameterArray[i];
301 ENQUEUE(orig, tpd->parameterArray[i]);
305 genrehash(failedtasks);
/* Phase 3: drain the work queue (Cheney-style scan).  For each queued
   old object, its second word holds the forwarding pointer to the copy
   (set by gc_createcopy); scan the copy's fields by type. */
310 void * ptr=dequeue();
311 void *cpy=((void **)ptr)[1];
312 int type=((int *)cpy)[0];
313 unsigned int * pointer;
317 /* Nothing is inside */
/* pointerarray[type] describes where the pointers live: 0 (presumably) =
   no pointers / primitive array, 1 = array of pointers, otherwise a table
   of field offsets -- missing branches make this partly inferred. */
322 pointer=pointerarray[type];
324 /* Array of primitives */
326 #if defined(DSTM)||defined(FASTCHECK)
/* Even primitive arrays carry DSTM/FASTCHECK header pointers to trace. */
327 struct ArrayObject *ao=(struct ArrayObject *) ptr;
328 struct ArrayObject *ao_cpy=(struct ArrayObject *) cpy;
329 ENQUEUE((void *)ao->___nextobject___, *((void **)&ao_cpy->___nextobject___));
330 ENQUEUE((void *)ao->___localcopy___, *((void **)&ao_cpy->___localcopy___));
332 } else if (((int)pointer)==1) {
333 /* Array of pointers */
334 struct ArrayObject *ao=(struct ArrayObject *) ptr;
335 struct ArrayObject *ao_cpy=(struct ArrayObject *) cpy;
336 #if (defined(DSTM)||defined(FASTCHECK))
337 ENQUEUE((void *)ao->___nextobject___, *((void **)&ao_cpy->___nextobject___));
338 ENQUEUE((void *)ao->___localcopy___, *((void **)&ao_cpy->___localcopy___));
/* Elements start one int past ___length___ in the array layout. */
340 int length=ao->___length___;
342 for(i=0; i<length; i++) {
343 void *objptr=((void **)(((char *)&ao->___length___)+sizeof(int)))[i];
344 ENQUEUE(objptr, ((void **)(((char *)&ao_cpy->___length___)+sizeof(int)))[i]);
/* Normal object: pointer[] is an offset table (entry 0 presumably holds
   the count `size`; its read is on a line not shown). */
349 for(i=1; i<=size; i++) {
350 unsigned int offset=pointer[i];
351 void * objptr=*((void **)(((int)ptr)+offset));
352 ENQUEUE(objptr, *((void **)(((int)cpy)+offset)));
/* Phase 4: let the stopped threads resume. */
360 #if defined(THREADS)||defined(DSTM)
362 pthread_mutex_unlock(&gclistlock);
367 /* Fix up the references from tags. This can't be done earlier,
368    because we don't want tags to keep objects alive */
370 while(taghead!=NULL) {
372 struct pointerblock *tmp=taghead->next;
373 for(i=0; i<tagindex; i++) {
374 struct ___TagDescriptor___ *tagd=taghead->ptrs[i];
375 struct ___Object___ *obj=tagd->flagptr;
/* Second word of the old tag descriptor is its forwarding pointer. */
376 struct ___TagDescriptor___ *copy=((struct ___TagDescriptor___**)tagd)[1];
378 /* Zero object case */
379 } else if (obj->type==-1) {
380 /* Single object case */
/* type==-1 marks a forwarded object; follow its forwarding pointer. */
381 copy->flagptr=((struct ___Object___**)obj)[1];
382 } else if (obj->type==OBJECTARRAYTYPE) {
/* Tag bound to many objects: rebuild the holding array with only the
   objects that survived (type==-1, i.e. forwarded). */
384 struct ArrayObject *ao=(struct ArrayObject *) obj;
388 struct ArrayObject *aonew;
390 /* Count live objects */
391 for(j=0; j<ao->___cachedCode___; j++) {
392 struct ___Object___ * tobj=ARRAYGET(ao, struct ___Object___ *, j);
/* Round capacity up to a multiple of OBJECTARRAYINTERVAL. */
397 livecount=((livecount-1)/OBJECTARRAYINTERVAL+1)*OBJECTARRAYINTERVAL;
398 aonew=(struct ArrayObject *) tomalloc(sizeof(struct ArrayObject)+sizeof(struct ___Object___*)*livecount);
399 memcpy(aonew, ao, sizeof(struct ArrayObject));
400 aonew->type=OBJECTARRAYTYPE;
401 aonew->___length___=livecount;
/* Copy forwarded survivors into the new array... */
403 for(j=0; j<ao->___cachedCode___; j++) {
404 struct ___Object___ * tobj=ARRAYGET(ao, struct ___Object___ *, j);
405 if (tobj->type==-1) {
406 struct ___Object___ * tobjcpy=((struct ___Object___**)tobj)[1];
407 ARRAYSET(aonew, struct ___Object___*, k++,tobjcpy);
/* ...record the live count and NULL out the unused tail slots. */
410 aonew->___cachedCode___=k;
411 for(; k<livecount; k++) {
412 ARRAYSET(aonew, struct ___Object___*, k, NULL);
415 /* No object live anymore */
/* Bump-allocate `size` bytes from to-space during collection; returns the
   old to_heapptr (advance/alignment lines not shown).  No overflow check
   is visible here -- presumably to-space is pre-sized in mygcmalloc. */
426 void * tomalloc(int size) {
427 void * ptr=to_heapptr;
434 #if defined(THREADS)||defined(DSTM)||defined(STM)
/* GC safepoint: register this thread's shadow stack as stopped, then
   block on gclock until any in-progress collection finishes (restart
   via restartaftergc is presumably on a line not shown). */
435 void checkcollect(void * ptr) {
436 struct listitem * tmp=stopforgc((struct garbagelist *)ptr);
437 pthread_mutex_lock(&gclock); // Wait for GC
439 pthread_mutex_unlock(&gclock);
/* GC safepoint variant that also protects `revertlist`: wraps ptr and
   revertlist in a fake one-entry garbagelist on the stack ({size=1,
   array...} layout) so the collector forwards them, then reloads the
   possibly-moved revertlist afterwards. */
443 void checkcollect2(void * ptr) {
444 int ptrarray[]={1, (int)ptr, (int) revertlist};
445 struct listitem * tmp=stopforgc((struct garbagelist *)ptrarray);
446 pthread_mutex_lock(&gclock); // Wait for GC
448 pthread_mutex_unlock(&gclock);
449 revertlist=(struct ___Object___*)ptrarray[2];
/* Park the calling thread for GC: build a listitem recording its shadow
   stack, held locks (thread-specific `threadlocks`), and transaction-cache
   state, link it onto the global stopped-thread list (linking lines not
   shown), and signal the collector that one more thread has stopped.
   Returns the listitem so restartaftergc() can unlink it.
   NOTE(review): litem is malloc'd with no NULL check visible. */
454 struct listitem * stopforgc(struct garbagelist * ptr) {
455 struct listitem * litem=malloc(sizeof(struct listitem));
458 litem->locklist=pthread_getspecific(threadlocks);
461 litem->tc_size=c_size;
462 litem->tc_mask=c_mask;
463 litem->tc_table=&c_table;
466 pthread_mutex_lock(&gclistlock);
472 pthread_cond_signal(&gccond);
473 pthread_mutex_unlock(&gclistlock);
/* Undo stopforgc(): restore the thread's lock list into thread-specific
   storage and unlink litem from the doubly-linked stopped-thread list
   (the prev==NULL head-update branch is on lines not shown; litem is
   presumably freed elsewhere or leaked -- verify). */
477 void restartaftergc(struct listitem * litem) {
478 pthread_mutex_lock(&gclistlock);
480 pthread_setspecific(threadlocks, litem->locklist);
482 if (litem->prev==NULL) {
485 litem->prev->next=litem->next;
487 if (litem->next!=NULL) {
488 litem->next->prev=litem->prev;
491 pthread_mutex_unlock(&gclistlock);
/*
 * mygcmalloc: allocate `size` bytes from the GC'd heap on behalf of the
 * thread whose shadow stack is `stackptr`.  Fast path (lines not shown)
 * presumably bumps curr_heapptr; the visible code is the slow path:
 * lazily create both semispaces on first use, grow to-space if the
 * heuristic HEAPSIZE demands it, run collect(), flip the semispaces, and
 * retry recursively if even post-GC occupancy is past the watermark.
 */
496 void * mygcmalloc(struct garbagelist * stackptr, int size) {
498 #if defined(THREADS)||defined(DSTM)||defined(STM)
/* If another thread holds gclock it is collecting: park ourselves so it
   can scan our stack, then block until it finishes. */
499 if (pthread_mutex_trylock(&gclock)!=0) {
500 struct listitem *tmp=stopforgc(stackptr);
501 pthread_mutex_lock(&gclock);
509 if (curr_heapptr>curr_heapgcpoint) {
510 if (curr_heapbase==0) {
511 /* Need to allocate base heap */
512 curr_heapbase=malloc(INITIALHEAPSIZE);
513 bzero(curr_heapbase, INITIALHEAPSIZE);
514 curr_heaptop=curr_heapbase+INITIALHEAPSIZE;
515 curr_heapgcpoint=((char *) curr_heapbase)+GCPOINT(INITIALHEAPSIZE);
/* First allocation comes straight off the new heap base. */
516 curr_heapptr=curr_heapbase+size;
518 to_heapbase=malloc(INITIALHEAPSIZE);
519 to_heaptop=to_heapbase+INITIALHEAPSIZE;
520 to_heapptr=to_heapbase;
522 #if defined(THREADS)||defined(DSTM)||defined(STM)
523 pthread_mutex_unlock(&gclock);
528 /* Grow the to heap if necessary */
530 int curr_heapsize=curr_heaptop-curr_heapbase;
531 int to_heapsize=to_heaptop-to_heapbase;
/* Size estimate from last GC's survivor volume, rounded up to 8 bytes
   and never smaller than the current heap. */
534 last_heapsize=HEAPSIZE(lastgcsize, size);
535 if ((last_heapsize&7)!=0)
536 last_heapsize+=(8-(last_heapsize%8));
538 if (curr_heapsize>last_heapsize)
539 last_heapsize=curr_heapsize;
540 if (last_heapsize>to_heapsize) {
/* Old to-space is presumably freed on a line not shown. */
542 to_heapbase=malloc(last_heapsize);
543 if (to_heapbase==NULL) {
544 printf("Error Allocating enough memory\n");
547 to_heaptop=to_heapbase+last_heapsize;
548 to_heapptr=to_heapbase;
552 /* Do our collection */
555 /* Update stat on previous gc size */
556 lastgcsize=(to_heapptr-to_heapbase)+size;
558 /* Flip to/curr heaps */
560 void * tmp=to_heapbase;
561 to_heapbase=curr_heapbase;
565 to_heaptop=curr_heaptop;
/* Serve the pending allocation from the freshly-compacted heap. */
569 curr_heapptr=to_heapptr+size;
570 curr_heapgcpoint=((char *) curr_heapbase)+GCPOINT(curr_heaptop-curr_heapbase);
571 to_heapptr=to_heapbase;
573 /* Not enough room :(, redo gc */
574 if (curr_heapptr>curr_heapgcpoint) {
575 #if defined(THREADS)||defined(DSTM)||defined(STM)
576 pthread_mutex_unlock(&gclock);
/* Recurse: the grow-to-space heuristic will pick a larger size. */
578 return mygcmalloc(stackptr, size);
/* Zero the unused remainder so future allocations see cleared memory. */
581 bzero(tmp, curr_heaptop-tmp);
582 #if defined(THREADS)||defined(DSTM)||defined(STM)
583 pthread_mutex_unlock(&gclock);
588 #if defined(THREADS)||defined(DSTM)||defined(STM)
589 pthread_mutex_unlock(&gclock);
/*
 * gc_createcopy: copy `orig` into to-space if not already copied, storing
 * the copy's address through copy_ptr.  Uses the object's second word as
 * the forwarding pointer.  The visible return statements are missing, but
 * callers (the ENQUEUE macros) treat a nonzero return as "newly copied,
 * enqueue for scanning".  (Function continues past the end of this
 * excerpt; type==-1 forwarded-object branch is partly missing.)
 */
596 int gc_createcopy(void * orig, void ** copy_ptr) {
601 int type=((int *)orig)[0];
/* Already forwarded: hand back the existing copy. */
603 *copy_ptr=((void **)orig)[1];
606 if (type<NUMCLASSES) {
607 /* We have a normal object */
608 int size=classsize[type];
609 void *newobj=tomalloc(size);
610 memcpy(newobj, orig, size);
/* Install the forwarding pointer in the old object's second word
   (the type word is presumably overwritten with -1 on a line not shown). */
612 ((void **)orig)[1]=newobj;
616 /* We have an array */
617 struct ArrayObject *ao=(struct ArrayObject *)orig;
618 int elementsize=classsize[type];
619 int length=ao->___length___;
620 int size=sizeof(struct ArrayObject)+length*elementsize;
621 void *newobj=tomalloc(size);
622 memcpy(newobj, orig, size);
624 ((void **)orig)[1]=newobj;