3 #include "structdefs.h"
5 #include "SimpleHash.h"
6 #include "GenericHashtable.h"
/* Heap-sizing policy for the copying collector.
   NOTE(review): this file appears to be a partial extract — interior lines
   are elided throughout (the stray leading numbers on each line look like
   original line numbers fused in by the extraction) and this #if has no
   visible body/#endif here. */
8 #if defined(THREADS) || defined(DSTM)
/* Initial size of each semispace in bytes (10 KB). */
19 #define INITIALHEAPSIZE 10*1024
/* Occupancy threshold (90% of heap size) past which allocation triggers a
   collection; see mygcmalloc(). */
20 #define GCPOINT(x) ((int)((x)*0.9))
21 /* This define takes in how full the heap is initially and returns a new heap size to use */
22 #define HEAPSIZE(x,y) (((int)((x)/0.6))+y)
/* GC roots owned by the task runtime (defined elsewhere).  collect() walks
   all of these and forwards any heap pointers they hold. */
25 extern struct genhashtable * activetasks;
26 extern struct parameterwrapper * objectqueues[NUMCLASSES];
27 extern struct genhashtable * failedtasks;
/* Parameter descriptor of the task currently executing, if any. */
28 extern struct taskparamdescriptor *currtpd;
/* Runtime hash tables whose keys/data are heap pointers: forward/reverse
   maps and the fd-to-object map are all scanned and rehashed during GC. */
29 extern struct RuntimeHash *forward;
30 extern struct RuntimeHash *reverse;
31 extern struct RuntimeHash *fdtoobject;
34 #if defined(THREADS) || defined(DSTM)
/* Global list of threads stopped for GC; each entry carries that thread's
   stack roots and lock list.  Threads register via stopforgc() and remove
   themselves via restartaftergc(). */
36 struct listitem * list=NULL;
/* In DSTM builds, low-bit-tagged values are transaction pointers, not heap
   pointers, so the threaded variant of ENQUEUE filters them out first. */
40 //Need to check if pointers are transaction pointers
/* ENQUEUE(orig, dst): if `orig` is a from-space heap pointer, copy/forward
   it via gc_createcopy() and store the new address into `dst`.
   NOTE(review): the two variants below (THREADS/DSTM vs. plain) are both
   partially elided in this extract, and '©' is mojibake for '&copy'
   (the HTML entity '&copy;' was rendered) — it should read
   gc_createcopy(orig,&copy).  Confirm against the original source. */
42 #define ENQUEUE(orig, dst) \
43 if ((!(((unsigned int)orig)&0x1))) {\
44 if (orig>=curr_heapbase&&orig<curr_heaptop) {\
46 if (gc_createcopy(orig,©))\
/* Non-threaded variant: every candidate pointer is forwarded directly. */
52 #define ENQUEUE(orig, dst) \
54 if (gc_createcopy(orig,©))\
/* Chunk of the gray-object work queue: a fixed array of NUMPTRS pointers
   chained through `next`.  NOTE(review): the struct body is partially
   elided in this extract. */
61 struct pointerblock *next;
/* Bounds of the current (from-space) heap: [curr_heapbase, curr_heaptop).
   curr_heapptr is the bump-allocation cursor; curr_heapgcpoint is the
   GCPOINT() threshold that triggers collection in mygcmalloc(). */
64 void * curr_heapbase=0;
65 void * curr_heapptr=0;
66 void * curr_heapgcpoint=0;
67 void * curr_heaptop=0;
/* Gray queue: enqueue() pushes at `head`, dequeue() pops at `tail`;
   `spare` caches one exhausted block for reuse. */
74 struct pointerblock *head=NULL;
76 struct pointerblock *tail=NULL;
78 struct pointerblock *spare=NULL;
/* Push a forwarded heap object onto the gray queue for later scanning.
   When the head block is full, a new pointerblock is obtained (malloc or,
   presumably, the `spare` block) and linked in.
   NOTE(review): the lines that reuse `spare` and link `tmp` into the chain
   are elided in this extract — confirm against the original source. */
80 void enqueue(void *ptr) {
81 if (headindex==NUMPTRS) {
82 struct pointerblock * tmp;
87 tmp=malloc(sizeof(struct pointerblock));
92 head->ptrs[headindex++]=ptr;
/* dequeue(): pop the next gray pointer from the tail block; when the tail
   block is exhausted it is recycled (recycling/advance lines elided in
   this extract). */
96 if (tailindex==NUMPTRS) {
97 struct pointerblock *tmp=tail;
105 return tail->ptrs[tailindex++];
/* Queue-empty test: head and tail are the same block and both cursors
   coincide — no pending gray objects. */
109 if ((head==tail)&&(tailindex==headindex))
/* Separate queue of tag descriptors.  Tags are fixed up AFTER the main
   trace (see collect()) so that a tag alone does not keep its tagged
   objects alive. */
115 struct pointerblock *taghead=NULL;
/* Append a tag descriptor to the tag queue, growing it by one
   pointerblock when full.  NOTE(review): the lines linking `tmp` into the
   chain are elided in this extract. */
118 void enqueuetag(struct ___TagDescriptor___ *ptr) {
119 if (tagindex==NUMPTRS) {
120 struct pointerblock * tmp=malloc(sizeof(struct pointerblock));
125 taghead->ptrs[tagindex++]=ptr;
/* Stop-the-world copying collection.
 *
 * Phases visible in this (partially elided) extract:
 *   1. THREADS/DSTM: wait under gclistlock until every other thread has
 *      parked itself via stopforgc().
 *   2. Reset the gray queue and tag queue.
 *   3. Trace roots: the current stack (and each stopped thread's stack and
 *      lock list), the per-class object sets, the forward/reverse/fdtoobject
 *      hash tables, the current task descriptor, and the active/failed task
 *      tables — ENQUEUE forwards each root into to-space.
 *   4. Drain the gray queue, scanning each copied object's pointer fields
 *      using the per-type `pointerarray` layout descriptors.
 *   5. Fix up tag descriptors last so tags do not keep objects alive.
 *
 * NOTE(review): many interior lines (closing braces, loop headers, some
 * statements) are elided in this extract; comments below describe only what
 * the visible lines establish. */
130 void collect(struct garbagelist * stackptr) {
131 #if defined(THREADS)||defined(DSTM)
/* Block until all other mutator threads have checked in. */
133 pthread_mutex_lock(&gclistlock);
135 if ((listcount+1)==threadcount) {
136 break; /* Have all other threads stopped */
138 pthread_cond_wait(&gccond, &gclistlock);
/* Fresh (empty) gray queue and tag queue for this collection cycle. */
145 head=tail=malloc(sizeof(struct pointerblock));
151 taghead=malloc(sizeof(struct pointerblock));
156 /* Check current stack */
157 #if defined(THREADS)||defined(DSTM)
159 struct listitem *listptr=list;
/* Walk the chain of garbagelist stack frames, forwarding every slot. */
163 while(stackptr!=NULL) {
165 for(i=0;i<stackptr->size;i++) {
166 void * orig=stackptr->array[i];
167 ENQUEUE(orig, stackptr->array[i]);
169 stackptr=stackptr->next;
171 #if defined(THREADS)||defined(DSTM)
172 /* Go to next thread */
/* Each stopped thread contributes its lock list and its own stack chain. */
174 void * orig=listptr->locklist;
175 ENQUEUE(orig, listptr->locklist);
176 stackptr=listptr->stackptr;
177 listptr=listptr->next;
186 /* Update objectsets */
/* Forward every key in each class's object set, then rehash: keys are
   heap addresses, so copying changes their hash. */
188 for(i=0;i<NUMCLASSES;i++) {
189 struct parameterwrapper * p=objectqueues[i];
191 struct ObjectHash * set=p->objectset;
192 struct ObjectNode * ptr=set->listhead;
194 void *orig=(void *)ptr->key;
195 ENQUEUE(orig, *((void **)(&ptr->key)));
198 ObjectHashrehash(set); /* Rehash the table */
/* Forward keys of the `forward` table and data of `reverse`/`fdtoobject`. */
205 struct RuntimeNode * ptr=forward->listhead;
207 void * orig=(void *)ptr->key;
208 ENQUEUE(orig, *((void **)(&ptr->key)));
211 RuntimeHashrehash(forward); /* Rehash the table */
215 struct RuntimeNode * ptr=reverse->listhead;
217 void *orig=(void *)ptr->data;
218 ENQUEUE(orig, *((void**)(&ptr->data)));
224 struct RuntimeNode * ptr=fdtoobject->listhead;
226 void *orig=(void *)ptr->data;
227 ENQUEUE(orig, *((void**)(&ptr->data)));
233 /* Update current task descriptor */
235 for(i=0;i<currtpd->numParameters;i++) {
236 void *orig=currtpd->parameterArray[i];
237 ENQUEUE(orig, currtpd->parameterArray[i]);
242 /* Update active tasks */
244 struct genpointerlist * ptr=activetasks->list;
246 struct taskparamdescriptor *tpd=ptr->src;
248 for(i=0;i<tpd->numParameters;i++) {
249 void * orig=tpd->parameterArray[i];
250 ENQUEUE(orig, tpd->parameterArray[i]);
254 genrehash(activetasks);
257 /* Update failed tasks */
259 struct genpointerlist * ptr=failedtasks->list;
261 struct taskparamdescriptor *tpd=ptr->src;
263 for(i=0;i<tpd->numParameters;i++) {
264 void * orig=tpd->parameterArray[i];
265 ENQUEUE(orig, tpd->parameterArray[i]);
269 genrehash(failedtasks);
/* Drain the gray queue: `ptr` is the from-space object, whose second word
   holds the forwarding pointer `cpy` to its to-space copy. */
274 void * ptr=dequeue();
275 void *cpy=((void **)ptr)[1];
276 int type=((int *)cpy)[0];
277 unsigned int * pointer;
281 /* Nothing is inside */
/* Per-type layout: pointerarray[type] is 0 for pointer-free objects,
   1 for pointer arrays, else an offset table (pointer[0] presumably holds
   the count — confirm; `size` is read from elided lines). */
286 pointer=pointerarray[type];
288 /* Array of primitives */
290 } else if (((int)pointer)==1) {
291 /* Array of pointers */
292 struct ArrayObject *ao=(struct ArrayObject *) ptr;
293 struct ArrayObject *ao_cpy=(struct ArrayObject *) cpy;
294 int length=ao->___length___;
/* Element storage begins one int past ___length___ in both copies. */
296 for(i=0;i<length;i++) {
297 void *objptr=((void **)(((char *)& ao->___length___)+sizeof(int)))[i];
298 ENQUEUE(objptr, ((void **)(((char *)& ao_cpy->___length___)+sizeof(int)))[i]);
/* Normal object: forward each field at the recorded offsets.
   NOTE(review): casting pointers through (int) assumes a 32-bit target. */
303 for(i=1;i<=size;i++) {
304 unsigned int offset=pointer[i];
305 void * objptr=*((void **)(((int)ptr)+offset));
306 ENQUEUE(objptr, *((void **) (((int)cpy)+offset)));
314 #if defined(THREADS)||defined(DSTM)
316 pthread_mutex_unlock(&gclistlock);
322 /* Fix up the references from tags. This can't be done earlier,
323 because we don't want tags to keep objects alive */
325 while(taghead!=NULL) {
327 struct pointerblock *tmp=taghead->next;
328 for(i=0;i<tagindex;i++) {
329 struct ___TagDescriptor___ *tagd=taghead->ptrs[i];
330 struct ___Object___ *obj=tagd->flagptr;
/* The tag's to-space copy is read from the tag's forwarding word. */
331 struct ___TagDescriptor___ *copy=((struct ___TagDescriptor___**)tagd)[1];
333 /* Zero object case */
/* type==-1 marks a forwarded object; its second word is the new address. */
334 } else if (obj->type==-1) {
335 /* Single object case */
336 copy->flagptr=((struct ___Object___**)obj)[1];
337 } else if (obj->type==OBJECTARRAYTYPE) {
/* flagptr points at an array of tagged objects; rebuild it with only the
   still-live (forwarded) elements. */
339 struct ArrayObject *ao=(struct ArrayObject *) obj;
343 struct ArrayObject *aonew;
345 /* Count live objects */
346 for(j=0;j<ao->___cachedCode___;j++) {
347 struct ___Object___ * tobj=ARRAYGET(ao, struct ___Object___ *, j);
/* Round capacity up to a multiple of OBJECTARRAYINTERVAL. */
352 livecount=((livecount-1)/OBJECTARRAYINTERVAL+1)*OBJECTARRAYINTERVAL;
353 aonew=(struct ArrayObject *) tomalloc(sizeof(struct ArrayObject)+sizeof(struct ___Object___*)*livecount);
354 memcpy(aonew, ao, sizeof(struct ArrayObject));
355 aonew->type=OBJECTARRAYTYPE;
356 aonew->___length___=livecount;
/* Copy forwarded (type==-1) elements into the new array; NULL the rest. */
358 for(j=0;j<ao->___cachedCode___;j++) {
359 struct ___Object___ * tobj=ARRAYGET(ao, struct ___Object___ *, j);
360 if (tobj->type==-1) {
361 struct ___Object___ * tobjcpy=((struct ___Object___**)tobj)[1];
362 ARRAYSET(aonew, struct ___Object___*, k++,tobjcpy);
365 aonew->___cachedCode___=k;
366 for(;k<livecount;k++) {
367 ARRAYSET(aonew, struct ___Object___*, k, NULL);
370 /* No object live anymore */
/* Bump allocator in to-space used while copying: returns the current
   to_heapptr.  NOTE(review): the cursor advance / alignment lines are
   elided in this extract — confirm against the original source. */
381 void * tomalloc(int size) {
382 void * ptr=to_heapptr;
389 #if defined(THREADS)||defined(DSTM)
/* GC safe point for mutator threads: park this thread's roots on the GC
   list, then block on gclock until the collector releases it, then resume
   (restartaftergc on the elided lines, presumably — confirm). */
391 void checkcollect(void * ptr) {
393 struct listitem * tmp=stopforgc((struct garbagelist *)ptr);
394 pthread_mutex_lock(&gclock); // Wait for GC
396 pthread_mutex_unlock(&gclock);
/* Register the calling thread as stopped-for-GC: record its stack roots
   and thread-local lock list in a new listitem, link it onto the global
   `list` (linking lines elided), and signal the collector, which waits on
   gccond for all threads to check in.  Returns the listitem so the caller
   can later pass it to restartaftergc(). */
401 struct listitem * stopforgc(struct garbagelist * ptr) {
402 struct listitem * litem=malloc(sizeof(struct listitem));
404 litem->locklist=pthread_getspecific(threadlocks);
406 pthread_mutex_lock(&gclistlock);
412 pthread_cond_signal(&gccond);
413 pthread_mutex_unlock(&gclistlock);
/* Undo stopforgc() after collection: restore the (possibly forwarded)
   lock list into thread-local storage and unlink litem from the global
   doubly-linked stopped-thread list under gclistlock.
   NOTE(review): the head-of-list branch body and the free/listcount lines
   are elided in this extract. */
417 void restartaftergc(struct listitem * litem) {
418 pthread_mutex_lock(&gclistlock);
419 pthread_setspecific(threadlocks, litem->locklist);
420 if (litem->prev==NULL) {
423 litem->prev->next=litem->next;
425 if (litem->next!=NULL) {
426 litem->next->prev=litem->prev;
429 pthread_mutex_unlock(&gclistlock);
/* GC-aware allocator: bump-allocate `size` bytes from the current heap,
 * triggering a copying collection when the GCPOINT watermark is passed.
 *
 * Paths visible in this (partially elided) extract:
 *   - THREADS/DSTM: contend on gclock; if another thread is collecting,
 *     park via stopforgc() and wait.
 *   - First call: allocate and zero both semispaces.
 *   - Watermark hit: grow to-space per HEAPSIZE() if needed, run collect(),
 *     flip the semispaces, and retry recursively if still too full.
 *
 * NOTE(review): the fast-path bump allocation and several closing braces
 * are elided; pointer/size arithmetic on void*/int assumes a 32-bit GCC
 * target. */
434 void * mygcmalloc(struct garbagelist * stackptr, int size) {
436 #if defined(THREADS)||defined(DSTM)
/* Another thread holds gclock (collecting): park and wait our turn. */
437 if (pthread_mutex_trylock(&gclock)!=0) {
438 struct listitem *tmp=stopforgc(stackptr);
439 pthread_mutex_lock(&gclock);
/* Past the GC watermark — either first-time init or a real collection. */
447 if (curr_heapptr>curr_heapgcpoint) {
448 if (curr_heapbase==0) {
449 /* Need to allocate base heap */
450 curr_heapbase=malloc(INITIALHEAPSIZE);
451 bzero(curr_heapbase, INITIALHEAPSIZE);
452 curr_heaptop=curr_heapbase+INITIALHEAPSIZE;
453 curr_heapgcpoint=((char *) curr_heapbase)+GCPOINT(INITIALHEAPSIZE);
/* Reserve this first allocation immediately. */
454 curr_heapptr=curr_heapbase+size;
456 to_heapbase=malloc(INITIALHEAPSIZE);
457 to_heaptop=to_heapbase+INITIALHEAPSIZE;
458 to_heapptr=to_heapbase;
460 #if defined(THREADS)||defined(DSTM)
461 pthread_mutex_unlock(&gclock);
466 /* Grow the to heap if necessary */
468 int curr_heapsize=curr_heaptop-curr_heapbase;
469 int to_heapsize=to_heaptop-to_heapbase;
/* Size target from last cycle's live data; round up to 4-byte multiple. */
472 last_heapsize=HEAPSIZE(lastgcsize, size);
473 if ((last_heapsize%4)!=0)
474 last_heapsize+=(4-(last_heapsize%4));
/* Never shrink below the current heap's size. */
476 if (curr_heapsize>last_heapsize)
477 last_heapsize=curr_heapsize;
478 if (last_heapsize>to_heapsize) {
/* NOTE(review): the free() of the old to-space is elided here — confirm
   the original releases it before this malloc. */
480 to_heapbase=malloc(last_heapsize);
481 to_heaptop=to_heapbase+last_heapsize;
482 to_heapptr=to_heapbase;
486 /* Do our collection */
489 /* Update stat on previous gc size */
490 lastgcsize=(to_heapptr-to_heapbase)+size;
492 /* Flip to/curr heaps */
494 void * tmp=to_heapbase;
495 to_heapbase=curr_heapbase;
499 to_heaptop=curr_heaptop;
/* Reserve the pending request in the newly-current heap. */
503 curr_heapptr=to_heapptr+size;
504 curr_heapgcpoint=((char *) curr_heapbase)+GCPOINT(curr_heaptop-curr_heapbase);
505 to_heapptr=to_heapbase;
507 /* Not enough room :(, redo gc */
508 if (curr_heapptr>curr_heapgcpoint) {
509 #if defined(THREADS)||defined(DSTM)
510 pthread_mutex_unlock(&gclock);
/* Retry: the grow path above will enlarge to-space on the next pass. */
512 return mygcmalloc(stackptr, size);
/* Zero the unused tail of the flipped heap so new objects start cleared. */
515 bzero(tmp, curr_heaptop-tmp);
516 #if defined(THREADS)||defined(DSTM)
517 pthread_mutex_unlock(&gclock);
522 #if defined(THREADS)||defined(DSTM)
523 pthread_mutex_unlock(&gclock);
530 int gc_createcopy(void * orig, void ** copy_ptr) {
535 int type=((int *)orig)[0];
537 *copy_ptr=((void **)orig)[1];
539 } if (type<NUMCLASSES) {
540 /* We have a normal object */
541 int size=classsize[type];
542 void *newobj=tomalloc(size);
543 memcpy(newobj, orig, size);
545 ((void **)orig)[1]=newobj;
549 /* We have an array */
550 struct ArrayObject *ao=(struct ArrayObject *)orig;
551 int elementsize=classsize[type];
552 int length=ao->___length___;
553 int size=sizeof(struct ArrayObject)+length*elementsize;
554 void *newobj=tomalloc(size);
555 memcpy(newobj, orig, size);
557 ((void **)orig)[1]=newobj;