3 #include "structdefs.h"
5 #include "SimpleHash.h"
7 #include "GenericHashtable.h"
9 #if defined(THREADS) || defined(DSTM) || defined(STM)
18 #include "workschedule.h"
19 extern volatile int numWorkSchedWorkers;
28 #include <DSTM/interface_recovery/dstm.h>
30 #include <DSTM/interface/dstm.h>
37 #include "delaycomp.h"
43 #ifndef INITIALHEAPSIZE_MB
44 #define INITIALHEAPSIZE_MB (256)
47 #define INITIALHEAPSIZE INITIALHEAPSIZE_MB*1024*1024L
48 #define GCPOINT(x) ((INTPTR)((x)*0.99))
49 /* This define takes in how full the heap is initially and returns a new heap size to use */
50 #define HEAPSIZE(x,y) ((INTPTR)(x+y))*2
53 extern struct genhashtable * activetasks;
55 extern struct parameterwrapper * objectqueues[NUMCLASSES];
57 extern struct genhashtable * failedtasks;
58 extern struct taskparamdescriptor *currtpd;
59 extern struct ctable *forward;
60 extern struct ctable *reverse;
61 extern struct RuntimeHash *fdtoobject;
66 long garbagearray[MAXSTATS];
69 #if defined(THREADS) || defined(DSTM) || defined(STM)||defined(MLP)
71 struct listitem * list=NULL;
74 __thread struct listitem litem;
79 __thread SESEcommon* seseCommon;
82 //Need to check if pointers are transaction pointers
83 //this also catches the special flag value of 1 for local copies
/*
 * GC marking macros, selected per build configuration (DSTM/STM/FASTCHECK/
 * default).  Each ENQUEUE/SENQUEUE variant appears to test whether `orig`
 * lies in the current from-space (curr_heapbase..curr_heaptop) and, when
 * gc_createcopy succeeds, records the forwarded copy into `dst`.
 * NOTE(review): this excerpt is elided -- the #if/#else directives between
 * the variants and several continuation lines of each macro body are
 * missing, so the definitions below are incomplete as shown.
 * NOTE(review): "©)" below is extraction corruption of "&copy)" (address of
 * the local copy pointer, mangled as an HTML entity); restore "&copy" when
 * repairing the file.  Comments are deliberately placed only before the
 * first #define: inserting lines between backslash-continued macro lines
 * would change the macro bodies.
 */
85 #define ENQUEUE(orig, dst) \
86   if ((!(((unsigned int)orig)&0x1))) { \
87     if (orig>=curr_heapbase&&orig<curr_heaptop) { \
89       if (gc_createcopy(orig,©)) \
95 #define ENQUEUE(orig, dst) \
96   if (orig>=curr_heapbase&&orig<curr_heaptop) { \
98     if (gc_createcopy(orig,©)) \
102 #define SENQUEUE(orig, dst) \
105     if (gc_createcopy(orig,©)) \
109 #elif defined(FASTCHECK)
110 #define ENQUEUE(orig, dst) \
111   if (((unsigned int)orig)!=1) { \
113     if (gc_createcopy(orig,©)) \
117 #define ENQUEUE(orig, dst) \
118   if (orig>=curr_heapbase&&orig<curr_heaptop) { \
120     if (gc_createcopy(orig,©)) \
/*
 * pointerblock: a fixed-size chunk (NUMPTRS slots) in a singly linked list;
 * used below as the GC's work queue of gray pointers (see enqueue/dequeue).
 * NOTE(review): the struct's closing brace is elided from this excerpt.
 */
127 struct pointerblock {
128   void * ptrs[NUMPTRS];
129   struct pointerblock *next;
/* From-space bounds and allocation/GC-trigger cursors; to_heapbase is the
 * base of the copy target space.  All start unset. */
132 void * curr_heapbase=0;
133 void * curr_heapptr=0;
134 void * curr_heapgcpoint=0;
135 void * curr_heaptop=0;
137 void * to_heapbase=0;
/* Gray-queue head/tail chunks, plus one spare chunk apparently kept to
 * avoid a malloc on the next growth (see enqueue). */
143 struct pointerblock *head=NULL;
145 struct pointerblock *tail=NULL;
147 struct pointerblock *spare=NULL;
/*
 * enqueue: push ptr onto the gray queue; when the head chunk is full a new
 * pointerblock is obtained (a spare is presumably reused first -- the
 * branch taking the spare is elided; only the malloc else-arm is visible).
 * NOTE(review): several interior lines of this function are missing from
 * this excerpt, including the reset of headindex and linking of tmp.
 */
149 void enqueue(void *ptr) {
150   if (headindex==NUMPTRS) {
151     struct pointerblock * tmp;
155     } else tmp=malloc(sizeof(struct pointerblock));
160   head->ptrs[headindex++]=ptr;
/* NOTE(review): the lines below belong to dequeue() -- its signature and
 * several statements (advancing tail, recycling the emptied chunk) are
 * elided from this excerpt. */
164   if (tailindex==NUMPTRS) {
165     struct pointerblock *tmp=tail;
173   return tail->ptrs[tailindex++];
/*
 * fixobjlist: forward every object pointer recorded in the first `offset`
 * slots of an objlist, updating each slot in place via SENQUEUE.
 * NOTE(review): the declaration of `i` and the loop over chained objlist
 * nodes (if any) are elided from this excerpt.
 */
177 void fixobjlist(struct objlist * ptr) {
180     for(i=0;i<ptr->offset;i++) {
181       SENQUEUE(ptr->objs[i], ptr->objs[i]);
/*
 * fixtable: rebuild a transaction-cache chained hash table during GC.
 * Allocates a fresh bucket array, walks every chain of the old table,
 * forwards each cached object (key and val) with SENQUEUE, scans the
 * object's pointer fields per its pointerarray[] layout descriptor, and
 * re-inserts the entry at its new hash index -- either into the bucket
 * head or into a chain node drawn from the cliststruct_t pool.
 * NOTE(review): many interior lines (declarations of i/j/index/key/isfirst,
 * hash recomputation, list threading, table swap and free of the old
 * buckets) are elided from this excerpt.
 */
187 void fixtable(chashlistnode_t ** tc_table, chashlistnode_t **tc_list, cliststruct_t **cstr, unsigned int tc_size) {
188   unsigned int mask=(tc_size<<4)-1;
189   chashlistnode_t *node=calloc(tc_size, sizeof(chashlistnode_t));
190   chashlistnode_t *ptr=*tc_table;
191   chashlistnode_t *curr;
195   chashlistnode_t *newlist=NULL;
196   for(i=0;i<tc_size;i++) {
199     do { //Inner loop to go through linked lists
201       chashlistnode_t *tmp,*next;
203       if ((key=(void *)curr->key) == 0) { //Exit inner loop if there the first element is 0
204 	break; //key = val =0 for element if not present within the hash table
/* Only forward values that live in the from-space; others are left alone. */
207       if (curr->val>=curr_heapbase&&curr->val<curr_heaptop) {
208 	SENQUEUE(curr->val, curr->val);
210 	//rewrite transaction cache entry
211 	void *vptr=curr->val;
212 	int type=((int *)vptr)[0];
/* pointerarray[type] encodes the object layout: 0 => primitive array,
 * 1 => array of pointers, otherwise a table of field offsets. */
213 	unsigned INTPTR *pointer=pointerarray[type];
215 	  //array of primitives - do nothing
216 	  struct ArrayObject *ao=(struct ArrayObject *) vptr;
217 	  SENQUEUE((void *)ao->___objlocation___, *((void **)&ao->___objlocation___));
218 	} else if (((INTPTR)pointer)==1) {
220 	  struct ArrayObject *ao=(struct ArrayObject *) vptr;
221 	  int length=ao->___length___;
223 	  SENQUEUE((void *)ao->___objlocation___, *((void **)&ao->___objlocation___));
/* STMARRAY path (presumably -- the guarding #ifdef is elided): scan only
 * the index ranges whose per-stripe lock value says they were touched. */
225 	  int lowindex=ao->lowindex;
226 	  int highindex=ao->highindex;
228 	  for(j=lowindex; j<=highindex; j++) {
229 	    unsigned int lockval;
230 	    GETLOCKVAL(lockval, ao, j);
231 	    if (lockval!=STMNONE) {
232 	      int lowi=(j<<INDEXSHIFT)/sizeof(void *);
233 	      int highi=lowi+(INDEXLENGTH/sizeof(void *));
234 	      for(i=lowi; i<highi;i++) {
/* Non-STMARRAY path: scan all `length` element slots, which start right
 * after the ___length___ field. */
236 	  for(i=0; i<length; i++) {
238 	    void *objptr=((void **)(((char *)&ao->___length___)+sizeof(int)))[i];
239 	    SENQUEUE(objptr, ((void **)(((char *)&ao->___length___)+sizeof(int)))[i]);
/* Object case: walk the offset table (pointer[0] = field count). */
246 	  pointer=pointerarray[OBJECTTYPE];
247 	  //handle object class
248 	  INTPTR size=pointer[0];
250 	  for(i=1; i<=size; i++) {
251 	    unsigned int offset=pointer[i];
252 	    void * objptr=*((void **)(((char *)vptr)+offset));
253 	    SENQUEUE(objptr, *((void **)(((char *)vptr)+offset)));
257 	  INTPTR size=pointer[0];
259 	  for(i=1; i<=size; i++) {
260 	    unsigned int offset=pointer[i];
261 	    void * objptr=*((void **)(((char *)vptr)+offset));
262 	    SENQUEUE(objptr, *((void **)(((char *)vptr)+offset)));
/* Re-insert at the entry's (possibly new) bucket in the fresh table. */
268       index = (((unsigned INTPTR)key) & mask) >>4;
272       // Insert into the new table
274 	tmp->key = curr->key;
275 	tmp->val = curr->val;
278       } else if (isfirst) {
279 	chashlistnode_t *newnode;
280 	if ((*cstr)->num<NUMCLIST) {
281 	  newnode=&(*cstr)->array[(*cstr)->num];
/* Pool exhausted: chain a fresh cliststruct_t block of nodes. */
285 	  cliststruct_t *tcl=calloc(1,sizeof(cliststruct_t));
288 	  newnode=&tcl->array[0];
291 	newnode->key = curr->key;
292 	newnode->val = curr->val;
293 	newnode->next = tmp->next;
294 	newnode->lnext=newlist;
300 	curr->next=tmp->next;
/* NOTE(review): lines belonging to dequeue()'s empty-check appear fused
 * here by the extraction; context is missing. */
314   if ((head==tail)&&(tailindex==headindex))
/* Separate gray queue for tag descriptors -- processed only after the main
 * trace so tags do not keep their tagged objects alive (see collect()). */
320 struct pointerblock *taghead=NULL;
/*
 * enqueuetag: push a tag descriptor onto the tag queue, growing it by a
 * fresh pointerblock when the current chunk is full.
 * NOTE(review): linking of tmp and the tagindex reset are elided here.
 */
323 void enqueuetag(struct ___TagDescriptor___ *ptr) {
324   if (tagindex==NUMPTRS) {
325     struct pointerblock * tmp=malloc(sizeof(struct pointerblock));
330   taghead->ptrs[tagindex++]=ptr;
334 #if defined(STM)||defined(THREADS)||defined(MLP)
/* Per-thread bump-allocation window handed out by mygcmalloc() below. */
335 __thread char * memorybase=NULL;
336 __thread char * memorytop=NULL;
/*
 * collect: the stop-the-world copying-collection pass.
 * Visible structure: (1) wait on gclistlock/gccond until all other threads
 * have parked; (2) push this thread's shadow stack(s) as roots; (3) scan
 * roots -- thread stacks, lock lists, transaction caches (fixtable),
 * object sets, forward/reverse ctables, fd table, current/active/failed
 * task descriptors, and SESE work-stealing deques; (4) drain the gray
 * queue via dequeue(), copying reachable objects; (5) finally rewrite
 * tag->object references so tags alone do not keep objects alive.
 * NOTE(review): this excerpt is heavily elided -- declarations (i, j, k,
 * cpy, dq, n, di, litem key setup), many closing braces, and whole #ifdef
 * arms are missing; comments below describe only what is visible.
 */
340 void collect(struct garbagelist * stackptr) {
341 #if defined(THREADS)||defined(DSTM)||defined(STM)||defined(MLP)
343   pthread_mutex_lock(&gclistlock);
345     if ((listcount+1)==threadcount) {
346       break; /* Have all other threads stopped */
348     pthread_cond_wait(&gccond, &gclistlock);
/* Chain the pointer shadow stack (and, for STMARRAY without DUALVIEW, the
 * array shadow stack) in front of the caller's garbagelist. */
352   ptrstack.prev=stackptr;
353   stackptr=(struct garbagelist *) &ptrstack;
354 #if defined(STMARRAY)&&!defined(DUALVIEW)
355   arraystack.prev=stackptr;
356   stackptr=(struct garbagelist *) &arraystack;
/* Reset per-type garbage statistics for this cycle. */
363   for(i=0;i<MAXSTATS;i++)
/* (Re)create the gray queue and the tag queue chunks. */
371     head=tail=malloc(sizeof(struct pointerblock));
377     taghead=malloc(sizeof(struct pointerblock));
/* Forward this thread's own transaction cache and locked-object list. */
384   fixtable(&c_table, &c_list, &c_structs, c_size);
387   fixobjlist(lockedobjs);
391 #if defined(STM)||defined(THREADS)||defined(MLP)
395   /* Check current stack */
396 #if defined(THREADS)||defined(DSTM)||defined(STM)||defined(MLP)
398     struct listitem *listptr=list;
/* Walk the chain of garbagelist frames for the current stack, forwarding
 * every recorded slot in place. */
402   while(stackptr!=NULL) {
404     for(i=0; i<stackptr->size; i++) {
405       void * orig=stackptr->array[i];
406       ENQUEUE(orig, stackptr->array[i]);
408     stackptr=stackptr->next;
410 #if defined(THREADS)||defined(DSTM)||defined(STM)||defined(MLP)
411     /* Go to next thread */
/* Skip our own listitem; in the MLP build also refresh the running SESE's
 * forwarding list and memory queue. */
414     if (listptr==&litem) {
416       // update forward list & memory queue for the current SESE
417       updateForwardList(&((SESEcommon*)listptr->seseCommon)->forwardList,FALSE);
418       updateMemoryQueue((SESEcommon*)(listptr->seseCommon));
420       listptr=listptr->next;
424     struct listitem *litem=pthread_getspecific(litemkey);
425     if (listptr==litem) {
426       listptr=listptr->next;
/* For every other parked thread: forward its lock list, transaction cache,
 * object lists, clear its bump-allocation base, and scan its stack. */
433       void * orig=listptr->locklist;
434       ENQUEUE(orig, listptr->locklist);
437       if ((*listptr->tc_table)!=NULL) {
438 	fixtable(listptr->tc_table, listptr->tc_list, listptr->tc_structs, listptr->tc_size);
439 	fixobjlist(listptr->objlist);
441       fixobjlist(listptr->lockedlist);
445 #if defined(STM)||defined(THREADS)||defined(MLP)
446       *(listptr->base)=NULL;
449       // update forward list & memory queue for all running SESEs.
450       if (listptr->seseCommon!=NULL) {
451         updateForwardList(&((SESEcommon*)listptr->seseCommon)->forwardList,FALSE);
452         updateMemoryQueue((SESEcommon*)(listptr->seseCommon));
455       stackptr=listptr->stackptr;
456       listptr=listptr->next;
/* FASTCHECK revert-list root (guarding #ifdef elided). */
464   ENQUEUE(___fcrevert___, ___fcrevert___);
467 #if defined(THREADS)||defined(DSTM)||defined(STM)||defined(MLP)
/* Global variables root set. */
470     stackptr=(struct garbagelist *)global_defs_p;
471     for(i=0; i<stackptr->size; i++) {
472       void * orig=stackptr->array[i];
473       ENQUEUE(orig, stackptr->array[i]);
480   /* Update objectsets */
482     for(i=0; i<NUMCLASSES; i++) {
483 #if !defined(MULTICORE)
484       struct parameterwrapper * p=objectqueues[i];
486 	struct ObjectHash * set=p->objectset;
487 	struct ObjectNode * ptr=set->listhead;
489 	  void *orig=(void *)ptr->key;
490 	  ENQUEUE(orig, *((void **)(&ptr->key)));
493 	ObjectHashrehash(set); /* Rehash the table */
/* forward/reverse ctables: forward keys resp. values, then rehash. */
502     struct cnode * ptr=forward->listhead;
504       void * orig=(void *)ptr->key;
505       ENQUEUE(orig, *((void **)(&ptr->key)));
508     crehash(forward); /* Rehash the table */
512     struct cnode * ptr=reverse->listhead;
514       void *orig=(void *)ptr->val;
515       ENQUEUE(orig, *((void**)(&ptr->val)));
/* fd->object map: forward the stored object pointers. */
522     struct RuntimeNode * ptr=fdtoobject->listhead;
524       void *orig=(void *)ptr->data;
525       ENQUEUE(orig, *((void**)(&ptr->data)));
531   /* Update current task descriptor */
533     for(i=0; i<currtpd->numParameters; i++) {
534       void *orig=currtpd->parameterArray[i];
535       ENQUEUE(orig, currtpd->parameterArray[i]);
540   /* Update active tasks */
542     struct genpointerlist * ptr=activetasks->list;
544       struct taskparamdescriptor *tpd=ptr->src;
546       for(i=0; i<tpd->numParameters; i++) {
547 	void * orig=tpd->parameterArray[i];
548 	ENQUEUE(orig, tpd->parameterArray[i]);
552     genrehash(activetasks);
555   /* Update failed tasks */
557     struct genpointerlist * ptr=failedtasks->list;
559       struct taskparamdescriptor *tpd=ptr->src;
561       for(i=0; i<tpd->numParameters; i++) {
562 	void * orig=tpd->parameterArray[i];
563 	ENQUEUE(orig, tpd->parameterArray[i]);
567     genrehash(failedtasks);
/* MLP: scan queued (not yet running) SESE records in each worker's deque.
 * Variant 1 (presumably an #ifdef arm -- the directive is elided): walk a
 * linked list of dequeItems per worker. */
579   // goes over ready-to-run SESEs
580   for( i = 0; i < numWorkSchedWorkers; ++i ) {
586       // check all the relevant indices of this
587       // node in the deque, noting if we are in
588       // the top/bottom node which can be partially
592         //SESEcommon* common = (SESEcommon*) n->itsDataArr[j];
593         //if(common==seseCommon){
594         // skip the current running SESE
597 	di=(dequeItem *) EXTRACTPTR((INTPTR)di);
598         SESEcommon* seseRec = (SESEcommon*) di->work;
/* A SESE record is immediately followed in memory by its garbagelist
 * chain -- &(seseRec[1]) addresses the first frame past the record. */
600           struct garbagelist* gl = (struct garbagelist*) &(seseRec[1]);
601           struct garbagelist* glroot = gl;
603           updateAscendantSESE( seseRec );
605           while( gl != NULL ) {
607             for( k = 0; k < gl->size; k++ ) {
608               void* orig = gl->array[k];
609               ENQUEUE( orig, gl->array[k] );
614       // we only have to move across the nodes
615       // of the deque if the top and bottom are
616       // not the same already
618       } while( di !=NULL) ;
/* Variant 2: node-structured deque; compute the valid [jLo,jHi) slot range
 * per node from the decoded top/bottom cursors. */
634   // goes over ready-to-run SESEs
635   for( i = 0; i < numWorkSchedWorkers; ++i ) {
638     botNode = dqDecodePtr( dq->bottom );
639     botIndx = dqDecodeIdx( dq->bottom );
641     topNode = dqDecodePtr( dq->top );
642     topIndx = dqDecodeIdx( dq->top );
647       // check all the relevant indices of this
648       // node in the deque, noting if we are in
649       // the top/bottom node which can be partially
651       if( n == botNode ) { jLo = botIndx; } else { jLo = 0;                }
652       if( n == topNode ) { jHi = topIndx; } else { jHi = DQNODE_ARRAYSIZE; }
654       for( j = jLo; j < jHi; ++j ) {
657         //SESEcommon* common = (SESEcommon*) n->itsDataArr[j];
658         //if(common==seseCommon){
662         SESEcommon* seseRec = (SESEcommon*) n->itsDataArr[j];
664         struct garbagelist* gl = (struct garbagelist*) &(seseRec[1]);
665         struct garbagelist* glroot = gl;
667         updateAscendantSESE( seseRec );
669         while( gl != NULL ) {
671           for( k = 0; k < gl->size; k++ ) {
672             void* orig = gl->array[k];
673             ENQUEUE( orig, gl->array[k] );
679       // we only have to move across the nodes
680       // of the deque if the top and bottom are
681       // not the same already
682       if( botNode != topNode ) {
685       } while( n != topNode );
/* Drain the gray queue: pop an original, read its already-made copy, and
 * scan the copy's referents per the pointerarray[] layout descriptor. */
693     void * ptr=dequeue();
695     int type=((int *)cpy)[0];
696     unsigned INTPTR * pointer;
700       /* Nothing is inside */
705     pointer=pointerarray[type];
707       /* Array of primitives */
709 #if defined(DSTM)||defined(FASTCHECK)
710       struct ArrayObject *ao=(struct ArrayObject *) ptr;
711       struct ArrayObject *ao_cpy=(struct ArrayObject *) cpy;
712       ENQUEUE((void *)ao->___nextobject___, *((void **)&ao_cpy->___nextobject___));
713       ENQUEUE((void *)ao->___localcopy___, *((void **)&ao_cpy->___localcopy___));
716       struct ArrayObject *ao=(struct ArrayObject *) ptr;
717       struct ArrayObject *ao_cpy=(struct ArrayObject *) cpy;
718       SENQUEUE((void *)ao->___objlocation___, *((void **)&ao_cpy->___objlocation___));
720     } else if (((INTPTR)pointer)==1) {
721       /* Array of pointers */
722       struct ArrayObject *ao=(struct ArrayObject *) ptr;
723       struct ArrayObject *ao_cpy=(struct ArrayObject *) cpy;
724 #if (defined(DSTM)||defined(FASTCHECK))
725       ENQUEUE((void *)ao->___nextobject___, *((void **)&ao_cpy->___nextobject___));
726       ENQUEUE((void *)ao->___localcopy___, *((void **)&ao_cpy->___localcopy___));
729       SENQUEUE((void *)ao->___objlocation___, *((void **)&ao_cpy->___objlocation___));
731       int length=ao->___length___;
733       for(i=0; i<length; i++) {
734 	void *objptr=((void **)(((char *)&ao->___length___)+sizeof(int)))[i];
735 	ENQUEUE(objptr, ((void **)(((char *)&ao_cpy->___length___)+sizeof(int)))[i]);
738       INTPTR size=pointer[0];
740       for(i=1; i<=size; i++) {
741 	unsigned int offset=pointer[i];
742 	void * objptr=*((void **)(((char *)ptr)+offset));
743 	ENQUEUE(objptr, *((void **)(((char *)cpy)+offset)));
751 #if defined(THREADS)||defined(DSTM)||defined(STM)||defined(MLP)
753   pthread_mutex_unlock(&gclistlock);
758   /* Fix up the references from tags.  This can't be done earlier,
759      because we don't want tags to keep objects alive */
761   while(taghead!=NULL) {
763     struct pointerblock *tmp=taghead->next;
764     for(i=0; i<tagindex; i++) {
765       struct ___TagDescriptor___ *tagd=taghead->ptrs[i];
766       struct ___Object___ *obj=tagd->flagptr;
/* Forwarding convention: a copied object's word [1] holds its new address
 * (see gc_createcopy); used here to find the tag's own copy. */
767       struct ___TagDescriptor___ *copy=((struct ___TagDescriptor___**)tagd)[1];
769 	/* Zero object case */
770       } else if (obj->type==-1) {
771 	/* Single object case */
772 	copy->flagptr=((struct ___Object___**)obj)[1];
773       } else if (obj->type==OBJECTARRAYTYPE) {
775 	struct ArrayObject *ao=(struct ArrayObject *) obj;
779 	struct ArrayObject *aonew;
781 	/* Count live objects */
782 	for(j=0; j<ao->___cachedCode___; j++) {
783 	  struct ___Object___ * tobj=ARRAYGET(ao, struct ___Object___ *, j);
/* Round the surviving count up to a multiple of OBJECTARRAYINTERVAL and
 * build a compacted replacement array holding only forwarded objects. */
788 	  livecount=((livecount-1)/OBJECTARRAYINTERVAL+1)*OBJECTARRAYINTERVAL;
789 	  aonew=(struct ArrayObject *) tomalloc(sizeof(struct ArrayObject)+sizeof(struct ___Object___*)*livecount);
790 	  memcpy(aonew, ao, sizeof(struct ArrayObject));
791 	  aonew->type=OBJECTARRAYTYPE;
792 	  aonew->___length___=livecount;
794 	  for(j=0; j<ao->___cachedCode___; j++) {
795 	    struct ___Object___ * tobj=ARRAYGET(ao, struct ___Object___ *, j);
796 	    if (tobj->type==-1) {
797 	      struct ___Object___ * tobjcpy=((struct ___Object___**)tobj)[1];
798 	      ARRAYSET(aonew, struct ___Object___*, k++,tobjcpy);
801 	  aonew->___cachedCode___=k;
802 	  for(; k<livecount; k++) {
803 	    ARRAYSET(aonew, struct ___Object___*, k, NULL);
806 	  /* No object live anymore */
/*
 * tomalloc: bump-allocate `size` bytes from the to-space during collection.
 * NOTE(review): the advance of to_heapptr, any alignment/overflow handling,
 * and the return statement are elided from this excerpt.
 */
817 void * tomalloc(int size) {
818   void * ptr=to_heapptr;
825 #if defined(THREADS)||defined(DSTM)||defined(STM)||defined(MLP)
/*
 * checkcollect: GC safe-point -- park this thread (publishing ptr as its
 * garbagelist) so a pending collection can proceed.  The matching
 * restartaftergc() call is elided from this excerpt.
 */
826 void checkcollect(void * ptr) {
827   stopforgc((struct garbagelist *)ptr);
/*
 * checkcollect2 (DSTM variant, guarding #ifdef elided): wraps ptr and the
 * revert list in a stack-built one-entry garbagelist {size=1, slots...} so
 * both survive the collection, then reloads the possibly-moved revertlist.
 * NOTE(review): `(int)ptr` / `(int) revertlist` truncate pointers on LP64
 * targets -- should be intptr_t/INTPTR; flagging, not changing, here.
 */
832 void checkcollect2(void * ptr) {
833   int ptrarray[]={1, (int)ptr, (int) revertlist};
834   stopforgc((struct garbagelist *)ptrarray);
836   revertlist=(struct ___Object___*)ptrarray[2];
/*
 * stopforgc: register this thread as stopped-for-GC.  Publishes the shadow
 * stack(s), the thread's lock list, bump-allocator base, and transaction
 * cache pointers into its listitem, appends the item to the global list
 * under gclistlock, and signals gccond when the last thread arrives.
 * NOTE(review): interior lines are elided -- list insertion, listcount
 * increment, and the wait loop are not visible here; comments describe
 * only the visible statements.
 */
840 void stopforgc(struct garbagelist * ptr) {
842   //just append us to the list
844   ptr=(struct garbagelist *) &ptrstack;
845 #if defined(STMARRAY)&&!defined(DUALVIEW)
847   ptr=(struct garbagelist *) &arraystack;
/* Publish per-thread state the collector will scan (see collect()). */
853   litem.locklist=pthread_getspecific(threadlocks);
855 #if defined(STM)||defined(THREADS)||defined(MLP)
856   litem.base=&memorybase;
859   litem.tc_size=c_size;
860   litem.tc_table=&c_table;
861   litem.tc_list=&c_list;
862   litem.tc_structs=&c_structs;
863   litem.objlist=newobjs;
865   litem.lockedlist=lockedobjs;
/* Alternate build (presumably MLP -- #ifdef elided): the listitem lives in
 * thread-specific storage rather than a __thread variable. */
870   struct listitem *litem=pthread_getspecific(litemkey);
873   litem->locklist=pthread_getspecific(threadlocks);
876   pthread_mutex_lock(&gclistlock);
878   if ((listcount+1)==threadcount) {
879     //only do wakeup if we are ready to GC
880     pthread_cond_signal(&gccond);
882   pthread_mutex_unlock(&gclistlock);
/*
 * restartaftergc: resume after a collection.  Lock/unlock gclock to block
 * until the collector releases it, remove this thread from the stopped
 * list under gclistlock, and restore the (possibly moved) lock list back
 * into thread-specific storage.
 * NOTE(review): list removal and listcount decrement lines are elided; the
 * two pthread_setspecific lines appear to be alternate #ifdef arms (litem
 * pointer vs. __thread litem) whose directives are missing here.
 */
885 void restartaftergc() {
887   pthread_mutex_lock(&gclock); // Wait for GC
888   pthread_mutex_unlock(&gclock);
890   pthread_mutex_lock(&gclistlock);
892   pthread_mutex_unlock(&gclistlock);
895   struct listitem *litem=pthread_getspecific(litemkey);
896   pthread_setspecific(threadlocks,litem->locklist);
898   pthread_setspecific(threadlocks,litem.locklist);
904 #if defined(STM)||defined(THREADS)||defined(MLP)
/* Per-thread bump-allocation window size. */
905 #define MEMORYBLOCK 65536
906 void * helper(struct garbagelist *, int);
/*
 * mygcmalloc (STM/THREADS/MLP build): thread-local bump allocator.  When
 * the current window cannot satisfy `size`, fetch a fresh block of at
 * least MEMORYBLOCK bytes from the shared heap via helper() and zero it;
 * then hand out `retvalue` from the front of the window.
 * NOTE(review): the window-advance and return statements are elided from
 * this excerpt, as is any alignment of `size`.
 */
907 void * mygcmalloc(struct garbagelist * stackptr, int size) {
910   if (memorybase==NULL||size>(memorytop-memorybase)) {
911     int toallocate=(size>MEMORYBLOCK)?size:MEMORYBLOCK;
912     memorybase=helper(stackptr, toallocate);
913     bzero(memorybase, toallocate);
914     memorytop=memorybase+toallocate;
916   char *retvalue=memorybase;
/*
 * helper / mygcmalloc (shared-heap allocation path; the two signatures
 * below are alternate #ifdef arms whose directives are elided): allocate
 * from the shared heap, triggering a collection when the allocation
 * pointer passes curr_heapgcpoint.  First call lazily creates both
 * semispaces of INITIALHEAPSIZE bytes; later calls may grow the to-space
 * (HEAPSIZE of last GC survivor size + request, 8-byte rounded), run
 * collect(), flip the semispaces, and retry if still out of room.
 * NOTE(review): heavily elided -- the fast bump path, the gclock wait
 * body, exit() calls after failed mallocs, and several closing braces are
 * missing from this excerpt.
 */
921 void * helper(struct garbagelist * stackptr, int size) {
923 void * mygcmalloc(struct garbagelist * stackptr, int size) {
926 #if defined(THREADS)||defined(DSTM)||defined(STM)||defined(MLP)
/* Contend for the single collector lock; losers presumably park in
 * stopforgc/restartaftergc inside the elided loop body. */
927   while (pthread_mutex_trylock(&gclock)!=0) {
936   if (curr_heapptr>curr_heapgcpoint||curr_heapptr<curr_heapbase) {
937     if (curr_heapbase==0) {
938       /* Need to allocate base heap */
939       curr_heapbase=malloc(INITIALHEAPSIZE);
940       if (curr_heapbase==NULL) {
/* NOTE(review): typo in the message below -- "colletcor" should read
 * "collector" (cf. the correctly spelled twin message for to_heapbase). */
941 	printf("malloc failed.  Garbage colletcor couldn't get enough memory.  Try changing heap size.\n");
944 #if defined(STM)||defined(THREADS)||defined(MLP)
946       bzero(curr_heapbase, INITIALHEAPSIZE);
948       curr_heaptop=curr_heapbase+INITIALHEAPSIZE;
949       curr_heapgcpoint=((char *) curr_heapbase)+GCPOINT(INITIALHEAPSIZE);
950       curr_heapptr=curr_heapbase+size;
952       to_heapbase=malloc(INITIALHEAPSIZE);
953       if (to_heapbase==NULL) {
954 	printf("malloc failed. Garbage collector couldn't get enough memory. Try changing heap size.\n");
958       to_heaptop=to_heapbase+INITIALHEAPSIZE;
959       to_heapptr=to_heapbase;
961 #if defined(THREADS)||defined(DSTM)||defined(STM)||defined(MLP)
962       pthread_mutex_unlock(&gclock);
967     /* Grow the to heap if necessary */
969     INTPTR curr_heapsize=curr_heaptop-curr_heapbase;
970     INTPTR to_heapsize=to_heaptop-to_heapbase;
971     INTPTR last_heapsize=0;
973       last_heapsize=HEAPSIZE(lastgcsize, size);
/* Round the target size up to an 8-byte multiple. */
974       if ((last_heapsize&7)!=0)
975 	last_heapsize+=(8-(last_heapsize%8));
977     if (curr_heapsize>last_heapsize)
978       last_heapsize=curr_heapsize;
979     if (last_heapsize>to_heapsize) {
981       to_heapbase=malloc(last_heapsize);
982       if (to_heapbase==NULL) {
983 	printf("Error Allocating enough memory\n");
986       to_heaptop=to_heapbase+last_heapsize;
987       to_heapptr=to_heapbase;
991     /* Do our collection */
994     /* Update stat on previous gc size */
995     lastgcsize=(to_heapptr-to_heapbase)+size;
/* NOTE(review): %u paired with pointer-difference arguments below is wrong
 * on LP64 targets (ptrdiff_t is 64-bit); %td or a cast would be correct.
 * Flagging only -- the guarding GARBAGESTATS #ifdef is elided. */
998     printf("Garbage collected: Old bytes: %u\n", curr_heapptr-curr_heapbase);
999     printf("New space: %u\n", to_heapptr-to_heapbase);
1000     printf("Total space: %u\n", to_heaptop-to_heapbase);
1003     for(i=0;i<MAXSTATS;i++) {
1004       if (garbagearray[i]!=0)
1005 	printf("Type=%d Size=%u\n", i, garbagearray[i]);
1009     /* Flip to/curr heaps */
1011     void * tmp=to_heapbase;
1012     to_heapbase=curr_heapbase;
1016     to_heaptop=curr_heaptop;
1020     curr_heapptr=to_heapptr+size;
1021     curr_heapgcpoint=((char *) curr_heapbase)+GCPOINT(curr_heaptop-curr_heapbase);
1022     to_heapptr=to_heapbase;
1024     /* Not enough room :(, redo gc */
1025     if (curr_heapptr>curr_heapgcpoint) {
1026 #if defined(THREADS)||defined(DSTM)||defined(STM)||defined(MLP)
1027       pthread_mutex_unlock(&gclock);
1029       return mygcmalloc(stackptr, size);
/* Zero the unused tail of the new from-space before handing it out. */
1032     bzero(tmp, curr_heaptop-tmp);
1033 #if defined(THREADS)||defined(DSTM)||defined(STM)||defined(MLP)
1034     pthread_mutex_unlock(&gclock);
1039 #if defined(THREADS)||defined(DSTM)||defined(STM)||defined(MLP)
1040     pthread_mutex_unlock(&gclock);
/*
 * gc_createcopy: copy `orig` into to-space (via tomalloc) unless it has
 * already been forwarded.  Forwarding convention visible below: a copied
 * object's type word ([0]) is overwritten with -1 and word [1] holds the
 * new address, so the already-forwarded path just reads ((void**)orig)[1]
 * into *copy_ptr.  Objects (type<NUMCLASSES) copy classsize[type] bytes
 * (plus objheader_t in the DSTM/STM arms); arrays copy header + length *
 * elementsize, with the STMARRAY arm also carrying per-stripe version
 * space in front of the header.
 * NOTE(review): return statements, the #ifdef directives separating the
 * DSTM/STM/plain arms, and several closing braces are elided from this
 * excerpt.
 */
1046 int gc_createcopy(void * orig, void ** copy_ptr) {
1051   int type=((int *)orig)[0];
/* Already forwarded: hand back the stored new address. */
1053   *copy_ptr=((void **)orig)[1];
1056   if (type<NUMCLASSES) {
1057     /* We have a normal object */
/* Header-carrying arm: copy includes the objheader_t preceding orig, then
 * newobj is advanced past the header so it points at the object proper. */
1059     int size=classsize[type]+sizeof(objheader_t);
1060     void *newobj=tomalloc(size);
1061     memcpy(newobj,((char *) orig)-sizeof(objheader_t), size);
1062     newobj=((char *)newobj)+sizeof(objheader_t);
1064     int size=classsize[type];
1065     void *newobj=tomalloc(size);
1066     memcpy(newobj, orig, size);
/* Per-type garbage statistics (guarding #ifdef presumably elided). */
1069     garbagearray[type]+=size;
/* Install the forwarding record in the original. */
1071     ((int *)orig)[0]=-1;
1072     ((void **)orig)[1]=newobj;
1076     /* We have an array */
1077     struct ArrayObject *ao=(struct ArrayObject *)orig;
1078     int elementsize=classsize[type];
1079     int length=ao->___length___;
/* STMARRAY arm: payload rounded via LOWMASK/HIGHMASK, with two ints of
 * version data per INDEXSHIFT stripe copied in front of the header. */
1082     int basesize=length*elementsize;
1083     basesize=(basesize+LOWMASK)&HIGHMASK;
1084     int versionspace=sizeof(int)*2*(basesize>>INDEXSHIFT);
1085     int size=sizeof(struct ArrayObject)+basesize+sizeof(objheader_t)+versionspace;
1086     void *newobj=tomalloc(size);
1087     memcpy(newobj, ((char*)orig)-sizeof(objheader_t)-versionspace, size);
1088     newobj=((char *)newobj)+sizeof(objheader_t)+versionspace;
1090     int size=sizeof(struct ArrayObject)+length*elementsize+sizeof(objheader_t);
1091     void *newobj=tomalloc(size);
1092     memcpy(newobj, ((char*)orig)-sizeof(objheader_t), size);
1093     newobj=((char *)newobj)+sizeof(objheader_t);
1096     int size=sizeof(struct ArrayObject)+length*elementsize;
1097     void *newobj=tomalloc(size);
1098     memcpy(newobj, orig, size);
1101     garbagearray[type]+=size;
1103     ((int *)orig)[0]=-1;
1104     ((void **)orig)[1]=newobj;
/*
 * updateForwardList: walk a SESE forwarding queue; for each dependent SESE
 * record, scan the garbagelist that immediately follows the record in
 * memory (&(seseRec[1])), optionally first forwarding ancestor records
 * when prevUpdate==TRUE, then recurse into that record's own forward list.
 * NOTE(review): the return type on the signature line, the declaration of
 * `i`, the gl chain advance, and closing braces are elided from this
 * excerpt.
 */
1112 updateForwardList(struct Queue *forwardList, int prevUpdate){
1114   struct QueueItem * fqItem=getHead(forwardList);
1115   while(fqItem!=NULL){
1116     SESEcommon* seseRec = (SESEcommon*)(fqItem->objectptr);
1117     struct garbagelist * gl=(struct garbagelist *)&(seseRec[1]);
1118     if(prevUpdate==TRUE){
1119       updateAscendantSESE(seseRec);
1121       // do something here
1124       for(i=0; i<gl->size; i++) {
1125 	void * orig=gl->array[i];
1126 	ENQUEUE(orig, gl->array[i]);
1130     // iterate forwarding list of seseRec
1131     struct Queue* fList=&seseRec->forwardList;
1132     updateForwardList(fList,prevUpdate);
1133     fqItem=getNextQueueItem(fqItem);
/*
 * updateMemoryQueue: forward the roots held by every memory-queue item of
 * a parent SESE.  Items come in three shapes -- HASHTABLE (NUMBINS bins of
 * read/write BinItems), VECTOR (indexed REntry array), and SINGLEITEM
 * (one REntry) -- and in each case the REntry's seseRec is followed in
 * memory by its garbagelist (&(seseRec[1])), whose slots are ENQUEUEd
 * after updateAscendantSESE forwards the record's ancestors.
 * NOTE(review): the return type, declarations (i, binidx, ridx, idx), the
 * gl chain advances, and many closing braces are elided.  The inner loops
 * reuse `i` for both the queue index and the garbagelist index; whether
 * that is safe cannot be judged from this elided view -- verify against
 * the full file.
 */
1138 updateMemoryQueue(SESEcommon* seseParent){
1139   // update memory queue
1141   for(i=0; i<seseParent->numMemoryQueue; i++){
1142     MemoryQueue *memoryQueue=seseParent->memoryQueueArray[i];
1143     MemoryQueueItem *memoryItem=memoryQueue->head;
1144     while(memoryItem!=NULL){
1145       if(memoryItem->type==HASHTABLE){
1146 	Hashtable *ht=(Hashtable*)memoryItem;
1147 	for(binidx=0; binidx<NUMBINS; binidx++){
1148 	  BinElement *bin=ht->array[binidx];
1149 	  BinItem *binItem=bin->head;
1150 	  while(binItem!=NULL){
1151 	    if(binItem->type==READBIN){
1152 	      ReadBinItem* readBinItem=(ReadBinItem*)binItem;
1154 	      for(ridx=0; ridx<readBinItem->index; ridx++){
1155 		REntry *rentry=readBinItem->array[ridx];
1156 		SESEcommon* seseRec = (SESEcommon*)(rentry->seseRec);
1157 		struct garbagelist * gl= (struct garbagelist *)&(seseRec[1]);
1158 		updateAscendantSESE(seseRec);
1161 		for(i=0; i<gl->size; i++) {
1162 		  void * orig=gl->array[i];
1163 		  ENQUEUE(orig, gl->array[i]);
/* Write-bin arm (the else and its brace are elided). */
1169 	      REntry *rentry=((WriteBinItem*)binItem)->val;
1170 	      SESEcommon* seseRec = (SESEcommon*)(rentry->seseRec);
1171 	      struct garbagelist * gl= (struct garbagelist *)&(seseRec[1]);
1172 	      updateAscendantSESE(seseRec);
1175 	      for(i=0; i<gl->size; i++) {
1176 		void * orig=gl->array[i];
1177 		ENQUEUE(orig, gl->array[i]);
1182 	    binItem=binItem->next;
1185       }else if(memoryItem->type==VECTOR){
1186 	Vector *vt=(Vector*)memoryItem;
1188 	for(idx=0; idx<vt->index; idx++){
1189 	  REntry *rentry=vt->array[idx];
1191 	  SESEcommon* seseRec = (SESEcommon*)(rentry->seseRec);
1192 	  struct garbagelist * gl= (struct garbagelist *)&(seseRec[1]);
1193 	  updateAscendantSESE(seseRec);
1196 	  for(i=0; i<gl->size; i++) {
1197 	    void * orig=gl->array[i];
1198 	    ENQUEUE(orig, gl->array[i]);
1204       }else if(memoryItem->type==SINGLEITEM){
1205 	SCC *scc=(SCC*)memoryItem;
1206 	REntry *rentry=scc->val;
1208 	SESEcommon* seseRec = (SESEcommon*)(rentry->seseRec);
1209 	struct garbagelist * gl= (struct garbagelist *)&(seseRec[1]);
1210 	updateAscendantSESE(seseRec);
1213 	for(i=0; i<gl->size; i++) {
1214 	  void * orig=gl->array[i];
1215 	  ENQUEUE(orig, gl->array[i]);
1221       memoryItem=memoryItem->next;
/*
 * updateAscendantSESE: for each dependent SESE record slot of seseRec
 * (numDependentSESErecords entries, INTPTR-sized, starting at
 * offsetToDepSESErecords), locate the ancestor record, then walk the
 * garbagelist chain that follows it (&(prevSESE[1])) and ENQUEUE every
 * recorded root slot in place.
 * NOTE(review): the return type, declarations of prevIdx/i, the middle of
 * the prevSESE address computation, and closing braces are elided from
 * this excerpt.
 */
1226 updateAscendantSESE(SESEcommon* seseRec){
1228   for(prevIdx=0; prevIdx<(seseRec->numDependentSESErecords); prevIdx++){
1229     SESEcommon* prevSESE = (SESEcommon*)
1232 			   seseRec->offsetToDepSESErecords +
1233 			   (sizeof(INTPTR)*prevIdx)
1237     struct garbagelist * prevgl=(struct garbagelist *)&(((SESEcommon*)(prevSESE))[1]);
1238     while(prevgl!=NULL) {
1240       for(i=0; i<prevgl->size; i++) {
1241 	void * orig=prevgl->array[i];
1242 	ENQUEUE(orig, prevgl->array[i]);
1244       prevgl=prevgl->next;
1252 int within(void *ptr){ //debug function
1253 if(ptr>curr_heapptr || ptr<curr_heapbase){
1254 __asm__ __volatile__ ("int $3"); // breakpoint