3 #include "structdefs.h"
5 #include "SimpleHash.h"
7 #include "GenericHashtable.h"
9 #if defined(THREADS) || defined(DSTM) || defined(STM)
18 #include "workschedule.h"
19 extern volatile int numWorkSchedWorkers;
28 #include <DSTM/interface_recovery/dstm.h>
30 #include <DSTM/interface/dstm.h>
37 #include "delaycomp.h"
/* Initial heap sizing: default 256 MB unless overridden at build time. */
43 #ifndef INITIALHEAPSIZE_MB
44 #define INITIALHEAPSIZE_MB (256)
/* NOTE(review): the matching #endif line is missing from this listing. */
47 #define INITIALHEAPSIZE INITIALHEAPSIZE_MB*1024*1024L
/* A collection is triggered once the heap is 99% full. */
48 #define GCPOINT(x) ((INTPTR)((x)*0.99))
49 /* This define takes in how full the heap is initially and returns a new heap size to use */
50 #define HEAPSIZE(x,y) ((INTPTR)(x+y))*2
/* Task-runtime state owned by other translation units; all of these are
   scanned as GC roots in collect() below. */
53 extern struct genhashtable * activetasks;
55 extern struct parameterwrapper * objectqueues[NUMCLASSES];
57 extern struct genhashtable * failedtasks;
58 extern struct taskparamdescriptor *currtpd;
59 extern struct ctable *forward;
60 extern struct ctable *reverse;
61 extern struct RuntimeHash *fdtoobject;
/* Per-type byte counts copied during the last collection (indexed by
   object type; printed by the GC statistics code). */
66 long garbagearray[MAXSTATS];
69 #if defined(THREADS) || defined(DSTM) || defined(STM)||defined(MLP)
/* Global list of threads registered with the collector. */
71 struct listitem * list=NULL;
/* This thread's node on that list; seseCommon is the currently running
   SESE record (MLP builds). */
74 __thread struct listitem litem;
79 __thread SESEcommon* seseCommon;
82 //Need to check if pointers are transaction pointers
83 //this also catches the special flag value of 1 for local copies
/* ENQUEUE/SENQUEUE: forward a pointer if it lies in the from-space heap,
   creating a to-space copy via gc_createcopy() and pushing it on the gray
   queue.  The variant chosen depends on the build (DSTM/STM/FASTCHECK/plain).
   NOTE(review): "©" throughout these macros is mojibake for "&copy"
   (address of a local copy pointer), and the listing drops interior
   continuation lines, so the macro bodies are incomplete here. */
85 #define ENQUEUE(orig, dst) \
86 if ((!(((unsigned int)orig)&0x1))) { \
87 if (orig>=curr_heapbase&&orig<curr_heaptop) { \
89 if (gc_createcopy(orig,©)) \
95 #define ENQUEUE(orig, dst) \
96 if (orig>=curr_heapbase&&orig<curr_heaptop) { \
98 if (gc_createcopy(orig,©)) \
102 #define SENQUEUE(orig, dst) \
105 if (gc_createcopy(orig,©)) \
109 #elif defined(FASTCHECK)
110 #define ENQUEUE(orig, dst) \
111 if (((unsigned int)orig)!=1) { \
113 if (gc_createcopy(orig,©)) \
117 #define ENQUEUE(orig, dst) \
118 if (orig>=curr_heapbase&&orig<curr_heaptop) { \
120 if (gc_createcopy(orig,©)) \
/* Chunked work-queue node: holds up to NUMPTRS gray pointers awaiting scan.
   NOTE(review): the closing "};" is on a line missing from this listing. */
127 struct pointerblock {
128 void * ptrs[NUMPTRS];
129 struct pointerblock *next;
/* From-space ("curr") bounds used by the copying collector: base/top delimit
   the heap, heapptr is the bump pointer, heapgcpoint the GC trigger mark. */
132 void * curr_heapbase=0;
133 void * curr_heapptr=0;
134 void * curr_heapgcpoint=0;
135 void * curr_heaptop=0;
/* To-space base (top/ptr declared on lines missing from this listing). */
137 void * to_heapbase=0;
/* Gray-pointer queue: enqueue at head, dequeue at tail; spare caches one
   free block for reuse. */
143 struct pointerblock *head=NULL;
145 struct pointerblock *tail=NULL;
147 struct pointerblock *spare=NULL;
/* Push ptr onto the gray queue, growing by a new pointerblock when the head
   block is full (reusing the cached spare block when one is available). */
149 void enqueue(void *ptr) {
150 if (headindex==NUMPTRS) {
151 struct pointerblock * tmp;
155 } else tmp=malloc(sizeof(struct pointerblock));
160 head->ptrs[headindex++]=ptr;
/* NOTE(review): the dequeue()'s opening line is missing from this listing.
   The fragment below advances past an exhausted tail block and pops the
   next gray pointer. */
164 if (tailindex==NUMPTRS) {
165 struct pointerblock *tmp=tail;
173 return tail->ptrs[tailindex++];
/* Forward every object pointer recorded in an objlist in place (STM new /
   locked object lists). */
177 void fixobjlist(struct objlist * ptr) {
180 for(i=0;i<ptr->offset;i++) {
181 SENQUEUE(ptr->objs[i], ptr->objs[i]);
/* Rebuild a transaction-cache chained hash table after objects have moved:
 * each cached key/value is forwarded via SENQUEUE, the value's interior
 * pointers are forwarded according to its type descriptor, and surviving
 * entries are reinserted into a freshly calloc'd bucket array, with
 * overflow nodes drawn from the cliststruct_t block allocator in *cstr.
 *
 * NOTE(review): the listing drops interior lines; loop/brace structure
 * below is incomplete. */
187 void fixtable(chashlistnode_t ** tc_table, chashlistnode_t **tc_list, cliststruct_t **cstr, unsigned int tc_size) {
/* Mask spans tc_size<<4, so the bucket index is (key & mask) >> 4 —
   presumably matching the chash implementation's capacity; TODO confirm. */
188 unsigned int mask=(tc_size<<4)-1;
189 chashlistnode_t *node=calloc(tc_size, sizeof(chashlistnode_t));
190 chashlistnode_t *ptr=*tc_table;
191 chashlistnode_t *curr;
195 chashlistnode_t *newlist=NULL;
196 for(i=0;i<tc_size;i++) {
199 do { //Inner loop to go through linked lists
201 chashlistnode_t *tmp,*next;
203 if ((key=(void *)curr->key) == 0) { //Exit inner loop if there the first element is 0
204 break; //key = val =0 for element if not present within the hash table
/* Forward the cached value itself if it lives in the collected heap... */
207 if (curr->val>=curr_heapbase&&curr->val<curr_heaptop) {
208 SENQUEUE(curr->val, curr->val);
210 //rewrite transaction cache entry
/* ...then forward its interior pointers using the per-type layout
   descriptor (0 = primitive array, 1 = pointer array, else a list of
   field offsets). */
211 void *vptr=curr->val;
212 int type=((int *)vptr)[0];
213 unsigned INTPTR *pointer=pointerarray[type];
215 //array of primitives - do nothing
216 struct ArrayObject *ao=(struct ArrayObject *) vptr;
217 SENQUEUE((void *)ao->___objlocation___, *((void **)&ao->___objlocation___));
218 } else if (((INTPTR)pointer)==1) {
220 struct ArrayObject *ao=(struct ArrayObject *) vptr;
221 int length=ao->___length___;
223 SENQUEUE((void *)ao->___objlocation___, *((void **)&ao->___objlocation___));
/* STMARRAY stripe path: only scan index ranges whose lock word says
   they are in use. */
225 int lowindex=ao->lowindex;
226 int highindex=ao->highindex;
228 for(j=lowindex; j<=highindex; j++) {
229 unsigned int lockval;
230 GETLOCKVAL(lockval, ao, j);
231 if (lockval!=STMNONE) {
232 int lowi=(j<<INDEXSHIFT)/sizeof(void *);
233 int highi=lowi+(INDEXLENGTH/sizeof(void *));
234 for(i=lowi; i<highi;i++) {
236 for(i=0; i<length; i++) {
/* Array payload begins one int past ___length___. */
238 void *objptr=((void **)(((char *)&ao->___length___)+sizeof(int)))[i];
239 SENQUEUE(objptr, ((void **)(((char *)&ao->___length___)+sizeof(int)))[i]);
246 INTPTR size=pointer[0];
248 for(i=1; i<=size; i++) {
249 unsigned int offset=pointer[i];
250 void * objptr=*((void **)(((char *)vptr)+offset));
251 SENQUEUE(objptr, *((void **)(((char *)vptr)+offset)));
/* Reinsert the (possibly moved) key into the new bucket array. */
257 index = (((unsigned INTPTR)key) & mask) >>4;
261 // Insert into the new table
263 tmp->key = curr->key;
264 tmp->val = curr->val;
267 } else if (isfirst) {
/* Bucket already occupied: chain a new node, reusing a slot from the
   current cliststruct_t block when one is free, else calloc a block. */
268 chashlistnode_t *newnode;
269 if ((*cstr)->num<NUMCLIST) {
270 newnode=&(*cstr)->array[(*cstr)->num];
274 cliststruct_t *tcl=calloc(1,sizeof(cliststruct_t));
277 newnode=&tcl->array[0];
280 newnode->key = curr->key;
281 newnode->val = curr->val;
282 newnode->next = tmp->next;
283 newnode->lnext=newlist;
289 curr->next=tmp->next;
/* Fragment of the queue-empty test: same block and same index means no
   gray pointers remain (enclosing function's lines are missing here). */
303 if ((head==tail)&&(tailindex==headindex))
/* Separate queue for tag descriptors; processed after ordinary objects so
   tags do not keep their tagged objects alive. */
309 struct pointerblock *taghead=NULL;
/* Push a tag descriptor onto the tag queue, growing it as needed. */
312 void enqueuetag(struct ___TagDescriptor___ *ptr) {
313 if (tagindex==NUMPTRS) {
314 struct pointerblock * tmp=malloc(sizeof(struct pointerblock));
319 taghead->ptrs[tagindex++]=ptr;
323 #if defined(STM)||defined(THREADS)||defined(MLP)
/* Per-thread bump-allocation window handed out by mygcmalloc() below;
   invalidated (set NULL through listitem.base) during collection. */
324 __thread char * memorybase=NULL;
325 __thread char * memorytop=NULL;
/* Stop-the-world copying collection.
 *
 * stackptr: head of the calling thread's chained garbagelist of stack
 * roots.  Roots from every registered thread, the global definitions
 * block, task tables, and (MLP builds) the work-scheduler deques are
 * ENQUEUEd; then the gray queue is drained, copying every reachable
 * object into to-space; finally tag references are patched.
 *
 * NOTE(review): this listing drops many interior lines, so loop/brace
 * boundaries below are incomplete; comments describe visible phases only. */
329 void collect(struct garbagelist * stackptr) {
330 #if defined(THREADS)||defined(DSTM)||defined(STM)||defined(MLP)
/* Rendezvous: wait until every other registered thread has parked itself
   via stopforgc(). */
332 pthread_mutex_lock(&gclistlock);
334 if ((listcount+1)==threadcount) {
335 break; /* Have all other threads stopped */
337 pthread_cond_wait(&gccond, &gclistlock);
/* Chain the collector's own ptrstack/arraystack in front of the caller's
   root list so they are scanned too. */
341 ptrstack.prev=stackptr;
342 stackptr=(struct garbagelist *) &ptrstack;
343 #if defined(STMARRAY)&&!defined(DUALVIEW)
344 arraystack.prev=stackptr;
345 stackptr=(struct garbagelist *) &arraystack;
/* Reset per-type statistics and seed the gray queue and tag queue. */
352 for(i=0;i<MAXSTATS;i++)
360 head=tail=malloc(sizeof(struct pointerblock));
366 taghead=malloc(sizeof(struct pointerblock));
/* Forward this thread's own transaction cache and locked-object list. */
373 fixtable(&c_table, &c_list, &c_structs, c_size);
376 fixobjlist(lockedobjs);
380 #if defined(STM)||defined(THREADS)||defined(MLP)
384 /* Check current stack */
385 #if defined(THREADS)||defined(DSTM)||defined(STM)||defined(MLP)
387 struct listitem *listptr=list;
391 while(stackptr!=NULL) {
393 for(i=0; i<stackptr->size; i++) {
394 void * orig=stackptr->array[i];
395 ENQUEUE(orig, stackptr->array[i]);
397 stackptr=stackptr->next;
399 #if defined(THREADS)||defined(DSTM)||defined(STM)||defined(MLP)
400 /* Go to next thread */
/* Skip our own listitem; for MLP also refresh the running SESE's forward
   list and memory queue. */
403 if (listptr==&litem) {
405 // update forward list & memory queue for the current SESE
406 updateForwardList(&((SESEcommon*)listptr->seseCommon)->forwardList,FALSE);
407 updateMemoryQueue((SESEcommon*)(listptr->seseCommon));
409 listptr=listptr->next;
413 struct listitem *litem=pthread_getspecific(litemkey);
414 if (listptr==litem) {
415 listptr=listptr->next;
/* Scan a parked thread: its lock list, its transaction cache and object
   lists, then continue the outer loop with its stack roots. */
422 void * orig=listptr->locklist;
423 ENQUEUE(orig, listptr->locklist);
426 if ((*listptr->tc_table)!=NULL) {
427 fixtable(listptr->tc_table, listptr->tc_list, listptr->tc_structs, listptr->tc_size);
428 fixobjlist(listptr->objlist);
430 fixobjlist(listptr->lockedlist);
434 #if defined(STM)||defined(THREADS)||defined(MLP)
/* Invalidate the parked thread's bump-allocation window. */
435 *(listptr->base)=NULL;
438 // update forward list & memory queue for all running SESEs.
439 if (listptr->seseCommon!=NULL) {
440 updateForwardList(&((SESEcommon*)listptr->seseCommon)->forwardList,FALSE);
441 updateMemoryQueue((SESEcommon*)(listptr->seseCommon));
444 stackptr=listptr->stackptr;
445 listptr=listptr->next;
/* FASTCHECK revert-list root. */
453 ENQUEUE(___fcrevert___, ___fcrevert___);
456 #if defined(THREADS)||defined(DSTM)||defined(STM)||defined(MLP)
/* Scan global (static) definitions, laid out as a garbagelist. */
459 stackptr=(struct garbagelist *)global_defs_p;
460 for(i=0; i<stackptr->size; i++) {
461 void * orig=stackptr->array[i];
462 ENQUEUE(orig, stackptr->array[i]);
469 /* Update objectsets */
471 for(i=0; i<NUMCLASSES; i++) {
472 #if !defined(MULTICORE)
473 struct parameterwrapper * p=objectqueues[i];
475 struct ObjectHash * set=p->objectset;
476 struct ObjectNode * ptr=set->listhead;
478 void *orig=(void *)ptr->key;
479 ENQUEUE(orig, *((void **)(&ptr->key)));
482 ObjectHashrehash(set); /* Rehash the table */
/* Task-library side tables: forward/reverse ctables and the fd->object
   map; entries are forwarded in place, then each table is rehashed. */
491 struct cnode * ptr=forward->listhead;
493 void * orig=(void *)ptr->key;
494 ENQUEUE(orig, *((void **)(&ptr->key)));
497 crehash(forward); /* Rehash the table */
501 struct cnode * ptr=reverse->listhead;
503 void *orig=(void *)ptr->val;
504 ENQUEUE(orig, *((void**)(&ptr->val)));
511 struct RuntimeNode * ptr=fdtoobject->listhead;
513 void *orig=(void *)ptr->data;
514 ENQUEUE(orig, *((void**)(&ptr->data)));
520 /* Update current task descriptor */
522 for(i=0; i<currtpd->numParameters; i++) {
523 void *orig=currtpd->parameterArray[i];
524 ENQUEUE(orig, currtpd->parameterArray[i]);
529 /* Update active tasks */
531 struct genpointerlist * ptr=activetasks->list;
533 struct taskparamdescriptor *tpd=ptr->src;
535 for(i=0; i<tpd->numParameters; i++) {
536 void * orig=tpd->parameterArray[i];
537 ENQUEUE(orig, tpd->parameterArray[i]);
541 genrehash(activetasks);
544 /* Update failed tasks */
546 struct genpointerlist * ptr=failedtasks->list;
548 struct taskparamdescriptor *tpd=ptr->src;
550 for(i=0; i<tpd->numParameters; i++) {
551 void * orig=tpd->parameterArray[i];
552 ENQUEUE(orig, tpd->parameterArray[i]);
556 genrehash(failedtasks);
568 // goes over ready-to-run SESEs
569 for( i = 0; i < numWorkSchedWorkers; ++i ) {
575 // check all the relevant indices of this
576 // node in the deque, noting if we are in
577 // the top/bottom node which can be partially
581 //SESEcommon* common = (SESEcommon*) n->itsDataArr[j];
582 //if(common==seseCommon){
583 // skip the current running SESE
/* Scan the garbage lists hanging off each queued SESE record. */
586 di=(dequeItem *) EXTRACTPTR((INTPTR)di);
587 SESEcommon* seseRec = (SESEcommon*) di->work;
589 struct garbagelist* gl = (struct garbagelist*) &(seseRec[1]);
590 struct garbagelist* glroot = gl;
592 updateAscendantSESE( seseRec );
594 while( gl != NULL ) {
596 for( k = 0; k < gl->size; k++ ) {
597 void* orig = gl->array[k];
598 ENQUEUE( orig, gl->array[k] );
603 // we only have to move across the nodes
604 // of the deque if the top and bottom are
605 // not the same already
607 } while( di !=NULL) ;
/* Alternate deque layout: walk nodes from bottom to top, clamping the
   scanned index range at the partially-filled end nodes. */
623 // goes over ready-to-run SESEs
624 for( i = 0; i < numWorkSchedWorkers; ++i ) {
627 botNode = dqDecodePtr( dq->bottom );
628 botIndx = dqDecodeIdx( dq->bottom );
630 topNode = dqDecodePtr( dq->top );
631 topIndx = dqDecodeIdx( dq->top );
636 // check all the relevant indices of this
637 // node in the deque, noting if we are in
638 // the top/bottom node which can be partially
640 if( n == botNode ) { jLo = botIndx; } else { jLo = 0; }
641 if( n == topNode ) { jHi = topIndx; } else { jHi = DQNODE_ARRAYSIZE; }
643 for( j = jLo; j < jHi; ++j ) {
646 //SESEcommon* common = (SESEcommon*) n->itsDataArr[j];
647 //if(common==seseCommon){
651 SESEcommon* seseRec = (SESEcommon*) n->itsDataArr[j];
653 struct garbagelist* gl = (struct garbagelist*) &(seseRec[1]);
654 struct garbagelist* glroot = gl;
656 updateAscendantSESE( seseRec );
658 while( gl != NULL ) {
660 for( k = 0; k < gl->size; k++ ) {
661 void* orig = gl->array[k];
662 ENQUEUE( orig, gl->array[k] );
668 // we only have to move across the nodes
669 // of the deque if the top and bottom are
670 // not the same already
671 if( botNode != topNode ) {
674 } while( n != topNode );
/* Drain phase: pop gray objects and scan their pointer fields using the
   per-type pointerarray descriptors (0 = no pointers, 1 = array of
   pointers, otherwise offsets[1..size]). */
682 void * ptr=dequeue();
684 int type=((int *)cpy)[0];
685 unsigned INTPTR * pointer;
689 /* Nothing is inside */
694 pointer=pointerarray[type];
696 /* Array of primitives */
698 #if defined(DSTM)||defined(FASTCHECK)
699 struct ArrayObject *ao=(struct ArrayObject *) ptr;
700 struct ArrayObject *ao_cpy=(struct ArrayObject *) cpy;
701 ENQUEUE((void *)ao->___nextobject___, *((void **)&ao_cpy->___nextobject___));
702 ENQUEUE((void *)ao->___localcopy___, *((void **)&ao_cpy->___localcopy___));
705 struct ArrayObject *ao=(struct ArrayObject *) ptr;
706 struct ArrayObject *ao_cpy=(struct ArrayObject *) cpy;
707 SENQUEUE((void *)ao->___objlocation___, *((void **)&ao_cpy->___objlocation___));
709 } else if (((INTPTR)pointer)==1) {
710 /* Array of pointers */
711 struct ArrayObject *ao=(struct ArrayObject *) ptr;
712 struct ArrayObject *ao_cpy=(struct ArrayObject *) cpy;
713 #if (defined(DSTM)||defined(FASTCHECK))
714 ENQUEUE((void *)ao->___nextobject___, *((void **)&ao_cpy->___nextobject___));
715 ENQUEUE((void *)ao->___localcopy___, *((void **)&ao_cpy->___localcopy___));
718 SENQUEUE((void *)ao->___objlocation___, *((void **)&ao_cpy->___objlocation___));
720 int length=ao->___length___;
722 for(i=0; i<length; i++) {
/* Array payload starts one int past ___length___. */
723 void *objptr=((void **)(((char *)&ao->___length___)+sizeof(int)))[i];
724 ENQUEUE(objptr, ((void **)(((char *)&ao_cpy->___length___)+sizeof(int)))[i]);
727 INTPTR size=pointer[0];
729 for(i=1; i<=size; i++) {
730 unsigned int offset=pointer[i];
731 void * objptr=*((void **)(((char *)ptr)+offset));
732 ENQUEUE(objptr, *((void **)(((char *)cpy)+offset)));
740 #if defined(THREADS)||defined(DSTM)||defined(STM)||defined(MLP)
742 pthread_mutex_unlock(&gclistlock);
747 /* Fix up the references from tags. This can't be done earlier,
748 because we don't want tags to keep objects alive */
750 while(taghead!=NULL) {
752 struct pointerblock *tmp=taghead->next;
753 for(i=0; i<tagindex; i++) {
754 struct ___TagDescriptor___ *tagd=taghead->ptrs[i];
/* flagptr is the tagged object (or an OBJECTARRAYTYPE array of them);
   rebind the copied tag descriptor to the copies of whatever survived. */
755 struct ___Object___ *obj=tagd->flagptr;
756 struct ___TagDescriptor___ *copy=((struct ___TagDescriptor___**)tagd)[1];
758 /* Zero object case */
759 } else if (obj->type==-1) {
760 /* Single object case */
761 copy->flagptr=((struct ___Object___**)obj)[1];
762 } else if (obj->type==OBJECTARRAYTYPE) {
764 struct ArrayObject *ao=(struct ArrayObject *) obj;
768 struct ArrayObject *aonew;
770 /* Count live objects */
771 for(j=0; j<ao->___cachedCode___; j++) {
772 struct ___Object___ * tobj=ARRAYGET(ao, struct ___Object___ *, j);
/* Round the surviving count up to an OBJECTARRAYINTERVAL multiple and
   build a compacted replacement array holding the forwarded objects. */
777 livecount=((livecount-1)/OBJECTARRAYINTERVAL+1)*OBJECTARRAYINTERVAL;
778 aonew=(struct ArrayObject *) tomalloc(sizeof(struct ArrayObject)+sizeof(struct ___Object___*)*livecount);
779 memcpy(aonew, ao, sizeof(struct ArrayObject));
780 aonew->type=OBJECTARRAYTYPE;
781 aonew->___length___=livecount;
783 for(j=0; j<ao->___cachedCode___; j++) {
784 struct ___Object___ * tobj=ARRAYGET(ao, struct ___Object___ *, j);
785 if (tobj->type==-1) {
786 struct ___Object___ * tobjcpy=((struct ___Object___**)tobj)[1];
787 ARRAYSET(aonew, struct ___Object___*, k++,tobjcpy);
790 aonew->___cachedCode___=k;
791 for(; k<livecount; k++) {
792 ARRAYSET(aonew, struct ___Object___*, k, NULL);
795 /* No object live anymore */
/* Bump-allocate size bytes in to-space (fragment: the advance of
   to_heapptr is on lines missing from this listing). */
806 void * tomalloc(int size) {
807 void * ptr=to_heapptr;
814 #if defined(THREADS)||defined(DSTM)||defined(STM)||defined(MLP)
/* GC safepoint: park this thread's root list so a collector can scan it
   (the matching restartaftergc() call is on a line missing here). */
815 void checkcollect(void * ptr) {
816 stopforgc((struct garbagelist *)ptr);
/* DSTM variant: also shields revertlist by packing it into a fake
   garbagelist on the stack, then restores the possibly-forwarded value.
   NOTE(review): (int)ptr truncates on LP64 targets — presumably this code
   only ever ran 32-bit; confirm before porting. */
821 void checkcollect2(void * ptr) {
822 int ptrarray[]={1, (int)ptr, (int) revertlist};
823 stopforgc((struct garbagelist *)ptrarray);
825 revertlist=(struct ___Object___*)ptrarray[2];
/* Park the calling thread for a collection: publish its roots (stack
 * list, lock list, transaction cache, object lists, allocation window)
 * on its listitem, append it to the global thread list, and wake the
 * collector when this is the last thread to stop. */
829 void stopforgc(struct garbagelist * ptr) {
831 //just append us to the list
/* Chain the thread-local ptrstack/arraystack ahead of the caller's roots. */
833 ptr=(struct garbagelist *) &ptrstack;
834 #if defined(STMARRAY)&&!defined(DUALVIEW)
836 ptr=(struct garbagelist *) &arraystack;
842 litem.locklist=pthread_getspecific(threadlocks);
844 #if defined(STM)||defined(THREADS)||defined(MLP)
/* Expose the bump-allocation window so collect() can invalidate it. */
845 litem.base=&memorybase;
848 litem.tc_size=c_size;
849 litem.tc_table=&c_table;
850 litem.tc_list=&c_list;
851 litem.tc_structs=&c_structs;
852 litem.objlist=newobjs;
854 litem.lockedlist=lockedobjs;
859 struct listitem *litem=pthread_getspecific(litemkey);
862 litem->locklist=pthread_getspecific(threadlocks);
865 pthread_mutex_lock(&gclistlock);
867 if ((listcount+1)==threadcount) {
868 //only do wakeup if we are ready to GC
869 pthread_cond_signal(&gccond);
871 pthread_mutex_unlock(&gclistlock);
/* Resume after a collection: block on gclock (held by the collector for
   the duration of the GC), then de-register this thread from the parked
   list (removal statements are on lines missing from this listing). */
874 void restartaftergc() {
876 pthread_mutex_lock(&gclock); // Wait for GC
877 pthread_mutex_unlock(&gclock);
879 pthread_mutex_lock(&gclistlock);
881 pthread_mutex_unlock(&gclistlock);
885 #if defined(STM)||defined(THREADS)||defined(MLP)
/* Size of each thread-local allocation window refilled from the shared
   GC heap. */
886 #define MEMORYBLOCK 65536
887 void * helper(struct garbagelist *, int);
/* Fast-path allocator: bump-allocate from the thread-local
   [memorybase, memorytop) window; when the window is exhausted (or the
   request exceeds it) refill via helper(), which may trigger a GC using
   stackptr as the caller's root list.  The refill is zeroed. */
888 void * mygcmalloc(struct garbagelist * stackptr, int size) {
891 if (memorybase==NULL||size>(memorytop-memorybase)) {
892 int toallocate=(size>MEMORYBLOCK)?size:MEMORYBLOCK;
893 memorybase=helper(stackptr, toallocate);
894 bzero(memorybase, toallocate);
895 memorytop=memorybase+toallocate;
897 char *retvalue=memorybase;
/* Slow-path allocator (named helper() on STM/THREADS/MLP builds,
 * mygcmalloc() otherwise): serializes on gclock, lazily allocates the
 * initial semispaces, runs a collection when the bump pointer passes
 * the GC trigger point, grows to-space as needed, flips the semispaces,
 * and retries recursively if the heap is still too full.
 *
 * NOTE(review): interior lines are missing from this listing; also
 * "colletcor" in the first error string is a typo in the original —
 * a doc-only pass cannot change string literals. */
902 void * helper(struct garbagelist * stackptr, int size) {
904 void * mygcmalloc(struct garbagelist * stackptr, int size) {
907 #if defined(THREADS)||defined(DSTM)||defined(STM)||defined(MLP)
/* Spin/park until we own the allocation lock. */
908 while (pthread_mutex_trylock(&gclock)!=0) {
917 if (curr_heapptr>curr_heapgcpoint||curr_heapptr<curr_heapbase) {
918 if (curr_heapbase==0) {
919 /* Need to allocate base heap */
920 curr_heapbase=malloc(INITIALHEAPSIZE)
921 if (curr_heapbase==NULL) {
922 printf("malloc failed. Garbage colletcor couldn't get enough memory. Try changing heap size.\n");
925 #if defined(STM)||defined(THREADS)||defined(MLP)
927 bzero(curr_heapbase, INITIALHEAPSIZE);
929 curr_heaptop=curr_heapbase+INITIALHEAPSIZE;
930 curr_heapgcpoint=((char *) curr_heapbase)+GCPOINT(INITIALHEAPSIZE);
931 curr_heapptr=curr_heapbase+size;
933 to_heapbase=malloc(INITIALHEAPSIZE);
934 if (to_heapbase==NULL) {
935 printf("malloc failed. Garbage collector couldn't get enough memory. Try changing heap size.\n");
939 to_heaptop=to_heapbase+INITIALHEAPSIZE;
940 to_heapptr=to_heapbase;
942 #if defined(THREADS)||defined(DSTM)||defined(STM)||defined(MLP)
943 pthread_mutex_unlock(&gclock);
948 /* Grow the to heap if necessary */
950 INTPTR curr_heapsize=curr_heaptop-curr_heapbase;
951 INTPTR to_heapsize=to_heaptop-to_heapbase;
952 INTPTR last_heapsize=0;
/* Target size is derived from the last GC's survivor volume plus this
   request, rounded up to 8 bytes and never below the current heap. */
954 last_heapsize=HEAPSIZE(lastgcsize, size);
955 if ((last_heapsize&7)!=0)
956 last_heapsize+=(8-(last_heapsize%8));
958 if (curr_heapsize>last_heapsize)
959 last_heapsize=curr_heapsize;
960 if (last_heapsize>to_heapsize) {
962 to_heapbase=malloc(last_heapsize);
963 if (to_heapbase==NULL) {
964 printf("Error Allocating enough memory\n");
967 to_heaptop=to_heapbase+last_heapsize;
968 to_heapptr=to_heapbase;
972 /* Do our collection */
975 /* Update stat on previous gc size */
976 lastgcsize=(to_heapptr-to_heapbase)+size;
979 printf("Garbage collected: Old bytes: %u\n", curr_heapptr-curr_heapbase);
980 printf("New space: %u\n", to_heapptr-to_heapbase);
981 printf("Total space: %u\n", to_heaptop-to_heapbase);
984 for(i=0;i<MAXSTATS;i++) {
985 if (garbagearray[i]!=0)
986 printf("Type=%d Size=%u\n", i, garbagearray[i]);
990 /* Flip to/curr heaps */
992 void * tmp=to_heapbase;
993 to_heapbase=curr_heapbase;
997 to_heaptop=curr_heaptop;
/* Reserve the current request out of the freshly flipped heap. */
1001 curr_heapptr=to_heapptr+size;
1002 curr_heapgcpoint=((char *) curr_heapbase)+GCPOINT(curr_heaptop-curr_heapbase);
1003 to_heapptr=to_heapbase;
1005 /* Not enough room :(, redo gc */
1006 if (curr_heapptr>curr_heapgcpoint) {
1007 #if defined(THREADS)||defined(DSTM)||defined(STM)||defined(MLP)
1008 pthread_mutex_unlock(&gclock);
1010 return mygcmalloc(stackptr, size);
/* Zero the tail of the new from-space past the surviving data. */
1013 bzero(tmp, curr_heaptop-tmp);
1014 #if defined(THREADS)||defined(DSTM)||defined(STM)||defined(MLP)
1015 pthread_mutex_unlock(&gclock);
1020 #if defined(THREADS)||defined(DSTM)||defined(STM)||defined(MLP)
1021 pthread_mutex_unlock(&gclock);
/* Copy orig into to-space if it has not been copied yet.
 *
 * Returns (per the visible forwarding protocol) whether a fresh copy was
 * made; *copy_ptr receives the to-space address either way.  A type word
 * of -1 marks an already-forwarded object whose word[1] holds the
 * forwarding pointer.  DSTM/FASTCHECK objects carry an objheader_t
 * before the payload; STMARRAY arrays additionally carry version words.
 *
 * NOTE(review): interior lines (including the type==-1 test and return
 * statements) are missing from this listing. */
1027 int gc_createcopy(void * orig, void ** copy_ptr) {
1032 int type=((int *)orig)[0];
/* Already forwarded: word[1] is the forwarding pointer. */
1034 *copy_ptr=((void **)orig)[1];
1037 if (type<NUMCLASSES) {
1038 /* We have a normal object */
/* Header-carrying builds copy the objheader_t along with the payload and
   return a pointer past the header. */
1040 int size=classsize[type]+sizeof(objheader_t);
1041 void *newobj=tomalloc(size);
1042 memcpy(newobj,((char *) orig)-sizeof(objheader_t), size);
1043 newobj=((char *)newobj)+sizeof(objheader_t);
1045 int size=classsize[type];
1046 void *newobj=tomalloc(size);
1047 memcpy(newobj, orig, size);
1050 garbagearray[type]+=size;
/* Install the forwarding pointer in the old object. */
1052 ((int *)orig)[0]=-1;
1053 ((void **)orig)[1]=newobj;
1057 /* We have an array */
1058 struct ArrayObject *ao=(struct ArrayObject *)orig;
1059 int elementsize=classsize[type];
1060 int length=ao->___length___;
/* STMARRAY layout: payload rounded to a stripe boundary, with two ints
   of version/lock data per stripe stored ahead of the header. */
1063 int basesize=length*elementsize;
1064 basesize=(basesize+LOWMASK)&HIGHMASK;
1065 int versionspace=sizeof(int)*2*(basesize>>INDEXSHIFT);
1066 int size=sizeof(struct ArrayObject)+basesize+sizeof(objheader_t)+versionspace;
1067 void *newobj=tomalloc(size);
1068 memcpy(newobj, ((char*)orig)-sizeof(objheader_t)-versionspace, size);
1069 newobj=((char *)newobj)+sizeof(objheader_t)+versionspace;
1071 int size=sizeof(struct ArrayObject)+length*elementsize+sizeof(objheader_t);
1072 void *newobj=tomalloc(size);
1073 memcpy(newobj, ((char*)orig)-sizeof(objheader_t), size);
1074 newobj=((char *)newobj)+sizeof(objheader_t);
1077 int size=sizeof(struct ArrayObject)+length*elementsize;
1078 void *newobj=tomalloc(size);
1079 memcpy(newobj, orig, size);
1082 garbagearray[type]+=size;
1084 ((int *)orig)[0]=-1;
1085 ((void **)orig)[1]=newobj;
/* Walk a queue of SESE records reachable via forward lists, enqueueing
 * each record's trailing garbagelist roots and recursing into each
 * record's own forward list.  prevUpdate==TRUE additionally refreshes
 * ascendant records first.  (The return-type line and interior lines are
 * missing from this listing.) */
1093 updateForwardList(struct Queue *forwardList, int prevUpdate){
1095 struct QueueItem * fqItem=getHead(forwardList);
1096 while(fqItem!=NULL){
1097 SESEcommon* seseRec = (SESEcommon*)(fqItem->objectptr);
/* The garbagelist is laid out immediately after the SESEcommon header. */
1098 struct garbagelist * gl=(struct garbagelist *)&(seseRec[1]);
1099 if(prevUpdate==TRUE){
1100 updateAscendantSESE(seseRec);
1102 // do something here
1105 for(i=0; i<gl->size; i++) {
1106 void * orig=gl->array[i];
1107 ENQUEUE(orig, gl->array[i]);
1111 // iterate forwarding list of seseRec
1112 struct Queue* fList=&seseRec->forwardList;
1113 updateForwardList(fList,prevUpdate);
1114 fqItem=getNextQueueItem(fqItem);
/* Scan every REntry-bearing structure (hashtable bins, vectors, single
 * items) in all memory queues of seseParent, enqueueing the stack roots
 * of each waiting SESE record.  (Return-type line and interior lines are
 * missing from this listing.)
 *
 * NOTE(review): the inner root loops reuse index "i", which is also the
 * outer memory-queue index — this looks like it clobbers the outer loop;
 * verify against the complete source before relying on it. */
1119 updateMemoryQueue(SESEcommon* seseParent){
1120 // update memory queue
1122 for(i=0; i<seseParent->numMemoryQueue; i++){
1123 MemoryQueue *memoryQueue=seseParent->memoryQueueArray[i];
1124 MemoryQueueItem *memoryItem=memoryQueue->head;
1125 while(memoryItem!=NULL){
1126 if(memoryItem->type==HASHTABLE){
1127 Hashtable *ht=(Hashtable*)memoryItem;
1128 for(binidx=0; binidx<NUMBINS; binidx++){
1129 BinElement *bin=ht->array[binidx];
1130 BinItem *binItem=bin->head;
1131 while(binItem!=NULL){
1132 if(binItem->type==READBIN){
1133 ReadBinItem* readBinItem=(ReadBinItem*)binItem;
1135 for(ridx=0; ridx<readBinItem->index; ridx++){
1136 REntry *rentry=readBinItem->array[ridx];
1137 SESEcommon* seseRec = (SESEcommon*)(rentry->seseRec);
1138 struct garbagelist * gl= (struct garbagelist *)&(seseRec[1]);
1139 updateAscendantSESE(seseRec);
1142 for(i=0; i<gl->size; i++) {
1143 void * orig=gl->array[i];
1144 ENQUEUE(orig, gl->array[i]);
/* Write bins hold a single REntry. */
1150 REntry *rentry=((WriteBinItem*)binItem)->val;
1151 SESEcommon* seseRec = (SESEcommon*)(rentry->seseRec);
1152 struct garbagelist * gl= (struct garbagelist *)&(seseRec[1]);
1153 updateAscendantSESE(seseRec);
1156 for(i=0; i<gl->size; i++) {
1157 void * orig=gl->array[i];
1158 ENQUEUE(orig, gl->array[i]);
1163 binItem=binItem->next;
1166 }else if(memoryItem->type==VECTOR){
1167 Vector *vt=(Vector*)memoryItem;
1169 for(idx=0; idx<vt->index; idx++){
1170 REntry *rentry=vt->array[idx];
1172 SESEcommon* seseRec = (SESEcommon*)(rentry->seseRec);
1173 struct garbagelist * gl= (struct garbagelist *)&(seseRec[1]);
1174 updateAscendantSESE(seseRec);
1177 for(i=0; i<gl->size; i++) {
1178 void * orig=gl->array[i];
1179 ENQUEUE(orig, gl->array[i]);
1185 }else if(memoryItem->type==SINGLEITEM){
1186 SCC *scc=(SCC*)memoryItem;
1187 REntry *rentry=scc->val;
1189 SESEcommon* seseRec = (SESEcommon*)(rentry->seseRec);
1190 struct garbagelist * gl= (struct garbagelist *)&(seseRec[1]);
1191 updateAscendantSESE(seseRec);
1194 for(i=0; i<gl->size; i++) {
1195 void * orig=gl->array[i];
1196 ENQUEUE(orig, gl->array[i]);
1202 memoryItem=memoryItem->next;
/* For each dependent SESE record referenced from seseRec, enqueue the
 * roots of its chained garbagelists.  (Return-type line and interior
 * lines are missing from this listing.) */
1207 updateAscendantSESE(SESEcommon* seseRec){
1209 for(prevIdx=0; prevIdx<(seseRec->numDependentSESErecords); prevIdx++){
/* Record pointers live at offsetToDepSESErecords from the record base,
   one INTPTR slot apart. */
1210 SESEcommon* prevSESE = (SESEcommon*)
1213 seseRec->offsetToDepSESErecords +
1214 (sizeof(INTPTR)*prevIdx)
/* Each record's garbagelist follows its SESEcommon header. */
1218 struct garbagelist * prevgl=(struct garbagelist *)&(((SESEcommon*)(prevSESE))[1]);
1219 while(prevgl!=NULL) {
1221 for(i=0; i<prevgl->size; i++) {
1222 void * orig=prevgl->array[i];
1223 ENQUEUE(orig, prevgl->array[i]);
1225 prevgl=prevgl->next;
1233 int within(void *ptr){ //debug function
1234 if(ptr>curr_heapptr || ptr<curr_heapbase){
1235 __asm__ __volatile__ ("int $3"); // breakpoint