3 #include "structdefs.h"
5 #include "SimpleHash.h"
7 #include "GenericHashtable.h"
9 #if defined(THREADS) || defined(DSTM) || defined(STM)
18 #include "workschedule.h"
19 extern int numWorkSchedWorkers;
28 #include <DSTM/interface_recovery/dstm.h>
30 #include <DSTM/interface/dstm.h>
37 #include "delaycomp.h"
43 #ifndef INITIALHEAPSIZE_MB
44 #define INITIALHEAPSIZE_MB (256)
47 #define INITIALHEAPSIZE INITIALHEAPSIZE_MB*1024*1024L
48 #define GCPOINT(x) ((INTPTR)((x)*0.99))
49 /* This define takes in how full the heap is initially and returns a new heap size to use */
50 #define HEAPSIZE(x,y) ((INTPTR)(x+y))*2
53 extern struct genhashtable * activetasks;
55 extern struct parameterwrapper * objectqueues[NUMCLASSES];
57 extern struct genhashtable * failedtasks;
58 extern struct taskparamdescriptor *currtpd;
59 extern struct ctable *forward;
60 extern struct ctable *reverse;
61 extern struct RuntimeHash *fdtoobject;
66 long garbagearray[MAXSTATS];
69 #if defined(THREADS) || defined(DSTM) || defined(STM)||defined(MLP)
71 struct listitem * list=NULL;
74 __thread struct listitem litem;
79 __thread SESEcommon* seseCommon;
82 //Need to check if pointers are transaction pointers
83 //this also catches the special flag value of 1 for local copies
85 #define ENQUEUE(orig, dst) \
86 if ((!(((unsigned int)orig)&0x1))) { \
87 if (orig>=curr_heapbase&&orig<curr_heaptop) { \
89 if (gc_createcopy(orig,©)) \
95 #define ENQUEUE(orig, dst) \
96 if (orig>=curr_heapbase&&orig<curr_heaptop) { \
98 if (gc_createcopy(orig,©)) \
102 #define SENQUEUE(orig, dst) \
105 if (gc_createcopy(orig,©)) \
109 #elif defined(FASTCHECK)
110 #define ENQUEUE(orig, dst) \
111 if (((unsigned int)orig)!=1) { \
113 if (gc_createcopy(orig,©)) \
117 #define ENQUEUE(orig, dst) \
118 if (orig>=curr_heapbase&&orig<curr_heaptop) { \
120 if (gc_createcopy(orig,©)) \
127 struct pointerblock {
128 void * ptrs[NUMPTRS];
129 struct pointerblock *next;
132 void * curr_heapbase=0;
133 void * curr_heapptr=0;
134 void * curr_heapgcpoint=0;
135 void * curr_heaptop=0;
137 void * to_heapbase=0;
143 struct pointerblock *head=NULL;
145 struct pointerblock *tail=NULL;
147 struct pointerblock *spare=NULL;
/* enqueue/dequeue: FIFO work queue of heap pointers for the copying
 * collector, built from chained fixed-size pointerblock nodes.
 * NOTE(review): this listing is a sampled excerpt -- the original source
 * line numbers are embedded at the start of each line and many lines are
 * elided, so the bodies below are fragments; confirm against the full file. */
149 void enqueue(void *ptr) {
/* When the current head block is full, link in a new block -- presumably
 * reusing the `spare` block when available, else malloc'ing a fresh one
 * (the linking lines are elided here). TODO confirm. */
150 if (headindex==NUMPTRS) {
151 struct pointerblock * tmp;
155 } else tmp=malloc(sizeof(struct pointerblock));
160 head->ptrs[headindex++]=ptr;
/* dequeue (its header line is elided above): pops the next pointer from
 * the tail block, first recycling an exhausted tail block. */
164 if (tailindex==NUMPTRS) {
165 struct pointerblock *tmp=tail;
173 return tail->ptrs[tailindex++];
/* fixobjlist: forwards every object pointer stored in an objlist by
 * running it through SENQUEUE (copy/forward into to-space).
 * NOTE(review): excerpted listing -- the declaration of `i` and the
 * outer traversal over chained objlist nodes (if any) are elided. */
177 void fixobjlist(struct objlist * ptr) {
180 for(i=0;i<ptr->offset;i++) {
181 SENQUEUE(ptr->objs[i], ptr->objs[i]);
/* fixtable: rebuilds a transaction-cache hash table during GC.  Walks every
 * chain of the old table, forwards each cached value (and, for objects that
 * live in the GC heap, their interior pointers) via SENQUEUE, then re-inserts
 * the entry into a freshly calloc'd table of the same bucket count.
 * NOTE(review): excerpted listing with elided lines; several control-flow
 * closers and declarations (i, j, key, index, isfirst) are not visible. */
187 void fixtable(chashlistnode_t ** tc_table, chashlistnode_t **tc_list, cliststruct_t **cstr, unsigned int tc_size) {
188 unsigned int mask=(tc_size<<4)-1;
189 chashlistnode_t *node=calloc(tc_size, sizeof(chashlistnode_t));
190 chashlistnode_t *ptr=*tc_table;
191 chashlistnode_t *curr;
195 chashlistnode_t *newlist=NULL;
196 for(i=0;i<tc_size;i++) {
199 do { //Inner loop to go through linked lists
201 chashlistnode_t *tmp,*next;
203 if ((key=(void *)curr->key) == 0) { //Exit inner loop if there the first element is 0
204 break; //key = val =0 for element if not present within the hash table
/* Values inside the collected heap are forwarded; values outside it are
 * transaction-cache copies whose interior pointers are rewritten below. */
207 if (curr->val>=curr_heapbase&&curr->val<curr_heaptop) {
208 SENQUEUE(curr->val, curr->val);
210 //rewrite transaction cache entry
211 void *vptr=curr->val;
212 int type=((int *)vptr)[0];
/* pointerarray[type]: 0 => primitive array, 1 => pointer array,
 * otherwise an offset table {count, off1..offN} for object fields. */
213 unsigned INTPTR *pointer=pointerarray[type];
215 //array of primitives - do nothing
216 struct ArrayObject *ao=(struct ArrayObject *) vptr;
217 SENQUEUE((void *)ao->___objlocation___, *((void **)&ao->___objlocation___));
218 } else if (((INTPTR)pointer)==1) {
220 struct ArrayObject *ao=(struct ArrayObject *) vptr;
221 int length=ao->___length___;
223 SENQUEUE((void *)ao->___objlocation___, *((void **)&ao->___objlocation___));
/* STMARRAY path: only scan index ranges whose lock/version word says
 * they were touched (lockval != STMNONE). TODO confirm -- lines elided. */
225 int lowindex=ao->lowindex;
226 int highindex=ao->highindex;
228 for(j=lowindex; j<=highindex; j++) {
229 unsigned int lockval;
230 GETLOCKVAL(lockval, ao, j);
231 if (lockval!=STMNONE) {
232 int lowi=(j<<INDEXSHIFT)/sizeof(void *);
233 int highi=lowi+(INDEXLENGTH/sizeof(void *));
/* NOTE(review): the loops below reuse `i`, which also drives the outer
 * bucket loop at original line 196 -- if these are nested (elided lines
 * make it ambiguous) the outer iteration gets clobbered; verify. */
234 for(i=lowi; i<highi;i++) {
236 for(i=0; i<length; i++) {
238 void *objptr=((void **)(((char *)&ao->___length___)+sizeof(int)))[i];
239 SENQUEUE(objptr, ((void **)(((char *)&ao->___length___)+sizeof(int)))[i]);
/* Normal object: forward each pointer field listed in the offset table. */
246 INTPTR size=pointer[0];
248 for(i=1; i<=size; i++) {
249 unsigned int offset=pointer[i];
250 void * objptr=*((void **)(((char *)vptr)+offset));
251 SENQUEUE(objptr, *((void **)(((char *)vptr)+offset)));
/* Re-insert (key,val) into the new table at the recomputed bucket. */
257 index = (((unsigned INTPTR)key) & mask) >>4;
261 // Insert into the new table
263 tmp->key = curr->key;
264 tmp->val = curr->val;
267 } else if (isfirst) {
268 chashlistnode_t *newnode;
/* Collision: take a node from the current cliststruct arena, or calloc a
 * new arena block when the current one is full. */
269 if ((*cstr)->num<NUMCLIST) {
270 newnode=&(*cstr)->array[(*cstr)->num];
274 cliststruct_t *tcl=calloc(1,sizeof(cliststruct_t));
277 newnode=&tcl->array[0];
280 newnode->key = curr->key;
281 newnode->val = curr->val;
282 newnode->next = tmp->next;
283 newnode->lnext=newlist;
289 curr->next=tmp->next;
303 if ((head==tail)&&(tailindex==headindex))
309 struct pointerblock *taghead=NULL;
/* enqueuetag: records a tag descriptor on the taghead pointerblock chain.
 * Tags are queued separately and fixed up only after the main trace, so
 * tags alone do not keep their referenced objects alive (see the fixup
 * pass at the end of collect()).  Excerpted listing; block-linking lines
 * between the malloc and the store are elided. */
312 void enqueuetag(struct ___TagDescriptor___ *ptr) {
313 if (tagindex==NUMPTRS) {
314 struct pointerblock * tmp=malloc(sizeof(struct pointerblock));
319 taghead->ptrs[tagindex++]=ptr;
323 #if defined(STM)||defined(THREADS)||defined(MLP)
324 __thread char * memorybase=NULL;
325 __thread char * memorytop=NULL;
/* collect: the main stop-the-world copying-collection pass.
 * Phases visible in this excerpt:
 *   1. (threaded builds) wait under gclistlock until all other registered
 *      threads have stopped at a GC point;
 *   2. enqueue roots: the calling thread's shadow stacks, every other
 *      thread's registered garbagelist/locklist/transaction caches, the
 *      task queues (active/failed tasks), forward/reverse ctables, fd map,
 *      and (MLP builds) SESE records found in the work-stealing deques;
 *   3. drain the work queue, copying each object and enqueueing its
 *      interior pointers (Cheney-style transitive copy);
 *   4. fix up tag descriptors last so tags do not keep objects alive.
 * NOTE(review): this is a sampled excerpt -- original line numbers are
 * embedded at the start of each line and a large fraction of the body
 * (declarations of i/j/k/cpy, many closing braces, #else/#endif arms) is
 * elided.  Comments below describe only what the visible lines show. */
329 void collect(struct garbagelist * stackptr) {
330 #if defined(THREADS)||defined(DSTM)||defined(STM)||defined(MLP)
/* Barrier: block until every other registered thread has checked in. */
332 pthread_mutex_lock(&gclistlock);
334 if ((listcount+1)==threadcount) {
335 break; /* Have all other threads stopped */
337 pthread_cond_wait(&gccond, &gclistlock);
/* Chain the local shadow stacks in front of the caller's root list. */
341 ptrstack.prev=stackptr;
342 stackptr=(struct garbagelist *) &ptrstack;
343 #if defined(STMARRAY)&&!defined(DUALVIEW)
344 arraystack.prev=stackptr;
345 stackptr=(struct garbagelist *) &arraystack;
/* Reset per-type garbage statistics and the work/tag queues. */
352 for(i=0;i<MAXSTATS;i++)
360 head=tail=malloc(sizeof(struct pointerblock));
366 taghead=malloc(sizeof(struct pointerblock));
/* Forward this thread's own transaction cache and locked-object list. */
373 fixtable(&c_table, &c_list, &c_structs, c_size);
376 fixobjlist(lockedobjs);
380 #if defined(STM)||defined(THREADS)||defined(MLP)
384 /* Check current stack */
385 #if defined(THREADS)||defined(DSTM)||defined(STM)||defined(MLP)
387 struct listitem *listptr=list;
391 while(stackptr!=NULL) {
393 for(i=0; i<stackptr->size; i++) {
394 void * orig=stackptr->array[i];
395 ENQUEUE(orig, stackptr->array[i]);
397 stackptr=stackptr->next;
399 #if defined(THREADS)||defined(DSTM)||defined(STM)||defined(MLP)
400 /* Go to next thread */
/* Skip our own listitem; in MLP builds also refresh our SESE's forward
 * list and memory queue while we are at it. */
403 if (listptr==&litem) {
405 // update forward list & memory queue for the current SESE
406 updateForwardList(((SESEcommon*)listptr->seseCommon)->forwardList,FALSE);
407 updateMemoryQueue((SESEcommon*)(listptr->seseCommon));
409 listptr=listptr->next;
413 struct listitem *litem=pthread_getspecific(litemkey);
414 if (listptr==litem) {
415 listptr=listptr->next;
/* Roots held by other stopped threads: lock lists, transaction caches,
 * new/locked object lists, and their shadow stacks. */
422 void * orig=listptr->locklist;
423 ENQUEUE(orig, listptr->locklist);
426 if ((*listptr->tc_table)!=NULL) {
427 fixtable(listptr->tc_table, listptr->tc_list, listptr->tc_structs, listptr->tc_size);
428 fixobjlist(listptr->objlist);
430 fixobjlist(listptr->lockedlist);
434 #if defined(STM)||defined(THREADS)||defined(MLP)
/* Invalidate the thread's local allocation block; it points into the
 * heap we are about to flip.  Presumably refilled on next mygcmalloc. */
435 *(listptr->base)=NULL;
438 // update forward list & memory queue for all running SESEs.
439 updateForwardList(((SESEcommon*)listptr->seseCommon)->forwardList,FALSE);
440 updateMemoryQueue((SESEcommon*)(listptr->seseCommon));
442 stackptr=listptr->stackptr;
443 listptr=listptr->next;
451 ENQUEUE(___fcrevert___, ___fcrevert___);
456 /* Update objectsets */
458 for(i=0; i<NUMCLASSES; i++) {
459 #if !defined(MULTICORE)
460 struct parameterwrapper * p=objectqueues[i];
462 struct ObjectHash * set=p->objectset;
463 struct ObjectNode * ptr=set->listhead;
465 void *orig=(void *)ptr->key;
466 ENQUEUE(orig, *((void **)(&ptr->key)));
469 ObjectHashrehash(set); /* Rehash the table */
/* FASTCHECK builds: forward/reverse ctables and the fd->object map keep
 * heap pointers as keys/values; forward them and rehash. */
478 struct cnode * ptr=forward->listhead;
480 void * orig=(void *)ptr->key;
481 ENQUEUE(orig, *((void **)(&ptr->key)));
484 crehash(forward); /* Rehash the table */
488 struct cnode * ptr=reverse->listhead;
490 void *orig=(void *)ptr->val;
491 ENQUEUE(orig, *((void**)(&ptr->val)));
498 struct RuntimeNode * ptr=fdtoobject->listhead;
500 void *orig=(void *)ptr->data;
501 ENQUEUE(orig, *((void**)(&ptr->data)));
507 /* Update current task descriptor */
509 for(i=0; i<currtpd->numParameters; i++) {
510 void *orig=currtpd->parameterArray[i];
511 ENQUEUE(orig, currtpd->parameterArray[i]);
516 /* Update active tasks */
518 struct genpointerlist * ptr=activetasks->list;
520 struct taskparamdescriptor *tpd=ptr->src;
522 for(i=0; i<tpd->numParameters; i++) {
523 void * orig=tpd->parameterArray[i];
524 ENQUEUE(orig, tpd->parameterArray[i]);
528 genrehash(activetasks);
531 /* Update failed tasks */
533 struct genpointerlist * ptr=failedtasks->list;
535 struct taskparamdescriptor *tpd=ptr->src;
537 for(i=0; i<tpd->numParameters; i++) {
538 void * orig=tpd->parameterArray[i];
539 ENQUEUE(orig, tpd->parameterArray[i]);
543 genrehash(failedtasks);
/* MLP: scan SESE records queued in each worker's structure.  Two variants
 * appear below (work-queue items via di->work, then a deque walk between
 * bottom and top nodes) -- presumably selected by elided #ifdefs; confirm. */
555 // goes over ready-to-run SESEs
556 for( i = 0; i < numWorkSchedWorkers; ++i ) {
562 // check all the relevant indices of this
563 // node in the deque, noting if we are in
564 // the top/bottom node which can be partially
568 //SESEcommon* common = (SESEcommon*) n->itsDataArr[j];
569 //if(common==seseCommon){
570 // skip the current running SESE
574 SESEcommon* seseRec = (SESEcommon*) di->work;
/* The SESE's garbagelist sits immediately after the SESEcommon header. */
575 struct garbagelist* gl = (struct garbagelist*) &(seseRec[1]);
576 struct garbagelist* glroot = gl;
578 updateAscendantSESE( seseRec );
580 while( gl != NULL ) {
582 for( k = 0; k < gl->size; k++ ) {
583 void* orig = gl->array[k];
584 ENQUEUE( orig, gl->array[k] );
589 // we only have to move across the nodes
590 // of the deque if the top and bottom are
591 // not the same already
593 } while( di !=NULL) ;
609 // goes over ready-to-run SESEs
610 for( i = 0; i < numWorkSchedWorkers; ++i ) {
613 botNode = dqDecodePtr( dq->bottom );
614 botIndx = dqDecodeIdx( dq->bottom );
616 topNode = dqDecodePtr( dq->top );
617 topIndx = dqDecodeIdx( dq->top );
622 // check all the relevant indices of this
623 // node in the deque, noting if we are in
624 // the top/bottom node which can be partially
626 if( n == botNode ) { jLo = botIndx; } else { jLo = 0; }
627 if( n == topNode ) { jHi = topIndx; } else { jHi = DQNODE_ARRAYSIZE; }
629 for( j = jLo; j < jHi; ++j ) {
632 //SESEcommon* common = (SESEcommon*) n->itsDataArr[j];
633 //if(common==seseCommon){
637 SESEcommon* seseRec = (SESEcommon*) n->itsDataArr[j];
639 struct garbagelist* gl = (struct garbagelist*) &(seseRec[1]);
640 struct garbagelist* glroot = gl;
642 updateAscendantSESE( seseRec );
644 while( gl != NULL ) {
646 for( k = 0; k < gl->size; k++ ) {
647 void* orig = gl->array[k];
648 ENQUEUE( orig, gl->array[k] );
654 // we only have to move across the nodes
655 // of the deque if the top and bottom are
656 // not the same already
657 if( botNode != topNode ) {
660 } while( n != topNode );
/* Transitive closure: drain the queue, scanning each copied object's
 * interior pointers according to pointerarray[type]. */
668 void * ptr=dequeue();
670 int type=((int *)cpy)[0];
671 unsigned INTPTR * pointer;
675 /* Nothing is inside */
680 pointer=pointerarray[type];
682 /* Array of primitives */
684 #if defined(DSTM)||defined(FASTCHECK)
685 struct ArrayObject *ao=(struct ArrayObject *) ptr;
686 struct ArrayObject *ao_cpy=(struct ArrayObject *) cpy;
687 ENQUEUE((void *)ao->___nextobject___, *((void **)&ao_cpy->___nextobject___));
688 ENQUEUE((void *)ao->___localcopy___, *((void **)&ao_cpy->___localcopy___));
691 struct ArrayObject *ao=(struct ArrayObject *) ptr;
692 struct ArrayObject *ao_cpy=(struct ArrayObject *) cpy;
693 SENQUEUE((void *)ao->___objlocation___, *((void **)&ao_cpy->___objlocation___));
695 } else if (((INTPTR)pointer)==1) {
696 /* Array of pointers */
697 struct ArrayObject *ao=(struct ArrayObject *) ptr;
698 struct ArrayObject *ao_cpy=(struct ArrayObject *) cpy;
699 #if (defined(DSTM)||defined(FASTCHECK))
700 ENQUEUE((void *)ao->___nextobject___, *((void **)&ao_cpy->___nextobject___));
701 ENQUEUE((void *)ao->___localcopy___, *((void **)&ao_cpy->___localcopy___));
704 SENQUEUE((void *)ao->___objlocation___, *((void **)&ao_cpy->___objlocation___));
706 int length=ao->___length___;
/* Element i lives at &length + sizeof(int) + i*sizeof(void*): the array
 * payload starts right after the length word.  Read from the old copy,
 * write the forwarded pointer into the new copy. */
708 for(i=0; i<length; i++) {
709 void *objptr=((void **)(((char *)&ao->___length___)+sizeof(int)))[i];
710 ENQUEUE(objptr, ((void **)(((char *)&ao_cpy->___length___)+sizeof(int)))[i]);
/* Normal object: offset table walk, reading old copy / writing new copy. */
713 INTPTR size=pointer[0];
715 for(i=1; i<=size; i++) {
716 unsigned int offset=pointer[i];
717 void * objptr=*((void **)(((char *)ptr)+offset));
718 ENQUEUE(objptr, *((void **)(((char *)cpy)+offset)));
726 #if defined(THREADS)||defined(DSTM)||defined(STM)||defined(MLP)
728 pthread_mutex_unlock(&gclistlock);
733 /* Fix up the references from tags. This can't be done earlier,
734 because we don't want tags to keep objects alive */
736 while(taghead!=NULL) {
738 struct pointerblock *tmp=taghead->next;
739 for(i=0; i<tagindex; i++) {
740 struct ___TagDescriptor___ *tagd=taghead->ptrs[i];
741 struct ___Object___ *obj=tagd->flagptr;
/* Word 1 of a forwarded descriptor holds its to-space copy. */
742 struct ___TagDescriptor___ *copy=((struct ___TagDescriptor___**)tagd)[1];
744 /* Zero object case */
745 } else if (obj->type==-1) {
746 /* Single object case */
747 copy->flagptr=((struct ___Object___**)obj)[1];
748 } else if (obj->type==OBJECTARRAYTYPE) {
/* Tag bound to many objects: rebuild the object array keeping only
 * objects that survived (type==-1 means "forwarded"), rounded up to a
 * multiple of OBJECTARRAYINTERVAL slots, padding the rest with NULL. */
750 struct ArrayObject *ao=(struct ArrayObject *) obj;
754 struct ArrayObject *aonew;
756 /* Count live objects */
757 for(j=0; j<ao->___cachedCode___; j++) {
758 struct ___Object___ * tobj=ARRAYGET(ao, struct ___Object___ *, j);
763 livecount=((livecount-1)/OBJECTARRAYINTERVAL+1)*OBJECTARRAYINTERVAL;
764 aonew=(struct ArrayObject *) tomalloc(sizeof(struct ArrayObject)+sizeof(struct ___Object___*)*livecount);
765 memcpy(aonew, ao, sizeof(struct ArrayObject));
766 aonew->type=OBJECTARRAYTYPE;
767 aonew->___length___=livecount;
769 for(j=0; j<ao->___cachedCode___; j++) {
770 struct ___Object___ * tobj=ARRAYGET(ao, struct ___Object___ *, j);
771 if (tobj->type==-1) {
772 struct ___Object___ * tobjcpy=((struct ___Object___**)tobj)[1];
773 ARRAYSET(aonew, struct ___Object___*, k++,tobjcpy);
776 aonew->___cachedCode___=k;
777 for(; k<livecount; k++) {
778 ARRAYSET(aonew, struct ___Object___*, k, NULL);
781 /* No object live anymore */
/* tomalloc: bump-pointer allocation out of to-space during collection.
 * Excerpted listing -- the pointer advance/alignment and return are elided;
 * presumably advances to_heapptr by (aligned) size.  TODO confirm. */
792 void * tomalloc(int size) {
793 void * ptr=to_heapptr;
800 #if defined(THREADS)||defined(DSTM)||defined(STM)||defined(MLP)
/* checkcollect: GC safepoint for threaded builds -- parks this thread's
 * root list with the collector.  Excerpted; the matching restartaftergc()
 * call after the stop is elided.  TODO confirm. */
801 void checkcollect(void * ptr) {
802 stopforgc((struct garbagelist *)ptr);
/* checkcollect2 (DSTM variant): safepoint that also protects revertlist.
 * Builds a fake one-element garbagelist on the stack {size=1, ptr,
 * revertlist} so the collector forwards both pointers, then reloads the
 * possibly-moved revertlist afterwards.  Excerpted listing. */
807 void checkcollect2(void * ptr) {
/* NOTE(review): casting pointers through int truncates on LP64 targets;
 * this runtime apparently assumes 32-bit pointers here -- verify. */
808 int ptrarray[]={1, (int)ptr, (int) revertlist};
809 stopforgc((struct garbagelist *)ptrarray);
811 revertlist=(struct ___Object___*)ptrarray[2];
/* stopforgc: registers the calling thread as stopped-for-GC.  Publishes
 * this thread's roots (shadow stacks, lock list, transaction cache,
 * new/locked object lists, allocation-block base) in its thread-local
 * `litem`, bumps listcount under gclistlock, and signals the collector
 * once every thread has checked in.  Excerpted listing; list insertion
 * and counter updates are among the elided lines. */
815 void stopforgc(struct garbagelist * ptr) {
817 //just append us to the list
/* Chain the shadow stack(s) in front of the caller-supplied root list. */
819 ptr=(struct garbagelist *) &ptrstack;
820 #if defined(STMARRAY)&&!defined(DUALVIEW)
822 ptr=(struct garbagelist *) &arraystack;
828 litem.locklist=pthread_getspecific(threadlocks);
830 #if defined(STM)||defined(THREADS)||defined(MLP)
831 litem.base=&memorybase;
/* Expose the STM transaction cache so collect() can fixtable() it. */
834 litem.tc_size=c_size;
835 litem.tc_table=&c_table;
836 litem.tc_list=&c_list;
837 litem.tc_structs=&c_structs;
838 litem.objlist=newobjs;
840 litem.lockedlist=lockedobjs;
/* Alternate (non-__thread) build: litem fetched from pthread TLS. */
845 struct listitem *litem=pthread_getspecific(litemkey);
848 litem->locklist=pthread_getspecific(threadlocks);
851 pthread_mutex_lock(&gclistlock);
853 if ((listcount+1)==threadcount) {
854 //only do wakeup if we are ready to GC
855 pthread_cond_signal(&gccond);
857 pthread_mutex_unlock(&gclistlock);
/* restartaftergc: resumes a thread after a GC safepoint.  Blocks on gclock
 * until the collector releases it, then deregisters this thread from the
 * stopped list under gclistlock (the list-removal lines are elided). */
860 void restartaftergc() {
862 pthread_mutex_lock(&gclock); // Wait for GC
863 pthread_mutex_unlock(&gclock);
865 pthread_mutex_lock(&gclistlock);
867 pthread_mutex_unlock(&gclistlock);
871 #if defined(STM)||defined(THREADS)||defined(MLP)
872 #define MEMORYBLOCK 65536
873 void * helper(struct garbagelist *, int);
/* mygcmalloc (thread-local fast path): carves allocations out of a
 * per-thread MEMORYBLOCK-sized buffer (memorybase..memorytop) so the
 * global gclock is only taken when the buffer is exhausted, in which case
 * helper() refills it (possibly triggering a collection).  Excerpted; the
 * bump of memorybase and the return of retvalue are elided. */
874 void * mygcmalloc(struct garbagelist * stackptr, int size) {
877 if (memorybase==NULL||size>(memorytop-memorybase)) {
/* Oversized requests get their own block instead of the default 64 KiB. */
878 int toallocate=(size>MEMORYBLOCK)?size:MEMORYBLOCK;
879 memorybase=helper(stackptr, toallocate);
880 bzero(memorybase, toallocate);
881 memorytop=memorybase+toallocate;
883 char *retvalue=memorybase;
/* helper / mygcmalloc (GC slow path): global bump allocation with
 * collection.  In STM/threaded builds this body is `helper` (refilling the
 * per-thread buffer); otherwise it is `mygcmalloc` directly -- the two
 * headers below belong to opposite arms of an elided #ifdef.
 * Flow: take gclock; if the bump pointer passed curr_heapgcpoint, either
 * allocate the initial semispaces or grow to-space, run collect(), then
 * flip the heaps and retry if still short on room.
 * NOTE(review): excerpted listing -- the malloc-failure exit()s, the
 * collect() call, part of the heap flip, and the returns are elided. */
888 void * helper(struct garbagelist * stackptr, int size) {
890 void * mygcmalloc(struct garbagelist * stackptr, int size) {
893 #if defined(THREADS)||defined(DSTM)||defined(STM)||defined(MLP)
/* trylock loop: if another thread holds gclock it is collecting, so
 * presumably we park at a safepoint between attempts -- elided; confirm. */
894 while (pthread_mutex_trylock(&gclock)!=0) {
903 if (curr_heapptr>curr_heapgcpoint||curr_heapptr<curr_heapbase) {
904 if (curr_heapbase==0) {
905 /* Need to allocate base heap */
906 curr_heapbase=malloc(INITIALHEAPSIZE);
907 if (curr_heapbase==NULL) {
908 printf("malloc failed. Garbage colletcor couldn't get enough memory. Try changing heap size.\n");
911 #if defined(STM)||defined(THREADS)||defined(MLP)
913 bzero(curr_heapbase, INITIALHEAPSIZE);
915 curr_heaptop=curr_heapbase+INITIALHEAPSIZE;
/* GCPOINT = 99% of the heap: collection triggers when the bump pointer
 * crosses this watermark. */
916 curr_heapgcpoint=((char *) curr_heapbase)+GCPOINT(INITIALHEAPSIZE);
917 curr_heapptr=curr_heapbase+size;
919 to_heapbase=malloc(INITIALHEAPSIZE);
920 if (to_heapbase==NULL) {
921 printf("malloc failed. Garbage collector couldn't get enough memory. Try changing heap size.\n");
925 to_heaptop=to_heapbase+INITIALHEAPSIZE;
926 to_heapptr=to_heapbase;
928 #if defined(THREADS)||defined(DSTM)||defined(STM)||defined(MLP)
929 pthread_mutex_unlock(&gclock);
934 /* Grow the to heap if necessary */
936 INTPTR curr_heapsize=curr_heaptop-curr_heapbase;
937 INTPTR to_heapsize=to_heaptop-to_heapbase;
938 INTPTR last_heapsize=0;
/* Size estimate: twice (last live size + this request), 8-byte aligned,
 * never smaller than the current heap. */
940 last_heapsize=HEAPSIZE(lastgcsize, size);
941 if ((last_heapsize&7)!=0)
942 last_heapsize+=(8-(last_heapsize%8));
944 if (curr_heapsize>last_heapsize)
945 last_heapsize=curr_heapsize;
946 if (last_heapsize>to_heapsize) {
948 to_heapbase=malloc(last_heapsize);
949 if (to_heapbase==NULL) {
950 printf("Error Allocating enough memory\n");
953 to_heaptop=to_heapbase+last_heapsize;
954 to_heapptr=to_heapbase;
958 /* Do our collection */
961 /* Update stat on previous gc size */
962 lastgcsize=(to_heapptr-to_heapbase)+size;
965 printf("Garbage collected: Old bytes: %u\n", curr_heapptr-curr_heapbase);
966 printf("New space: %u\n", to_heapptr-to_heapbase);
967 printf("Total space: %u\n", to_heaptop-to_heapbase);
970 for(i=0;i<MAXSTATS;i++) {
971 if (garbagearray[i]!=0)
972 printf("Type=%d Size=%u\n", i, garbagearray[i]);
976 /* Flip to/curr heaps */
978 void * tmp=to_heapbase;
979 to_heapbase=curr_heapbase;
983 to_heaptop=curr_heaptop;
/* Reserve this request in the (new) current heap and reset to-space. */
987 curr_heapptr=to_heapptr+size;
988 curr_heapgcpoint=((char *) curr_heapbase)+GCPOINT(curr_heaptop-curr_heapbase);
989 to_heapptr=to_heapbase;
991 /* Not enough room :(, redo gc */
992 if (curr_heapptr>curr_heapgcpoint) {
993 #if defined(THREADS)||defined(DSTM)||defined(STM)||defined(MLP)
994 pthread_mutex_unlock(&gclock);
996 return mygcmalloc(stackptr, size);
/* Zero the freshly reclaimed tail of the new current heap. */
999 bzero(tmp, curr_heaptop-tmp);
1000 #if defined(THREADS)||defined(DSTM)||defined(STM)||defined(MLP)
1001 pthread_mutex_unlock(&gclock);
1006 #if defined(THREADS)||defined(DSTM)||defined(STM)||defined(MLP)
1007 pthread_mutex_unlock(&gclock);
/* gc_createcopy: copies one object into to-space if not yet copied.
 * Already-forwarded objects (type word == -1, set below) return their
 * stored to-space pointer from word 1.  Otherwise the object (plus its
 * objheader_t in DSTM/STM builds, plus STMARRAY version space for arrays)
 * is tomalloc'd and memcpy'd, per-type garbage stats are updated, and the
 * original is overwritten with the {-1, newobj} forwarding record.
 * Return value presumably distinguishes "newly copied" from "already
 * copied" for ENQUEUE -- the return statements are elided; confirm.
 * NOTE(review): excerpted listing; early-out branch and #ifdef arms
 * separating the header/no-header variants are among the elided lines. */
1013 int gc_createcopy(void * orig, void ** copy_ptr) {
1018 int type=((int *)orig)[0];
1020 *copy_ptr=((void **)orig)[1];
1023 if (type<NUMCLASSES) {
1024 /* We have a normal object */
1026 int size=classsize[type]+sizeof(objheader_t);
1027 void *newobj=tomalloc(size);
/* Copy header + payload, then step past the header so newobj points at
 * the object body like orig does. */
1028 memcpy(newobj,((char *) orig)-sizeof(objheader_t), size);
1029 newobj=((char *)newobj)+sizeof(objheader_t);
1031 int size=classsize[type];
1032 void *newobj=tomalloc(size);
1033 memcpy(newobj, orig, size);
1036 garbagearray[type]+=size;
/* Install forwarding record in the old copy. */
1038 ((int *)orig)[0]=-1;
1039 ((void **)orig)[1]=newobj;
1043 /* We have an array */
1044 struct ArrayObject *ao=(struct ArrayObject *)orig;
1045 int elementsize=classsize[type];
1046 int length=ao->___length___;
/* STMARRAY variant: payload rounded up, plus 2 ints of version/lock data
 * per INDEXSHIFT-sized stripe stored *before* the object header. */
1049 int basesize=length*elementsize;
1050 basesize=(basesize+LOWMASK)&HIGHMASK;
1051 int versionspace=sizeof(int)*2*(basesize>>INDEXSHIFT);
1052 int size=sizeof(struct ArrayObject)+basesize+sizeof(objheader_t)+versionspace;
1053 void *newobj=tomalloc(size);
1054 memcpy(newobj, ((char*)orig)-sizeof(objheader_t)-versionspace, size);
1055 newobj=((char *)newobj)+sizeof(objheader_t)+versionspace;
1057 int size=sizeof(struct ArrayObject)+length*elementsize+sizeof(objheader_t);
1058 void *newobj=tomalloc(size);
1059 memcpy(newobj, ((char*)orig)-sizeof(objheader_t), size);
1060 newobj=((char *)newobj)+sizeof(objheader_t);
1063 int size=sizeof(struct ArrayObject)+length*elementsize;
1064 void *newobj=tomalloc(size);
1065 memcpy(newobj, orig, size);
1068 garbagearray[type]+=size;
1070 ((int *)orig)[0]=-1;
1071 ((void **)orig)[1]=newobj;
/* updateForwardList: recursively walks an SESE's forwarding queue,
 * enqueueing every root in each dependent SESE's garbagelist (which sits
 * immediately after its SESEcommon header).  When prevUpdate is TRUE,
 * ascendant SESE records are refreshed first via updateAscendantSESE.
 * Excerpted listing -- the return type line and loop-variable declaration
 * are elided. */
1079 updateForwardList(struct Queue *forwardList, int prevUpdate){
1081 struct QueueItem * fqItem=getHead(forwardList);
1082 while(fqItem!=NULL){
1083 SESEcommon* seseRec = (SESEcommon*)(fqItem->objectptr);
1084 struct garbagelist * gl=(struct garbagelist *)&(seseRec[1]);
1085 if(prevUpdate==TRUE){
1086 updateAscendantSESE(seseRec);
1088 // do something here
1091 for(i=0; i<gl->size; i++) {
1092 void * orig=gl->array[i];
1093 ENQUEUE(orig, gl->array[i]);
1097 // iterate forwarding list of seseRec
/* Depth-first recursion into the child's own forwarding queue. */
1098 struct Queue* fList=&seseRec->forwardList;
1099 updateForwardList(fList,prevUpdate);
1100 fqItem=getNextQueueItem(fqItem);
/* updateMemoryQueue: traces GC roots reachable from an SESE's memory
 * queues.  Each queue item is a HASHTABLE (read/write bins of REntries),
 * a VECTOR of REntries, or a SINGLEITEM (SCC); for every REntry the
 * attached SESE record's garbagelist is enqueued, after refreshing its
 * ascendant records.  Excerpted listing -- return type line, variable
 * declarations (i, binidx, ridx, idx) and most closing braces are elided. */
1105 updateMemoryQueue(SESEcommon* seseParent){
1106 // update memory queue
/* NOTE(review): the inner `for(i=0; i<gl->size; ...)` loops below reuse
 * `i`, which also drives this outer loop over memoryQueueArray -- if both
 * are the same variable (elided declarations make it ambiguous) the outer
 * iteration is clobbered after the first traced entry; verify in the
 * full source. */
1108 for(i=0; i<seseParent->numMemoryQueue; i++){
1109 MemoryQueue *memoryQueue=seseParent->memoryQueueArray[i];
1110 MemoryQueueItem *memoryItem=memoryQueue->head;
1111 while(memoryItem!=NULL){
1112 if(memoryItem->type==HASHTABLE){
1113 Hashtable *ht=(Hashtable*)memoryItem;
1114 for(binidx=0; binidx<NUMBINS; binidx++){
1115 BinElement *bin=ht->array[binidx];
1116 BinItem *binItem=bin->head;
1117 while(binItem!=NULL){
1118 if(binItem->type==READBIN){
1119 ReadBinItem* readBinItem=(ReadBinItem*)binItem;
1121 for(ridx=0; ridx<readBinItem->index; ridx++){
1122 REntry *rentry=readBinItem->array[ridx];
1123 SESEcommon* seseRec = (SESEcommon*)(rentry->seseRec);
/* The SESE's garbagelist sits immediately after its SESEcommon header. */
1124 struct garbagelist * gl= (struct garbagelist *)&(seseRec[1]);
1125 updateAscendantSESE(seseRec);
1128 for(i=0; i<gl->size; i++) {
1129 void * orig=gl->array[i];
1130 ENQUEUE(orig, gl->array[i]);
/* Write-bin entry: single REntry rather than an array of them. */
1136 REntry *rentry=((WriteBinItem*)binItem)->val;
1137 SESEcommon* seseRec = (SESEcommon*)(rentry->seseRec);
1138 struct garbagelist * gl= (struct garbagelist *)&(seseRec[1]);
1139 updateAscendantSESE(seseRec);
1142 for(i=0; i<gl->size; i++) {
1143 void * orig=gl->array[i];
1144 ENQUEUE(orig, gl->array[i]);
1149 binItem=binItem->next;
1152 }else if(memoryItem->type==VECTOR){
1153 Vector *vt=(Vector*)memoryItem;
1155 for(idx=0; idx<vt->index; idx++){
1156 REntry *rentry=vt->array[idx];
1158 SESEcommon* seseRec = (SESEcommon*)(rentry->seseRec);
1159 struct garbagelist * gl= (struct garbagelist *)&(seseRec[1]);
1160 updateAscendantSESE(seseRec);
1163 for(i=0; i<gl->size; i++) {
1164 void * orig=gl->array[i];
1165 ENQUEUE(orig, gl->array[i]);
1171 }else if(memoryItem->type==SINGLEITEM){
1172 SCC *scc=(SCC*)memoryItem;
1173 REntry *rentry=scc->val;
1175 SESEcommon* seseRec = (SESEcommon*)(rentry->seseRec);
1176 struct garbagelist * gl= (struct garbagelist *)&(seseRec[1]);
1177 updateAscendantSESE(seseRec);
1180 for(i=0; i<gl->size; i++) {
1181 void * orig=gl->array[i];
1182 ENQUEUE(orig, gl->array[i]);
1188 memoryItem=memoryItem->next;
/* updateAscendantSESE: enqueues the garbagelists of every dependent SESE
 * record embedded in seseRec.  Dependent records are located via
 * offsetToDepSESErecords plus a pointer-sized stride per index (the middle
 * of the address computation is elided in this excerpt).  Each record's
 * garbagelist chain (starting right after its SESEcommon header) is walked
 * and every slot is ENQUEUEd.  Excerpted listing -- return type line and
 * declarations of prevIdx/i are elided. */
1193 updateAscendantSESE(SESEcommon* seseRec){
1195 for(prevIdx=0; prevIdx<(seseRec->numDependentSESErecords); prevIdx++){
1196 SESEcommon* prevSESE = (SESEcommon*)
1199 seseRec->offsetToDepSESErecords +
1200 (sizeof(INTPTR)*prevIdx)
1204 struct garbagelist * prevgl=(struct garbagelist *)&(((SESEcommon*)(prevSESE))[1]);
1205 while(prevgl!=NULL) {
1207 for(i=0; i<prevgl->size; i++) {
1208 void * orig=prevgl->array[i];
1209 ENQUEUE(orig, prevgl->array[i]);
1211 prevgl=prevgl->next;
1219 int within(void *ptr){ //debug function
1220 if(ptr>curr_heapptr || ptr<curr_heapbase){
1221 __asm__ __volatile__ ("int $3"); // breakpoint