From f32f3675d1ddc3623fc365c6aea5b959c96f24ad Mon Sep 17 00:00:00 2001 From: jzhou Date: Thu, 6 Aug 2009 00:20:26 +0000 Subject: [PATCH] add compact phase code for multicore gc, not finished yet --- Robust/src/Runtime/mem.c | 5 +- Robust/src/Runtime/multicoregarbage.c | 328 ++++++++++++++++++++++---- Robust/src/Runtime/multicoregarbage.h | 36 ++- Robust/src/Runtime/multicoreruntime.h | 1 - Robust/src/Runtime/multicoretask.c | 80 ++----- 5 files changed, 335 insertions(+), 115 deletions(-) diff --git a/Robust/src/Runtime/mem.c b/Robust/src/Runtime/mem.c index e3995299..e4854e57 100644 --- a/Robust/src/Runtime/mem.c +++ b/Robust/src/Runtime/mem.c @@ -33,7 +33,10 @@ memalloc: goto memalloc; } BAMBOO_CLOSE_CRITICAL_SECTION_MEM(); - return (void *)(BAMBOO_CACHE_LINE_SIZE+((int)p-1)&(~BAMBOO_CACHE_LINE_MASK)); + void * alignedp = (void *)(BAMBOO_CACHE_LINE_SIZE+((int)p-1)&(~BAMBOO_CACHE_LINE_MASK)); + memset(p, -2, (alignedp - p)); + memset(alignedp + size, -2, p + isize - alignedp - size); + return alignedp; } #else void * mycalloc_share(int m, int size) { diff --git a/Robust/src/Runtime/multicoregarbage.c b/Robust/src/Runtime/multicoregarbage.c index 6c206203..5c4362f0 100644 --- a/Robust/src/Runtime/multicoregarbage.c +++ b/Robust/src/Runtime/multicoregarbage.c @@ -70,6 +70,7 @@ int gc_moreItems2() { } INTPTR curr_heaptop = 0; +INTPTR curr_heapbound = 0; bool isLarge(void * ptr, int * ttype, int * tsize) { // check if a pointer is referring to a large object @@ -110,6 +111,138 @@ void transferMarkResults() { // invoked inside interruptiong handler } +void transferCompactStart(int core) { + // send start compact messages to all cores + // TODO no large obj info + int msgsize = 3; + int i = 0; + int ismove = 0; + int movenum = 0; + + // both lcore and rcore have the same action: either + // move objs or have incoming objs + if(gcdeltal[core] > 0) { + ismove = 0; // have incoming objs + movenum++; + } else if(gcdeltal[core] < 0) { + ismove = 1; // have objs to move + 
movenum++; + } + if(gcdeltar[core] > 0) { + ismove = 0; // have incoming objs + movenum++; + } else if(gcdeltar[core] < 0) { + ismove = 1; // have objs to move + movenum++; + } + msgsize += (movenum == 0) ? 0 : 2 + movenum * 2; + + isMsgSending = true; + DynamicHeader msgHdr = tmc_udn_header_from_cpu(core); + + __tmc_udn_send_header_with_size_and_tag(msgHdr, msgsize, UDN0_DEMUX_TAG); // send header +#ifdef DEBUG + BAMBOO_DEBUGPRINT(0xbbbb); + BAMBOO_DEBUGPRINT(0xb000 + core); // targetcore +#endif + udn_send(GCSTARTCOMPACT); +#ifdef DEBUG + BAMBOO_DEBUGPRINT(GCSTARTCOMPACT); +#endif + udn_send(msgsize); +#ifdef DEBUG + BAMBOO_DEBUGPRINT_REG(msgsize); +#endif + udn_send(gcreloads[core]); +#ifdef DEBUG + BAMBOO_DEBUGPRINT_REG(gcreloads[core]); +#endif + if(movenum > 0) { + udn_send(movenum); +#ifdef DEBUG + BAMBOO_DEBUGPRINT_REG(movenum); +#endif + udn_send(ismove); +#ifdef DEBUG + BAMBOO_DEBUGPRINT_REG(ismove); +#endif + int dst = 0; + if(gcdeltal[core] != 0) { + LEFTNEIGHBOUR(core, &dst); + udn_send(abs(gcdeltal[core])); +#ifdef DEBUG + BAMBOO_DEBUGPRINT_REG(abs(gcdeltal[core])); +#endif + udn_send(dst); +#ifdef DEBUG + BAMBOO_DEBUGPRINT_REG(dst); +#endif + } + if(gcdeltar[core] != 0) { + RIGHTNEIGHBOUR(core, &dst); + udn_send(abs(gcdeltar[core])); +#ifdef DEBUG + BAMBOO_DEBUGPRINT_REG(abs(gcdeltar[core])); +#endif + udn_send(dst); +#ifdef DEBUG + BAMBOO_DEBUGPRINT_REG(dst); +#endif + } + } +#ifdef DEBUG + BAMBOO_DEBUGPRINT(0xffff); +#endif + + // end of sending this msg, set sand msg flag false + isMsgSending = false; + ++(self_numsendobjs); + // check if there are pending msgs + while(isMsgHanging) { + // get the msg from outmsgdata[] + // length + target + msg + outmsgleft = outmsgdata[outmsgindex]; + outmsgindex = (outmsgindex + 1) % BAMBOO_OUT_BUF_LENGTH; + int target = outmsgdata[outmsgindex]; + outmsgindex = (outmsgindex + 1) % BAMBOO_OUT_BUF_LENGTH; + // mark to start sending the msg + isMsgSending = true; + // Build the message header + msgHdr = 
tmc_udn_header_from_cpu(target); + __tmc_udn_send_header_with_size_and_tag(msgHdr, outmsgleft, UDN0_DEMUX_TAG); // send header +#ifdef DEBUG + BAMBOO_DEBUGPRINT(0xbbbb); + BAMBOO_DEBUGPRINT(0xb000 + target); // targetcore +#endif + while(outmsgleft-- > 0) { + udn_send(outmsgdata[outmsgindex]); +#ifdef DEBUG + BAMBOO_DEBUGPRINT_REG(outmsgdata[outmsgindex]); +#endif + outmsgindex = (outmsgindex + 1) % BAMBOO_OUT_BUF_LENGTH; + } +#ifdef DEBUG + BAMBOO_DEBUGPRINT(0xffff); +#endif + // mark to end sending the msg + isMsgSending = false; + BAMBOO_START_CRITICAL_SECTION_MSG(); +#ifdef DEBUG + BAMBOO_DEBUGPRINT(0xf001); +#endif + // check if there are still msg hanging + if(outmsgindex == outmsglast) { + // no more msgs + outmsgindex = outmsglast = 0; + isMsgHanging = false; + } + BAMBOO_CLOSE_CRITICAL_SECTION_MSG(); +#ifdef DEBUG + BAMBOO_DEBUGPRINT(0xf000); +#endif + } +} + void checkMarkStatue() { if((!gcwaitconfirm) || (waitconfirm && (numconfirm == 0))) { @@ -203,7 +336,7 @@ bool preGC() { } // compute load balance for all cores -void loadbalance() { +int loadbalance() { // compute load balance // initialize the deltas int i; @@ -291,6 +424,23 @@ void loadbalance() { } } } + + // compute heap top after load balancing + int heaptop = 0; + int localheaptop = 0; + int numblocks = 0; + INTPTR baseptr = 0; + int offset = 0; + for(i = 0; i < NUMCORES; ++i) { + NUMBLOCKS(gcreloads[i], &numblocks); + BASEPTR(i, numblocks, &baseptr); + OFFSET(gcreloads[i], &offset); + localheaptop = baseptr + offset; + if(localheaptop > heaptop) { + heaptop = localheaptop; + } + } + return heaptop; } void gc(struct garbagelist * stackptr) { @@ -334,14 +484,13 @@ void gc(struct garbagelist * stackptr) { send_msg_1(i, GCLOBJREQUEST); } while(numconfirm != 0) {} // wait for responses - loadbalance(); + int heaptop = loadbalance(); // TODO need to decide where to put large objects - // TODO cache all large objects for(i = 1; i < NUMCORES; ++i) { - //TODO send start compact messages to all cores - 
+ //send start compact messages to all cores + transferCompactStart(i); } // compact phase @@ -364,7 +513,6 @@ void gc(struct garbagelist * stackptr) { break; } } // while(COMPACTPHASE == gcphase) - // TODO merge all mapping information gcphase = FLUSHPHASE; for(i = 1; i < NUMCORES; ++i) { @@ -462,8 +610,12 @@ void mark(bool isfirst, struct garbagelist * stackptr) { if(isfirst) { // enqueue root objs tomark(stackptr); + curr_heaptop = BAMBOO_CACHE_LINE_SIZE; + curr_heapbound = BAMBOO_SMEM_SIZE_L; + markedptrbound = 0; } + int isize = 0; // mark phase while(MARKPHASE == gcphase) { while(gc_moreItems2()) { @@ -488,9 +640,18 @@ void mark(bool isfirst, struct garbagelist * stackptr) { if(type == -1) { // nothing to do } - curr_heaptop += size; + ALIGNSIZE(size, &isize); + curr_heaptop += isize; + if(curr_heaptop > curr_heapbound) { + // change to another block + curr_heaptop = curr_heapbound + BAMBOO_CACHE_LINE_SIZE + isize; + curr_heapbound += BAMBOO_SMEM_SIZE; + } // mark this obj ((int *)ptr)[6] = 1; + if(ptr > markedptrbound) { + markedptrbound = ptr; + } } // scan all pointers in ptr unsigned INTPTR * pointer; @@ -548,45 +709,126 @@ void compact() { if(COMPACTPHASE != gcphase) { BAMBOO_EXIT(0xb003); } - curr_heaptop = 0; - struct markedObjItem * moi = mObjList.head; - bool iscopy = true; - while(moi != NULL) { - if((cinstruction == NULL) || (cinstruction->tomoveobjs == NULL) - || (curr_heaptop < cinstruction->tomoveobjs->starts[0])) { - // objs to compact - int type = ((int *)(moi->orig))[0]; - int size = 0; - if(type == -1) { - // do nothing - } - if(type < NUMCLASSES) { - // a normal object - size = classsize[type]; - moi->dst = curr_heaptop; - curr_heaptop += size; - if(iscopy) { - memcpy(moi->dst, moi->orig, size); - genputtable(pointertbl, moi->orig, moi->dst); - } - } else { - // an array - struct ArrayObject *ao=(struct ArrayObject *)ptr; - int elementsize=classsize[type]; - int length=ao->___length___; - size=sizeof(struct ArrayObject)+length*elementsize; 
- moi->dst = curr_heaptop; - curr_heaptop += size; - if(iscopy) { - memcpy(moi->dst, moi->orig, size); - genputtable(pointertbl, moi->orig, moi->dst); + + int numblocks = 0; // block num for dst heap for move + INTPTR curr_heapbase = 0; // real base virtual address of current heap block + INTPTR curr_heapptr = 0; // real virtual address of current heap top + int curr_offset = 0; // offset in current heap block + INTPTR orig_ptr; // real virtual address of obj to move + int curr_blockbase = 0; // real virtual address of current small block to check with + int curr_blockbound = 0; // real bound virtual address of current small blcok to check + int curr_base = 0; // real base virtual address of current heap block to check + int curr_bound = 0; // real bound virtual address of current heap block to check + int numblocks1 = 0; // block num for orig heap for move + curr_heaptop = curr_offset = BAMBOO_CACHE_LINE_SIZE; // logic heap top + curr_heapbound = BAMBOO_SMEM_SIZE_L; // logic heap bound + BASEPTR(BAMBOO_NUM_OF_CORE, numblocks, &curr_heapbase); + curr_heapptr = orig_ptr = curr_heapbase + curr_offset; + curr_base = curr_heapbase; + curr_bound = curr_heapbound; + curr_blockbase = curr_heapbase; + curr_blockbound = curr_blockbase + *((int*)curr_blockbase); + + // scan over all objs in this block, compact those scheduled to + // reside on this core + int type = 0; + int size = 0; + int mark = 0; + int isize = 0; + // loop stop when finishing either scanning all active objs or moving + // all objs to reside on this core + do { +innercompact: + // TODO all objs are aligned, how to filter out the paddings? 
+ while((*((int*)orig_ptr)) == -2) { + orig_ptr++; + if(orig_ptr == curr_blockbound) { + curr_blockbase = curr_blockbound; + if(curr_blockbase == curr_bound) { + // end of current heap block, jump to next one + numblocks1++; + BASEPTR(BAMBOO_NUM_OF_CORE, numblocks1, &curr_base); + curr_bound = curr_base + BAMBOO_SMEM_SIZE; + curr_blockbase = curr_base; } + curr_blockbound = curr_blockbase + *((int*)curr_blockbase); + orig_ptr = curr_blockbase + BAMBOO_CACHE_LINE_SIZE; + goto innercompact; } - } else { - iscopy = false;; } - moi = moi->next; - } // while(moi != NULL) + // check the obj's type, size and mark flag + type = ((int *)orig_ptr)[0]; + size = 0; + if(type == -1) { + // end of this block, go to next one + curr_blockbase = curr_blockbound; + if(curr_blockbase == curr_bound) { + // end of current heap block, jump to next one + numblocks1++; + BASEPTR(BAMBOO_NUM_OF_CORE, numblocks1, &curr_base); + curr_bound = curr_base + BAMBOO_SMEM_SIZE; + curr_blockbase = curr_base; + } + curr_blockbound = curr_blockbase + *((int*)curr_blockbase); + orig_ptr = curr_blockbase + BAMBOO_CACHE_LINE_SIZE; + continue; + } else if(type < NUMCLASSES) { + // a normal object + size = classsize[type]; + } else { + // an array + struct ArrayObject *ao=(struct ArrayObject *)ptr; + int elementsize=classsize[type]; + int length=ao->___length___; + size=sizeof(struct ArrayObject)+length*elementsize; + } + mark = ((int *)orig_ptr)[6]; + if(mark == 1) { + // marked obj, copy it to current heap top + // check to see if remaining space is enough + ALIGNSIZE(size, &isize); + if((curr_heaptop + isize > cinstruction->loads) + && (cinstruction->movenum != 0)) { + // all objs to reside on this core have been moved + // the remainging objs should be moved to other cores + // STOP the loop + break; + } + if(curr_heaptop + isize > curr_heapbound) { + // fill the header of this block and then go to next block + curr_offset += curr_heapbound - curr_heaptop; + (*((int*)curr_heapbase)) = curr_offset; + 
curr_heaptop = curr_heapbound + BAMBOO_CACHE_LINE_SIZE; // header! + curr_heapbound += BAMBOO_SMEM_SIZE; + numblocks++; + BASEPTR(BAMBOO_NUM_OF_CORE, numblocks, &curr_heapbase); + curr_offset = BAMBOO_CACHE_LINE_SIZE; + curr_heapptr = curr_heapbase + curr_offset; + } + memcpy(curr_heapptr, orig_ptr, size); + genputtable(pointertbl, orig_ptr, curr_heapptr); // store the mapping infor + curr_heapptr += isize; + curr_offset += isize; + } + // move to next obj + orig_ptr += size; + if(orig_ptr == curr_blockbound) { + curr_blockbase = curr_blockbound; + if(curr_blockbase == curr_bound) { + // end of current heap block, jump to next one + numblocks1++; + BASEPTR(BAMBOO_NUM_OF_CORE, numblocks1, &curr_base); + curr_bound = curr_base + BAMBOO_SMEM_SIZE; + curr_blockbase = curr_base; + } + curr_blockbound = curr_blockbase + *((int*)curr_blockbase); + orig_ptr = curr_blockbase + BAMBOO_CACHE_LINE_SIZE; + } + } while(orig_ptr < markedptrbound + 1); + // TODO move objs + + struct markedObjItem * moi = mObjList.head; + bool iscopy = true; if(moi == NULL) { if(cinstruction->incomingobjs != NULL) { for(int j = 0; j < cinstruction->incomingobjs->length; j++) { diff --git a/Robust/src/Runtime/multicoregarbage.h b/Robust/src/Runtime/multicoregarbage.h index 851ec0da..81000a62 100644 --- a/Robust/src/Runtime/multicoregarbage.h +++ b/Robust/src/Runtime/multicoregarbage.h @@ -9,7 +9,7 @@ #define BAMBOO_BASE_VA 0xd000000 #define BAMBOO_SMEM_SIZE 16 * BAMBOO_PAGE_SIZE #define BAMBOO_SMEM_SIZE_L 512 * BAMBOO_PAGE_SIZE -#define BAMBOO_LARGE_SMEM_BOUND BAMBOO_SMEM_SIZE_L * NUMCORES // NUMCORES = 62 +#define BAMBOO_LARGE_SMEM_BOUND BAMBOO_SMEM_SIZE_L*NUMCORES // NUMCORES = 62 struct garbagelist { int size; @@ -50,13 +50,16 @@ struct moveObj { int * dsts; int length; }; - +*/ struct compactInstr { - struct moveObj * tomoveobjs; - struct moveObj * incomingobjs; + int loads; + int ismove; + int movenum; + int * size2move; + int * dsts; struct largeObjItem * largeobjs; }; -*/ + enum GCPHASETYPE 
{ MARKPHASE = 0x0, // 0x0 COMPACTPHASE, // 0x1 @@ -84,6 +87,7 @@ int gcdeltal[NUMCORES]; int gcdeltar[NUMCORES]; // compact instruction +INTPTR markedptrbound; struct compactInstr * cinstruction; // mapping of old address to new address struct genhashtable * pointertbl; @@ -91,11 +95,15 @@ int obj2map; int mappedobj; bool ismapped; +#define ALIGNSIZE(s, as) \ + (*((int*)as)) = ((s) & (~BAMBOO_CACHE_LINE_MASK)) + BAMBOO_CACHE_LINE_SIZE; + #define BLOCKINDEX(p, b) \ - if((p) < BAMBOO_LARGE_SMEM_BOUND) { \ - (*((int*)b)) = (p) / BAMBOO_SMEM_SIZE_L; \ + int t = (p) - BAMBOO_BASE_VA; \ + if(t < BAMBOO_LARGE_SMEM_BOUND) { \ + (*((int*)b)) = t / BAMBOO_SMEM_SIZE_L; \ } else { \ - (*((int*)b)) = NUMCORES + ((p) - BAMBOO_LARGE_SMEM_BOUND) / BAMBOO_SMEM_SIZE; \ + (*((int*)b)) = NUMCORES + (t - BAMBOO_LARGE_SMEM_BOUND) / BAMBOO_SMEM_SIZE; \ } #define RESIDECORE(p, x, y) \ @@ -132,6 +140,13 @@ bool ismapped; (*((int*)n)) = 1 + (s - BAMBOO_SMEM_SIZE_L) / BAMBOO_SMEM_SIZE; \ } +#define OFFSET(s, o) \ + if(s < BAMBOO_SMEM_SIZE_L) { \ + (*((int*)o)) = s; \ + } else { \ + (*((int*)o)) = (s - BAMBOO_SMEM_SIZE_L) % BAMBOO_SMEM_SIZE; \ + } + #define BASEPTR(c, n, p) \ int x; \ int y; \ @@ -161,9 +176,9 @@ bool ismapped; b += NUMCORES * n; \ } \ if(b < NUMCORES) { \ - (*((int*)p)) = b * BAMBOO_SMEM_SIZE_L; \ + (*((int*)p)) = BAMBOO_BASE_VA + b * BAMBOO_SMEM_SIZE_L; \ } else { \ - (*((int*)p)) = BAMBOO_LARGE_SMEM_BOUND + (b - NUMCORES) * BAMBOO_SMEM_SIZE; \ + (*((int*)p)) = BAMBOO_BASE_VA + BAMBOO_LARGE_SMEM_BOUND + (b - NUMCORES) * BAMBOO_SMEM_SIZE; \ } #define LEFTNEIGHBOUR(n, c) \ @@ -205,6 +220,7 @@ bool ismapped; void gc(struct garbagelist * stackptr); // core coordinator routine void gc_collect(struct garbagelist * stackptr); // core collector routine void transferMarkResults(); +void transferCompactStart(int corenum); void gc_enqueue(void *ptr); #endif diff --git a/Robust/src/Runtime/multicoreruntime.h b/Robust/src/Runtime/multicoreruntime.h index 1715d86d..1588ad5d 100644 --- 
a/Robust/src/Runtime/multicoreruntime.h +++ b/Robust/src/Runtime/multicoreruntime.h @@ -247,7 +247,6 @@ inline void cache_msg_5(int targetcore, unsigned long n0, unsigned long n1, unsi inline void cache_msg_6(int targetcore, unsigned long n0, unsigned long n1, unsigned long n2, unsigned long n3, unsigned long n4, unsigned long n5) __attribute__((always_inline)); inline void transferObject(struct transObjInfo * transObj); inline int receiveMsg(void) __attribute__((always_inline)); -inline int receiveGCMsg(void) __attribute__((always_inline)); #ifdef PROFILE inline void profileTaskStart(char * taskname) __attribute__((always_inline)); diff --git a/Robust/src/Runtime/multicoretask.c b/Robust/src/Runtime/multicoretask.c index 2a0c5b21..85c5c31b 100644 --- a/Robust/src/Runtime/multicoretask.c +++ b/Robust/src/Runtime/multicoretask.c @@ -1514,8 +1514,12 @@ msg: bamboo_smem_size = 0; bamboo_cur_msp = NULL; } else { - bamboo_smem_size = msgdata[2]; - bamboo_cur_msp = create_mspace_with_base((void*)msgdata[1], msgdata[2], 0); + // fill header to store the size of this mem block + (*((int*)msgdata[1])) = msgdata[2]; + bamboo_smem_size = msgdata[2] - BAMBOO_CACHE_LINE_SIZE; + bamboo_cur_msp = create_mspace_with_base((void*)(msgdata[1]+BAMBOO_CACHE_LINE_SIZE), + msgdata[2] - BAMBOO_CACHE_LINE_SIZE, + 0); } smemflag = true; break; @@ -1548,68 +1552,24 @@ msg: if(cinstruction == NULL) { cinstruction = (struct compactInstr *)RUNMALLOC(sizeof(struct compactInstr)); + cinstruction->size2move = (int *)RUNMALLOC(sizeof(int) * 2); + cinstruction->dsts = (int*)RUNMALLOC(sizeof(int) * 2); } else { - // clean up out of data info - if(cinstruction->tomoveobjs != NULL) { - RUNFREE(cinstruction->tomoveobjs->starts); - RUNFREE(cinstruction->tomoveobjs->ends); - RUNFREE(cinstruction->tomoveobjs->dststarts); - RUNFREE(cinstruction->tomoveobjs->dsts); - RUNFREE(cinstruction->tomoveobjs); - cinstruction->tomoveobjs = NULL; - } - if(cinstruction->incomingobjs != NULL) { - RUNFREE(); - 
RUNFREE(cinstruction->incomingobjs->starts); - RUNFREE(cinstruction->incomingobjs->dsts); - RUNFREE(cinstruction->incomingobjs); - cinstruction->incomingobjs = NULL; - } - // largeobj items should have been freed when processed - if(cinstruction->largeobjs != NULL) { - BAMBOO_EXIT(0xb005); - } + // clean up out of date info + cinstruction->movenum = 0; } - if(data1 > 2) { + cinstruction->loads = msgdata[2]; + if(data1 > 3) { // have objs to move etc. - int startindex = 2; + int startindex = 3; // process objs to move - int num = msgdata[startindex++]; - if(num > 0) { - cinstruction->tomoveobjs = - (struct moveObj *)RUNMALLOC(sizeof(struct moveObj)); - cinstruction->tomoveobjs->length = num; - cinstruction->tomoveobjs->starts = - (INTPTR *)RUNMALLOC(num * sizeof(INTPTR)); - cinstruction->tomoveobjs->ends = - (INTPTR *)RUNMALLOC(num * sizeof(INTPTR)); - cinstruction->tomoveobjs->dststarts = - (INTPTR *)RUNMALLOC(num * sizeof(INTPTR)); - cinstruction->tomoveobjs->dsts = - (INTPTR *)RUNMALLOC(num * sizeof(INTPTR)); - for(i = 0; i < num; i++) { - cinstruction->tomoveobjs->starts[i] = msgdata[startindex++]; - cinstruction->tomoveobjs->ends[i] = msgdata[startindex++]; - cinstruction->tomoveobjs->dsts[i] = msgdata[startindex++]; - cinstruction->tomoveobjs->dststarts[i] = msgdata[startindex++]; - } + cinstruction->movenum = msgdata[startindex++]; + cinstruction->ismove = msgdata[startindex++]; + for(i = 0; i < cinstruction->movenum; i++) { + cinstruction->size2move[i] = msgdata[startindex++]; + cinstruction->dsts[i] = msgdata[startindex++]; } - // process incoming objs - num = msgdata[startindex++]; - if(num > 0) { - cinstruction->incomingobjs = - (struct moveObj *)RUNMALLOC(sizeof(struct moveObj)); - cinstruction->incomingobjs->length = num; - cinstruction->incomingobjs->starts = - (INTPTR *)RUNMALLOC(num * sizeof(INTPTR)); - cinstruction->incomingobjs->dsts = - (INTPTR *)RUNMALLOC(num * sizeof(INTPTR)); - for(i = 0; i < num; i++) { - cinstruction->incomingobjs->starts[i] 
= msgdata[startindex++]; - cinstruction->incomingobjs->dsts[i] = msgdata[startindex++]; - } - } - // process large objs + /*// process large objs num = msgdata[startindex++]; for(i = 0; i < num; i++) { struct largeObjItem * loi = @@ -1622,7 +1582,7 @@ msg: cinstruction->largeobjs->next = loi; } cinstruction->largeobjs = loi; - } + }*/ } gcphase = COMPACTPHASE; break; -- 2.34.1