- GC_BAMBOO_DEBUGPRINT(0xe502);
-
- // enqueue static pointers global_defs_p
- if(STARTUPCORE == BAMBOO_NUM_OF_CORE) {
- struct garbagelist * staticptr=(struct garbagelist *)global_defs_p;
- while(staticptr != NULL) {
- for(i=0; i<staticptr->size; i++) {
- if(staticptr->array[i] != NULL) {
- markObj(staticptr->array[i]);
- }
- }
- staticptr = staticptr->next;
- }
- }
- GC_BAMBOO_DEBUGPRINT(0xe503);
-
-#ifdef TASK
- // enqueue objectsets
- if(BAMBOO_NUM_OF_CORE < NUMCORESACTIVE) {
- for(i=0; i<NUMCLASSES; i++) {
- struct parameterwrapper ** queues =
- objectqueues[BAMBOO_NUM_OF_CORE][i];
- int length = numqueues[BAMBOO_NUM_OF_CORE][i];
- for(j = 0; j < length; ++j) {
- struct parameterwrapper * parameter = queues[j];
- struct ObjectHash * set=parameter->objectset;
- struct ObjectNode * ptr=set->listhead;
- while(ptr!=NULL) {
- markObj((void *)ptr->key);
- ptr=ptr->lnext;
- }
- }
- }
- }
-
- // enqueue current task descriptor
- if(currtpd != NULL) {
- GC_BAMBOO_DEBUGPRINT(0xe504);
- for(i=0; i<currtpd->numParameters; i++) {
- markObj(currtpd->parameterArray[i]);
- }
- }
-
- GC_BAMBOO_DEBUGPRINT(0xe505);
- // enqueue active tasks
- if(activetasks != NULL) {
- struct genpointerlist * ptr=activetasks->list;
- while(ptr!=NULL) {
- struct taskparamdescriptor *tpd=ptr->src;
- int i;
- for(i=0; i<tpd->numParameters; i++) {
- markObj(tpd->parameterArray[i]);
- }
- ptr=ptr->inext;
- }
- }
-
- GC_BAMBOO_DEBUGPRINT(0xe506);
- // enqueue cached transferred obj
- struct QueueItem * tmpobjptr = getHead(&objqueue);
- while(tmpobjptr != NULL) {
- struct transObjInfo * objInfo =
- (struct transObjInfo *)(tmpobjptr->objectptr);
- markObj(objInfo->objptr);
- tmpobjptr = getNextQueueItem(tmpobjptr);
- }
-
- GC_BAMBOO_DEBUGPRINT(0xe507);
- // enqueue cached objs to be transferred
- struct QueueItem * item = getHead(totransobjqueue);
- while(item != NULL) {
- struct transObjInfo * totransobj =
- (struct transObjInfo *)(item->objectptr);
- markObj(totransobj->objptr);
- item = getNextQueueItem(item);
- } // while(item != NULL)
-
- GC_BAMBOO_DEBUGPRINT(0xe508);
- // enqueue lock related info
- for(i = 0; i < runtime_locklen; ++i) {
- markObj((void *)(runtime_locks[i].redirectlock));
- if(runtime_locks[i].value != NULL) {
- markObj((void *)(runtime_locks[i].value));
- }
- }
- GC_BAMBOO_DEBUGPRINT(0xe509);
-#endif
-
-#ifdef MGC
- // enqueue global thread queue
- if(STARTUPCORE == BAMBOO_NUM_OF_CORE) {
- lockthreadqueue();
- unsigned int thread_counter = *((unsigned int*)(bamboo_thread_queue+1));
- if(thread_counter > 0) {
- unsigned int start = *((unsigned int*)(bamboo_thread_queue+2));
- for(i = thread_counter; i > 0; i--) {
- markObj((void *)bamboo_thread_queue[4+start]);
- start = (start+1)&bamboo_max_thread_num_mask;
- }
- }
- }
-
- // enqueue the bamboo_threadlocks
- for(i = 0; i < bamboo_threadlocks.index; i++) {
- markObj((void *)(bamboo_threadlocks.locks[i].object));
- }
-
- // enqueue the bamboo_current_thread
- if(bamboo_current_thread != 0) {
- markObj((void *)bamboo_current_thread);
- }
-
- GC_BAMBOO_DEBUGPRINT(0xe50a);
-#endif
-} // void tomark(struct garbagelist * stackptr)
-
-inline void mark(bool isfirst,
- struct garbagelist * stackptr) {
- if(BAMBOO_NUM_OF_CORE == 0) GC_BAMBOO_DEBUGPRINT(0xed01);
- if(isfirst) {
- if(BAMBOO_NUM_OF_CORE == 0) GC_BAMBOO_DEBUGPRINT(0xed02);
- // enqueue root objs
- tomark(stackptr);
- gccurr_heaptop = 0; // record the size of all active objs in this core
- // aligned but does not consider block boundaries
- gcmarkedptrbound = 0;
- }
- if(BAMBOO_NUM_OF_CORE == 0) GC_BAMBOO_DEBUGPRINT(0xed03);
- unsigned int isize = 0;
- bool checkfield = true;
- bool sendStall = false;
- // mark phase
- while(MARKPHASE == gcphase) {
- if(BAMBOO_NUM_OF_CORE == 0) GC_BAMBOO_DEBUGPRINT(0xed04);
- while(true) {
- BAMBOO_ENTER_RUNTIME_MODE_FROM_CLIENT();
- bool hasItems = gc_moreItems2_I();
- BAMBOO_ENTER_CLIENT_MODE_FROM_RUNTIME();
- GC_BAMBOO_DEBUGPRINT(0xed05);
- if(!hasItems) {
- break;
- }
- sendStall = false;
- gcbusystatus = true;
- checkfield = true;
- void * ptr = gc_dequeue2_I();
-
- GC_BAMBOO_DEBUGPRINT_REG(ptr);
- unsigned int size = 0;
- unsigned int isize = 0;
- unsigned int type = 0;
- // check if it is a shared obj
- if(ISSHAREDOBJ(ptr)) {
- // a shared obj, check if it is a local obj on this core
- unsigned int host = hostcore(ptr);
- bool islocal = (host == BAMBOO_NUM_OF_CORE);
- if(islocal) {
- bool isnotmarked = (((int *)ptr)[BAMBOOMARKBIT] == DISCOVERED);
- if(isLarge(ptr, &type, &size) && isnotmarked) {
- // ptr is a large object and not marked or enqueued
- GC_BAMBOO_DEBUGPRINT(0xecec);
- GC_BAMBOO_DEBUGPRINT_REG(ptr);
- GC_BAMBOO_DEBUGPRINT_REG(*((int*)ptr));
- BAMBOO_ENTER_RUNTIME_MODE_FROM_CLIENT();
- gc_lobjenqueue_I(ptr, size, BAMBOO_NUM_OF_CORE);
- gcnumlobjs++;
- BAMBOO_ENTER_CLIENT_MODE_FROM_RUNTIME();
- // mark this obj
- ((int *)ptr)[BAMBOOMARKBIT] = MARKED;
- BAMBOO_CACHE_FLUSH_LINE(ptr);
-#ifdef GC_TBL_DEBUG
- // for test
- gcmappingtbl[OBJMAPPINGINDEX((unsigned int)ptr)]=3;
-#endif
- } else if(isnotmarked) {
- // ptr is an unmarked active object on this core
- ALIGNSIZE(size, &isize);
- gccurr_heaptop += isize;
- GC_BAMBOO_DEBUGPRINT(0xaaaa);
- GC_BAMBOO_DEBUGPRINT_REG(ptr);
- GC_BAMBOO_DEBUGPRINT_REG(isize);
- GC_BAMBOO_DEBUGPRINT(((int *)(ptr))[0]);
- // mark this obj
- ((int *)ptr)[BAMBOOMARKBIT] = MARKED;
- BAMBOO_CACHE_FLUSH_LINE(ptr);
-#ifdef GC_TBL_DEBUG
- // for test
- gcmappingtbl[OBJMAPPINGINDEX((unsigned int)ptr)]=2;
-#endif
-
- if((unsigned int)(ptr + size) > (unsigned int)gcmarkedptrbound) {
- gcmarkedptrbound = (unsigned int)(ptr + size);
- } // if(ptr + size > gcmarkedptrbound)
- } else {
- // ptr is not an active obj or has been marked
- checkfield = false;
- } // if(isLarge(ptr, &type, &size)) else ...
- }
-#ifdef GC_TBL_DEBUG
- else {
- tprintf("Error mark: %x, %d, %d \n", (int)ptr, BAMBOO_NUM_OF_CORE,
- hostcore(ptr));
- BAMBOO_EXIT(0xb011);
- }
-#endif /* can never reach here
- else {
- // check if this obj has been forwarded
- if(!MGCHashcontains(gcforwardobjtbl, (int)ptr)) {
- // send a msg to host informing that ptr is active
- send_msg_2(host, GCMARKEDOBJ, ptr, false);
- gcself_numsendobjs++;
- MGCHashadd(gcforwardobjtbl, (int)ptr);
- }
- checkfield = false;
- }// if(isLocal(ptr)) else ...*/
- } // if(ISSHAREDOBJ(ptr))
- GC_BAMBOO_DEBUGPRINT(0xed06);
-
- if(checkfield) {
- // scan all pointers in ptr
- unsigned int * pointer;
- pointer=pointerarray[type];
- if (pointer==0) {
- /* Array of primitives */
- /* Do nothing */
- } else if (((unsigned int)pointer)==1) {
- /* Array of pointers */
- struct ArrayObject *ao=(struct ArrayObject *) ptr;
- int length=ao->___length___;
- int j;
- for(j=0; j<length; j++) {
- void *objptr =
- ((void **)(((char *)&ao->___length___)+sizeof(int)))[j];
- markObj(objptr);
- }
- } else {
- unsigned int size=pointer[0];
- int i;
- for(i=1; i<=size; i++) {
- unsigned int offset=pointer[i];
- void * objptr=*((void **)(((char *)ptr)+offset));
- markObj(objptr);
- }
- } // if (pointer==0) else if ... else ...
- {
- pointer=pointerarray[OBJECTTYPE];
- //handle object class
- unsigned int size=pointer[0];
- int i;
- for(i=1; i<=size; i++) {
- unsigned int offset=pointer[i];
- void * objptr=*((void **)(((char *)ptr)+offset));
- markObj(objptr);
- }
- }
- } // if(checkfield)
- } // while(gc_moreItems2())
- GC_BAMBOO_DEBUGPRINT(0xed07);
- gcbusystatus = false;
- // send mark finish msg to core coordinator
- if(STARTUPCORE == BAMBOO_NUM_OF_CORE) {
- GC_BAMBOO_DEBUGPRINT(0xed08);
- int entry_index = 0;
- if(waitconfirm) {
- // phase 2
- entry_index = (gcnumsrobjs_index == 0) ? 1 : 0;
- } else {
- // phase 1
- entry_index = gcnumsrobjs_index;
- }
- gccorestatus[BAMBOO_NUM_OF_CORE] = 0;
- gcnumsendobjs[entry_index][BAMBOO_NUM_OF_CORE]=gcself_numsendobjs;
- gcnumreceiveobjs[entry_index][BAMBOO_NUM_OF_CORE]=gcself_numreceiveobjs;
- gcloads[BAMBOO_NUM_OF_CORE] = gccurr_heaptop;
- } else {
- if(!sendStall) {
- GC_BAMBOO_DEBUGPRINT(0xed09);
- send_msg_4(STARTUPCORE, GCFINISHMARK, BAMBOO_NUM_OF_CORE,
- gcself_numsendobjs, gcself_numreceiveobjs, false);
- sendStall = true;
- }
- } // if(STARTUPCORE == BAMBOO_NUM_OF_CORE) ...
- GC_BAMBOO_DEBUGPRINT(0xed0a);
-
- if(BAMBOO_NUM_OF_CORE == STARTUPCORE) {
- GC_BAMBOO_DEBUGPRINT(0xed0b);
- return;
- }
- } // while(MARKPHASE == gcphase)
-
- BAMBOO_CACHE_MF();
-} // mark()
-
-inline void compact2Heaptophelper_I(unsigned int coren,
- unsigned int* p,
- unsigned int* numblocks,
- unsigned int* remain) {
- unsigned int b;
- unsigned int memneed = gcrequiredmems[coren] + BAMBOO_CACHE_LINE_SIZE;
- if(STARTUPCORE == coren) {
- gctomove = true;
- gcmovestartaddr = *p;
- gcdstcore = gctopcore;
- gcblock2fill = *numblocks + 1;
- } else {
- send_msg_4(coren, GCMOVESTART, gctopcore, *p, (*numblocks) + 1, false);
- }
- GC_BAMBOO_DEBUGPRINT_REG(coren);
- GC_BAMBOO_DEBUGPRINT_REG(gctopcore);
- GC_BAMBOO_DEBUGPRINT_REG(*p);
- GC_BAMBOO_DEBUGPRINT_REG(*numblocks+1);
- if(memneed < *remain) {
- GC_BAMBOO_DEBUGPRINT(0xd104);
- *p = *p + memneed;
- gcrequiredmems[coren] = 0;
- gcloads[gctopcore] += memneed;
- *remain = *remain - memneed;
- } else {
- GC_BAMBOO_DEBUGPRINT(0xd105);
- // next available block
- *p = *p + *remain;
- gcfilledblocks[gctopcore] += 1;
- unsigned int newbase = 0;
- BASEPTR(gctopcore, gcfilledblocks[gctopcore], &newbase);
- gcloads[gctopcore] = newbase;
- gcrequiredmems[coren] -= *remain - BAMBOO_CACHE_LINE_SIZE;
- gcstopblock[gctopcore]++;
- gctopcore = NEXTTOPCORE(gctopblock);
- gctopblock++;
- *numblocks = gcstopblock[gctopcore];
- *p = gcloads[gctopcore];
- BLOCKINDEX(*p, &b);
- *remain=(b<NUMCORES4GC) ?
- ((BAMBOO_SMEM_SIZE_L)-((*p)%(BAMBOO_SMEM_SIZE_L)))
- : ((BAMBOO_SMEM_SIZE)-((*p)%(BAMBOO_SMEM_SIZE)));
- GC_BAMBOO_DEBUGPRINT(0xd106);
- GC_BAMBOO_DEBUGPRINT_REG(gctopcore);
- GC_BAMBOO_DEBUGPRINT_REG(*p);
- GC_BAMBOO_DEBUGPRINT_REG(b);
- GC_BAMBOO_DEBUGPRINT_REG(*remain);
- } // if(memneed < remain)
- gcmovepending--;
-} // void compact2Heaptophelper_I(int, int*, int*, int*)
-
-inline void compact2Heaptop() {
- // no cores with spare mem and some cores are blocked with pending move
- // find the current heap top and make them move to the heap top
- unsigned int p;
- unsigned int numblocks = gcfilledblocks[gctopcore];
- p = gcloads[gctopcore];
- unsigned int b;
- BLOCKINDEX(p, &b);
- unsigned int remain = (b<NUMCORES4GC) ?
- ((BAMBOO_SMEM_SIZE_L)-(p%(BAMBOO_SMEM_SIZE_L)))
- : ((BAMBOO_SMEM_SIZE)-(p%(BAMBOO_SMEM_SIZE)));
- // check if the top core finishes
- BAMBOO_ENTER_RUNTIME_MODE_FROM_CLIENT();
- if(gccorestatus[gctopcore] != 0) {
- GC_BAMBOO_DEBUGPRINT(0xd101);
- GC_BAMBOO_DEBUGPRINT_REG(gctopcore);
- // let the top core finishes its own work first
- compact2Heaptophelper_I(gctopcore, &p, &numblocks, &remain);
- BAMBOO_ENTER_CLIENT_MODE_FROM_RUNTIME();
- return;
- }
- BAMBOO_ENTER_CLIENT_MODE_FROM_RUNTIME();
-
- GC_BAMBOO_DEBUGPRINT(0xd102);
- GC_BAMBOO_DEBUGPRINT_REG(gctopcore);
- GC_BAMBOO_DEBUGPRINT_REG(p);
- GC_BAMBOO_DEBUGPRINT_REG(b);
- GC_BAMBOO_DEBUGPRINT_REG(remain);
- for(int i = 0; i < NUMCORES4GC; i++) {
- BAMBOO_ENTER_RUNTIME_MODE_FROM_CLIENT();
- if((gccorestatus[i] != 0) && (gcrequiredmems[i] > 0)) {
- GC_BAMBOO_DEBUGPRINT(0xd103);
- compact2Heaptophelper_I(i, &p, &numblocks, &remain);
- if(gccorestatus[gctopcore] != 0) {
- GC_BAMBOO_DEBUGPRINT(0xd101);
- GC_BAMBOO_DEBUGPRINT_REG(gctopcore);
- BAMBOO_ENTER_CLIENT_MODE_FROM_RUNTIME();
- // the top core is not free now
- return;
- }
- } // if((gccorestatus[i] != 0) && (gcrequiredmems[i] > 0))
- BAMBOO_ENTER_CLIENT_MODE_FROM_RUNTIME();
- } // for(i = 0; i < NUMCORES4GC; i++)
- GC_BAMBOO_DEBUGPRINT(0xd106);
-} // void compact2Heaptop()
-
-inline void resolvePendingMoveRequest() {
- GC_BAMBOO_DEBUGPRINT(0xeb01);
- GC_BAMBOO_DEBUGPRINT(0xeeee);
- for(int k = 0; k < NUMCORES4GC; k++) {
- GC_BAMBOO_DEBUGPRINT(0xf000+k);
- GC_BAMBOO_DEBUGPRINT_REG(gccorestatus[k]);
- GC_BAMBOO_DEBUGPRINT_REG(gcloads[k]);
- GC_BAMBOO_DEBUGPRINT_REG(gcfilledblocks[k]);
- GC_BAMBOO_DEBUGPRINT_REG(gcstopblock[k]);
- }
- GC_BAMBOO_DEBUGPRINT(0xffff);
- int i;
- int j;
- bool nosparemem = true;
- bool haspending = false;
- bool hasrunning = false;
- bool noblock = false;
- unsigned int dstcore = 0; // the core who need spare mem
- unsigned int sourcecore = 0; // the core who has spare mem
- for(i = j = 0; (i < NUMCORES4GC) && (j < NUMCORES4GC); ) {
- if(nosparemem) {
- // check if there are cores with spare mem
- if(gccorestatus[i] == 0) {
- // finished working, check if it still have spare mem
- if(gcfilledblocks[i] < gcstopblock[i]) {
- // still have spare mem
- nosparemem = false;
- sourcecore = i;
- } // if(gcfilledblocks[i] < gcstopblock[i]) else ...
- }
- i++;
- } // if(nosparemem)
- if(!haspending) {
- if(gccorestatus[j] != 0) {
- // not finished, check if it has pending move requests
- if((gcfilledblocks[j]==gcstopblock[j])&&(gcrequiredmems[j]>0)) {
- dstcore = j;
- haspending = true;
- } else {
- hasrunning = true;
- } // if((gcfilledblocks[i] == gcstopblock[i])...) else ...
- } // if(gccorestatus[i] == 0) else ...
- j++;
- } // if(!haspending)
- if(!nosparemem && haspending) {
- // find match
- unsigned int tomove = 0;
- unsigned int startaddr = 0;
- BAMBOO_ENTER_RUNTIME_MODE_FROM_CLIENT();
- gcrequiredmems[dstcore] = assignSpareMem_I(sourcecore,
- gcrequiredmems[dstcore],
- &tomove,
- &startaddr);
- BAMBOO_ENTER_CLIENT_MODE_FROM_RUNTIME();
- GC_BAMBOO_DEBUGPRINT(0xeb02);
- GC_BAMBOO_DEBUGPRINT_REG(sourcecore);
- GC_BAMBOO_DEBUGPRINT_REG(dstcore);
- GC_BAMBOO_DEBUGPRINT_REG(startaddr);
- GC_BAMBOO_DEBUGPRINT_REG(tomove);
- if(STARTUPCORE == dstcore) {
- GC_BAMBOO_DEBUGPRINT(0xeb03);
- gcdstcore = sourcecore;
- gctomove = true;
- gcmovestartaddr = startaddr;
- gcblock2fill = tomove;
- } else {
- GC_BAMBOO_DEBUGPRINT(0xeb04);
- send_msg_4(dstcore, GCMOVESTART, sourcecore,
- startaddr, tomove, false);
- }
- gcmovepending--;
- nosparemem = true;
- haspending = false;
- noblock = true;
- }
- } // for(i = 0; i < NUMCORES4GC; i++)
- GC_BAMBOO_DEBUGPRINT(0xcccc);
- GC_BAMBOO_DEBUGPRINT_REG(hasrunning);
- GC_BAMBOO_DEBUGPRINT_REG(haspending);
- GC_BAMBOO_DEBUGPRINT_REG(noblock);
-
- if(!hasrunning && !noblock) {
- gcphase = SUBTLECOMPACTPHASE;
- compact2Heaptop();
- }
-
-} // void resovePendingMoveRequest()
-
-struct moveHelper {
- unsigned int numblocks; // block num for heap
- unsigned int base; // base virtual address of current heap block
- unsigned int ptr; // virtual address of current heap top
- unsigned int offset; // offset in current heap block
- unsigned int blockbase; // virtual address of current small block to check
- unsigned int blockbound; // bound virtual address of current small blcok
- unsigned int sblockindex; // index of the small blocks
- unsigned int top; // real size of current heap block to check
- unsigned int bound; // bound size of current heap block to check
-}; // struct moveHelper
-
-// If out of boundary of valid shared memory, return false, else return true
-inline bool nextSBlock(struct moveHelper * orig) {
- orig->blockbase = orig->blockbound;
-
- bool sbchanged = false;
- unsigned int origptr = orig->ptr;
- unsigned int blockbase = orig->blockbase;
- unsigned int blockbound = orig->blockbound;
- unsigned int bound = orig->bound;
- GC_BAMBOO_DEBUGPRINT(0xecc0);
- GC_BAMBOO_DEBUGPRINT_REG(blockbase);
- GC_BAMBOO_DEBUGPRINT_REG(blockbound);
- GC_BAMBOO_DEBUGPRINT_REG(bound);
- GC_BAMBOO_DEBUGPRINT_REG(origptr);
-outernextSBlock:
- // check if across a big block
- // TODO now do not zero out the whole memory, maybe the last two conditions
- // are useless now
- if((blockbase>=bound)||(origptr>=bound)
- ||((origptr!=NULL)&&(*((int*)origptr))==0)||((*((int*)blockbase))==0)) {
-innernextSBlock:
- // end of current heap block, jump to next one
- orig->numblocks++;
- GC_BAMBOO_DEBUGPRINT(0xecc1);
- GC_BAMBOO_DEBUGPRINT_REG(orig->numblocks);
- BASEPTR(BAMBOO_NUM_OF_CORE, orig->numblocks, &(orig->base));
- GC_BAMBOO_DEBUGPRINT(orig->base);
- if(orig->base >= gcbaseva + BAMBOO_SHARED_MEM_SIZE) {
- // out of boundary
- orig->ptr = orig->base; // set current ptr to out of boundary too
- return false;
- }
- orig->blockbase = orig->base;
- orig->sblockindex =
- (unsigned int)(orig->blockbase-gcbaseva)/BAMBOO_SMEM_SIZE;
- sbchanged = true;
- unsigned int blocknum = 0;
- BLOCKINDEX(orig->base, &blocknum);
- if(bamboo_smemtbl[blocknum] == 0) {
-#ifdef GC_TBL_DEBUG
- if(blocknum >= gcnumblock) {
- BAMBOO_EXIT(0xb012);
- }
-#endif
- // goto next block
- goto innernextSBlock;
- }
- // check the bamboo_smemtbl to decide the real bound
- orig->bound = orig->base + bamboo_smemtbl[blocknum];
- } else if(0 == (orig->blockbase%BAMBOO_SMEM_SIZE)) {
- orig->sblockindex += 1;
- sbchanged = true;
- } // if((orig->blockbase >= orig->bound) || (orig->ptr >= orig->bound)...
-
- // check if this sblock should be skipped or have special start point
- int sbstart = gcsbstarttbl[orig->sblockindex];
-#ifdef GC_TBL_DEBUG
- if((orig->sblockindex) >= gcsbstarttbl_len) {
- BAMBOO_EXIT(0xb013);
- }
-#endif
- if(sbstart == -1) {
- // goto next sblock
- GC_BAMBOO_DEBUGPRINT(0xecc2);
- orig->sblockindex += 1;
- orig->blockbase += BAMBOO_SMEM_SIZE;
- goto outernextSBlock;
- } else if((sbstart != 0) && (sbchanged)) {
- // the first time to access this SBlock
- GC_BAMBOO_DEBUGPRINT(0xecc3);
- // not start from the very beginning
- orig->blockbase = sbstart;
- } // if(gcsbstarttbl[orig->sblockindex] == -1) else ...
-
- // setup information for this sblock
- orig->blockbound = orig->blockbase+(unsigned int)*((int*)(orig->blockbase));
- orig->offset = BAMBOO_CACHE_LINE_SIZE;
- orig->ptr = orig->blockbase + orig->offset;
- GC_BAMBOO_DEBUGPRINT(0xecc4);
- GC_BAMBOO_DEBUGPRINT_REG(orig->base);
- GC_BAMBOO_DEBUGPRINT_REG(orig->bound);
- GC_BAMBOO_DEBUGPRINT_REG(orig->ptr);
- GC_BAMBOO_DEBUGPRINT_REG(orig->blockbound);
- GC_BAMBOO_DEBUGPRINT_REG(orig->blockbase);
- GC_BAMBOO_DEBUGPRINT_REG(orig->offset);
- if(orig->ptr >= orig->bound) {
- // met a lobj, move to next block
- goto innernextSBlock;
- }
-
- return true;
-} // bool nextSBlock(struct moveHelper * orig)
-
-// return false if there are no available data to compact
-inline bool initOrig_Dst(struct moveHelper * orig,
- struct moveHelper * to) {
- // init the dst ptr
- to->numblocks = 0;
- to->top = to->offset = BAMBOO_CACHE_LINE_SIZE;
- to->bound = BAMBOO_SMEM_SIZE_L;
- BASEPTR(BAMBOO_NUM_OF_CORE, to->numblocks, &(to->base));
-
- GC_BAMBOO_DEBUGPRINT(0xef01);
- GC_BAMBOO_DEBUGPRINT_REG(to->base);
- unsigned int tobase = to->base;
- to->ptr = tobase + to->offset;
-#ifdef GC_CACHE_ADAPT
- // initialize the gc_cache_revise_information
- gc_cache_revise_infomation.to_page_start_va = to->ptr;
- unsigned int toindex = (unsigned int)(tobase-gcbaseva)/(BAMBOO_PAGE_SIZE);
- gc_cache_revise_infomation.to_page_end_va = (BAMBOO_PAGE_SIZE)*
- (toindex+1);
- gc_cache_revise_infomation.to_page_index = toindex;
- gc_cache_revise_infomation.orig_page_start_va = -1;
-#endif // GC_CACHE_ADAPT
-
- // init the orig ptr
- orig->numblocks = 0;
- orig->base = tobase;
- unsigned int blocknum = 0;
- BLOCKINDEX(orig->base, &blocknum);
- unsigned int origbase = orig->base;
- // check the bamboo_smemtbl to decide the real bound
- orig->bound = origbase + (unsigned int)bamboo_smemtbl[blocknum];
-#ifdef GC_TBL_DEBUG
- if((orig->sblockindex) >= gcsbstarttbl_len) {
- BAMBOO_EXIT(0xb014);
- }
-#endif
- orig->blockbase = origbase;
- orig->sblockindex = (unsigned int)(origbase - gcbaseva) / BAMBOO_SMEM_SIZE;
- GC_BAMBOO_DEBUGPRINT(0xef02);
- GC_BAMBOO_DEBUGPRINT_REG(origbase);
- GC_BAMBOO_DEBUGPRINT_REG(orig->sblockindex);
- GC_BAMBOO_DEBUGPRINT_REG(gcsbstarttbl);
- GC_BAMBOO_DEBUGPRINT_REG(gcsbstarttbl[orig->sblockindex]);
-
- int sbstart = gcsbstarttbl[orig->sblockindex];
-#ifdef GC_TBL_DEBUG
- if((orig->sblockindex) >= gcsbstarttbl_len) {
- BAMBOO_EXIT(0xb015);
- }
-#endif
- if(sbstart == -1) {
- GC_BAMBOO_DEBUGPRINT(0xef03);
- // goto next sblock
- orig->blockbound =
- gcbaseva+BAMBOO_SMEM_SIZE*(orig->sblockindex+1);
- return nextSBlock(orig);
- } else if(sbstart != 0) {
- GC_BAMBOO_DEBUGPRINT(0xef04);
- orig->blockbase = sbstart;
- }
- GC_BAMBOO_DEBUGPRINT(0xef05);
- orig->blockbound = orig->blockbase + *((int*)(orig->blockbase));
- orig->offset = BAMBOO_CACHE_LINE_SIZE;
- orig->ptr = orig->blockbase + orig->offset;
- GC_BAMBOO_DEBUGPRINT(0xef06);
- GC_BAMBOO_DEBUGPRINT_REG(orig->base);
-
- return true;
-} // bool initOrig_Dst(struct moveHelper * orig, struct moveHelper * to)
-
-inline void nextBlock(struct moveHelper * to) {
- to->top = to->bound + BAMBOO_CACHE_LINE_SIZE; // header!
- to->bound += BAMBOO_SMEM_SIZE;
- to->numblocks++;
- BASEPTR(BAMBOO_NUM_OF_CORE, to->numblocks, &(to->base));
- to->offset = BAMBOO_CACHE_LINE_SIZE;
- to->ptr = to->base + to->offset;
-} // void nextBlock(struct moveHelper * to)
-
-#ifdef GC_CACHE_ADAPT
-inline void samplingDataConvert(unsigned int current_ptr) {
- unsigned int tmp_factor =
- current_ptr-gc_cache_revise_infomation.to_page_start_va;
- unsigned int topage=gc_cache_revise_infomation.to_page_index;
- unsigned int oldpage = gc_cache_revise_infomation.orig_page_index;
- int * newtable=&gccachesamplingtbl_r[topage];
- int * oldtable=&gccachesamplingtbl[oldpage];
-
- for(int tt = 0; tt < NUMCORESACTIVE; tt++) {
- (*newtable) = ((*newtable)+(*oldtable)*tmp_factor);
- newtable=(int*)(((char *)newtable)+size_cachesamplingtbl_local_r);
- oldtable=(int*) (((char *)oldtable)+size_cachesamplingtbl_local);
- }
-} // inline void samplingDataConvert(int)
-
-inline void completePageConvert(struct moveHelper * orig,
- struct moveHelper * to,
- unsigned int current_ptr,
- bool closeToPage) {
- unsigned int ptr = 0;
- unsigned int tocompare = 0;
- if(closeToPage) {
- ptr = to->ptr;
- tocompare = gc_cache_revise_infomation.to_page_end_va;
- } else {
- ptr = orig->ptr;
- tocompare = gc_cache_revise_infomation.orig_page_end_va;
- }
- if((unsigned int)ptr >= (unsigned int)tocompare) {
- // end of an orig/to page
- // compute the impact of this page for the new page
- samplingDataConvert(current_ptr);
- // prepare for an new orig page
- unsigned int tmp_index =
- (unsigned int)((unsigned int)orig->ptr-gcbaseva)/(BAMBOO_PAGE_SIZE);
- gc_cache_revise_infomation.orig_page_start_va = orig->ptr;
- gc_cache_revise_infomation.orig_page_end_va = gcbaseva +
- (BAMBOO_PAGE_SIZE)*(unsigned int)(tmp_index+1);
- gc_cache_revise_infomation.orig_page_index = tmp_index;
- gc_cache_revise_infomation.to_page_start_va = to->ptr;
- if(closeToPage) {
- gc_cache_revise_infomation.to_page_end_va = gcbaseva+(BAMBOO_PAGE_SIZE)
- *(((unsigned int)(to->ptr)-gcbaseva)/(BAMBOO_PAGE_SIZE)+1);
- gc_cache_revise_infomation.to_page_index =
- ((unsigned int)(to->ptr)-gcbaseva)/(BAMBOO_PAGE_SIZE);
- }
- }
-} // inline void completePageConvert(...)
-#endif // GC_CACHE_ADAPT
-
-// endaddr does not contain spaces for headers
-inline bool moveobj(struct moveHelper * orig,
- struct moveHelper * to,
- unsigned int stopblock) {
- if(stopblock == 0) {
- return true;
- }
-
- GC_BAMBOO_DEBUGPRINT(0xe201);
- GC_BAMBOO_DEBUGPRINT_REG(orig->ptr);
- GC_BAMBOO_DEBUGPRINT_REG(to->ptr);
-#ifdef GC_TBL_DEBUG
- unsigned int bkptr = (unsigned int)(orig->ptr);
-
- if((unsigned int)(to->ptr) > (unsigned int)(orig->ptr)) {
- tprintf("Error to->ptr > orig->ptr: %x, %x \n", (int)(to->ptr), (int)(orig->ptr));
- BAMBOO_EXIT(0xb016);
- }
-#endif
-
- int type = 0;
- unsigned int size = 0;
- unsigned int isize = 0;
-innermoveobj:
- /*while((*((char*)(orig->ptr))) == (char)(-2)) {
- orig->ptr = (unsigned int)((void*)(orig->ptr) + 1);
- }*/
-#ifdef GC_CACHE_ADAPT
- completePageConvert(orig, to, to->ptr, false);
-#endif
- unsigned int origptr = (unsigned int)(orig->ptr);
- unsigned int origbound = (unsigned int)orig->bound;
- unsigned int origblockbound = (unsigned int)orig->blockbound;
- if((origptr >= origbound) || (origptr == origblockbound)) {
- if(!nextSBlock(orig)) {
- // finished, no more data
-#ifdef GC_TBL_DEBUG
- tprintf("AAAA %x \n", (int)(orig->ptr));
-#endif
- return true;
- }
- goto innermoveobj;
- }
- GC_BAMBOO_DEBUGPRINT(0xe202);
- GC_BAMBOO_DEBUGPRINT_REG(origptr);
- GC_BAMBOO_DEBUGPRINT(((int *)(origptr))[0]);
- // check the obj's type, size and mark flag
- type = ((int *)(origptr))[0];
- size = 0;
- if(type == 0) {
- // end of this block, go to next one
- if(!nextSBlock(orig)) {
- // finished, no more data
-#ifdef GC_TBL_DEBUG
- tprintf("BBBB %x \n", (int)(orig->ptr));
-#endif
- return true;
- }
- goto innermoveobj;
- } else if(type < NUMCLASSES) {
- // a normal object
- size = classsize[type];
- } else {
- // an array
- struct ArrayObject *ao=(struct ArrayObject *)(origptr);
- unsigned int elementsize=classsize[type];
- unsigned int length=ao->___length___;
- size=(unsigned int)sizeof(struct ArrayObject)
- +(unsigned int)(length*elementsize);
- }
- GC_BAMBOO_DEBUGPRINT(0xe203);
- GC_BAMBOO_DEBUGPRINT_REG(origptr);
- GC_BAMBOO_DEBUGPRINT_REG(size);
- ALIGNSIZE(size, &isize); // no matter is the obj marked or not
- // should be able to across
-#ifdef GC_TBL_DEBUG
- int sindex = OBJMAPPINGINDEX((unsigned int)bkptr);
- int eindex = OBJMAPPINGINDEX((unsigned int)(origptr));
- for(int tmpi = sindex+1; tmpi < eindex; tmpi++) {
- if((gcmappingtbl[tmpi] != 0) &&
- (hostcore(gcbaseva+bamboo_baseobjsize*tmpi)==BAMBOO_NUM_OF_CORE) &&
- (hostcore(gcbaseva+bamboo_baseobjsize*(tmpi+1))==BAMBOO_NUM_OF_CORE)) {
- tprintf("Error moveobj --: %x, %x, %x, %d, %x \n", (int)bkptr,
- (int)origptr, (int)(gcbaseva+bamboo_baseobjsize*tmpi),
- (int)gcmappingtbl[tmpi], (int)(*((char*)(bkptr))));
- BAMBOO_EXIT(0xb017);
- }
- }
-#endif
- if(((int *)(origptr))[BAMBOOMARKBIT] == MARKED) {
- unsigned int totop = (unsigned int)to->top;
- unsigned int tobound = (unsigned int)to->bound;
- GC_BAMBOO_DEBUGPRINT(0xe204);
-#ifdef GC_PROFILE
-#ifdef MGC_SPEC
- if((STARTUPCORE != BAMBOO_NUM_OF_CORE) || gc_profile_flag) {
-#endif
- gc_num_liveobj++;
-#ifdef MGC_SPEC
- }
-#endif
-#endif
- // marked obj, copy it to current heap top
- // check to see if remaining space is enough
- if((unsigned int)(totop + isize) > tobound) {
- // fill 0 indicating the end of this block
- BAMBOO_MEMSET_WH(to->ptr, '\0', tobound - totop);
- // fill the header of this block and then go to next block
- to->offset += tobound - totop;
- BAMBOO_MEMSET_WH(to->base, '\0', BAMBOO_CACHE_LINE_SIZE);
- (*((int*)(to->base))) = to->offset;
-#ifdef GC_CACHE_ADAPT
- unsigned int tmp_ptr = to->ptr;
-#endif // GC_CACHE_ADAPT
- nextBlock(to);
-#ifdef GC_CACHE_ADAPT
- completePageConvert(orig, to, tmp_ptr, true);
-#endif // GC_CACHE_ADAPT
- if(stopblock == to->numblocks) {
- // already fulfilled the block
-#ifdef GC_TBL_DEBUG
- tprintf("CCCC %x \n", (int)(orig->ptr));
-#endif
- return true;
- } // if(stopblock == to->numblocks)
- } // if(to->top + isize > to->bound)
- // set the mark field to 2, indicating that this obj has been moved
- // and need to be flushed
- ((int *)(origptr))[BAMBOOMARKBIT] = COMPACTED;
- unsigned int toptr = (unsigned int)to->ptr;
-#ifdef GC_TBL_DEBUG
- {
- // scan all pointers in ptr
- unsigned int * tt_pointer;
- tt_pointer=pointerarray[type];
- if (tt_pointer==0) {
- /* Array of primitives */
- /* Do nothing */
- } else if (((unsigned int)tt_pointer)==1) {
- /* Array of pointers */
- struct ArrayObject *ao=(struct ArrayObject *)(origptr);
- int tt_length=ao->___length___;
- int tt_j;
- for(tt_j=0; tt_j<tt_length; tt_j++) {
- void *objptr =
- ((void **)(((char *)&ao->___length___)+sizeof(int)))[tt_j];
- if((objptr != 0) &&
- ((gcmappingtbl[OBJMAPPINGINDEX((unsigned int)objptr)] == 0) ||
- (gcmappingtbl[OBJMAPPINGINDEX((unsigned int)objptr)] == 1))) {
- tprintf("Error moveobj, missing live obj ++: %x, %x, %d, %d, %d, %d, %d, %d, %d, %d \n",
- (int)origptr, (int)objptr, __LINE__, tt_j,
- ((int *)(origptr))[0], ((int *)(objptr))[0],
- ((int *)(objptr))[BAMBOOMARKBIT],
- gcmappingtbl[OBJMAPPINGINDEX((unsigned int)objptr)],
- hostcore(objptr), BAMBOO_NUM_OF_CORE);
- BAMBOO_EXIT(0xb018);
- }
- }
- } else {
- unsigned int tt_size=tt_pointer[0];
- int tt_i;
- for(tt_i=1; tt_i<=tt_size; tt_i++) {
- unsigned int tt_offset=tt_pointer[tt_i];
- void * objptr=*((void **)(((char *)origptr)+tt_offset));
- if((objptr!= 0) &&
- ((gcmappingtbl[OBJMAPPINGINDEX((unsigned int)objptr)] == 0) ||
- (gcmappingtbl[OBJMAPPINGINDEX((unsigned int)objptr)] == 1))) {
- tprintf("Error moveobj, missing live obj ++: %x, %x, %d, %d, %d, %d, %d, %d, %d, %d \n",
- (int)origptr, (int)objptr, __LINE__, tt_i,
- ((int *)(origptr))[0], ((int *)(objptr))[0],
- ((int *)(objptr))[BAMBOOMARKBIT],
- gcmappingtbl[OBJMAPPINGINDEX((unsigned int)objptr)],
- hostcore(objptr), BAMBOO_NUM_OF_CORE);
- BAMBOO_EXIT(0xb019);
- }
- }
- } // if (pointer==0) else if ... else ...
- {
- tt_pointer=pointerarray[OBJECTTYPE];
- //handle object class
- unsigned int tt_size=tt_pointer[0];
- int tt_i;
- for(tt_i=1; tt_i<=tt_size; tt_i++) {
- unsigned int tt_offset=tt_pointer[tt_i];
- void * objptr=*((void **)(((char *)origptr)+tt_offset));
- if((objptr!= 0) &&
- ((gcmappingtbl[OBJMAPPINGINDEX((unsigned int)objptr)] == 0) ||
- (gcmappingtbl[OBJMAPPINGINDEX((unsigned int)objptr)] == 1))) {
- tprintf("Error moveobj, missing live obj ++: %x, %x, %d, %d, %d, %d, %d, %d, %d, %d \n",
- (int)origptr, (int)objptr, __LINE__, tt_i,
- ((int *)(origptr))[0], ((int *)(objptr))[0],
- ((int *)(objptr))[BAMBOOMARKBIT],
- gcmappingtbl[OBJMAPPINGINDEX((unsigned int)objptr)],
- hostcore(objptr), BAMBOO_NUM_OF_CORE);
- BAMBOO_EXIT(0xb01a);
- }
- }
- }
- }
- if((unsigned int)(toptr) > (unsigned int)(origptr)) {
- tprintf("Error to->ptr > orig->ptr: %x, %x \n", (int)(toptr),
- (int)(origptr));
- BAMBOO_EXIT(0xb01b);
- }
-#endif
- if(toptr != origptr) {
- if((unsigned int)(origptr) < (unsigned int)(toptr+size)) {
- memmove(toptr, origptr, size);
- } else {
- memcpy(toptr, origptr, size);
- }
- // fill the remaining space with -2
- BAMBOO_MEMSET_WH((unsigned int)(toptr+size), -2, isize-size);
- }
-#ifdef GC_TBL_DEBUG
- if((gcmappingtbl[OBJMAPPINGINDEX((unsigned int)origptr)] != 2)) {
- tprintf("Error moveobj: %x, %x, %d \n", (int)origptr,
- ((int *)(origptr))[BAMBOOMARKBIT],
- gcmappingtbl[OBJMAPPINGINDEX((unsigned int)origptr)]);
- BAMBOO_EXIT(0xb01c);
- }
-#endif
- // store mapping info
- gcmappingtbl[OBJMAPPINGINDEX((unsigned int)origptr)]=(unsigned int)toptr;
-#ifdef GC_TBL_DEBUG
- if(gcmappingtbl[OBJMAPPINGINDEX((unsigned int)origptr)] ==
- gcmappingtbl[OBJMAPPINGINDEX((unsigned int)origptr)-1]) {
- tprintf("Error moveobj ++ : %x, %x, %d \n", (int)origptr, (int)toptr,
- OBJMAPPINGINDEX((unsigned int)origptr));
- BAMBOO_EXIT(0xb01d);
- }
- // scan all pointers in ptr
- unsigned int * tt_pointer;
- tt_pointer=pointerarray[type];
- if (tt_pointer==0) {
- /* Array of primitives */
- /* Do nothing */
- } else if (((unsigned int)tt_pointer)==1) {
- /* Array of pointers */
- struct ArrayObject *ao=(struct ArrayObject *)(toptr);
- int tt_length=ao->___length___;
- int tt_j;
- for(tt_j=0; tt_j<tt_length; tt_j++) {
- void *objptr =
- ((void **)(((char *)&ao->___length___)+sizeof(int)))[tt_j];
- if((objptr != 0) &&
- (gcmappingtbl[OBJMAPPINGINDEX((unsigned int)objptr)] == 0)) {
- tprintf("Error moveobj, missing live obj ++: %x, %x, %d, %d, %d, %d, %d, %d, %d, %d \n",
- (int)origptr, (int)objptr, __LINE__, tt_j,
- ((int *)(origptr))[0], ((int *)(objptr))[0],
- ((int *)(objptr))[BAMBOOMARKBIT],
- gcmappingtbl[OBJMAPPINGINDEX((unsigned int)objptr)],
- hostcore(objptr), BAMBOO_NUM_OF_CORE);
- BAMBOO_EXIT(0xb01e);
- }
- }
- } else {
- unsigned int tt_size=tt_pointer[0];
- int tt_i;
- for(tt_i=1; tt_i<=tt_size; tt_i++) {
- unsigned int tt_offset=tt_pointer[tt_i];
- void * objptr=*((void **)(((char *)toptr)+tt_offset));
- if((objptr != 0) &&
- (gcmappingtbl[OBJMAPPINGINDEX((unsigned int)objptr)] == 0)) {
- tprintf("Error moveobj, missing live obj ++: %x, %x, %d, %d, %d, %d, %d, %d, %d, %d \n",
- (int)origptr, (int)objptr, __LINE__, tt_i,
- ((int *)(origptr))[0], ((int *)(objptr))[0],
- ((int *)(objptr))[BAMBOOMARKBIT],
- gcmappingtbl[OBJMAPPINGINDEX((unsigned int)objptr)],
- hostcore(objptr), BAMBOO_NUM_OF_CORE);
- BAMBOO_EXIT(0xb01f);
- }
- }
- } // if (pointer==0) else if ... else ...
- {
- tt_pointer=pointerarray[OBJECTTYPE];
- //handle object class
- unsigned int tt_size=tt_pointer[0];
- int tt_i;
- for(tt_i=1; tt_i<=tt_size; tt_i++) {
- unsigned int tt_offset=tt_pointer[tt_i];
- void * objptr=*((void **)(((char *)origptr)+tt_offset));
- if((objptr!= 0) &&
- ((gcmappingtbl[OBJMAPPINGINDEX((unsigned int)objptr)] == 0) ||
- (gcmappingtbl[OBJMAPPINGINDEX((unsigned int)objptr)] == 1))) {
- tprintf("Error moveobj, missing live obj ++: %x, %x, %d, %d, %d, %d, %d, %d, %d, %d \n",
- (int)origptr, (int)objptr, __LINE__, tt_i,
- ((int *)(origptr))[0], ((int *)(objptr))[0],
- ((int *)(objptr))[BAMBOOMARKBIT],
- gcmappingtbl[OBJMAPPINGINDEX((unsigned int)objptr)],
- hostcore(objptr), BAMBOO_NUM_OF_CORE);
- BAMBOO_EXIT(0xb020);
- }
- }
- }
- if(!ISSHAREDOBJ(toptr)) {
- tprintf("Error: %x, %x \n", (int)origptr, (int)toptr);
- BAMBOO_EXIT(0xb021);
- }
-#endif
- GC_BAMBOO_DEBUGPRINT(0xcdce);
- GC_BAMBOO_DEBUGPRINT_REG(origptr);
- GC_BAMBOO_DEBUGPRINT_REG(toptr);
- GC_BAMBOO_DEBUGPRINT_REG(isize);
- gccurr_heaptop -= isize;
- to->ptr += isize;
- to->offset += isize;
- to->top += isize;
-#ifdef GC_CACHE_ADAPT
- unsigned int tmp_ptr = to->ptr;
-#endif // GC_CACHE_ADAPT
- if(to->top == to->bound) {
- // fill the header of this block and then go to next block
- BAMBOO_MEMSET_WH(to->base, '\0', BAMBOO_CACHE_LINE_SIZE);
- (*((int*)(to->base))) = to->offset;
- nextBlock(to);
- }
-#ifdef GC_CACHE_ADAPT
- completePageConvert(orig, to, tmp_ptr, true);
-#endif // GC_CACHE_ADAPT
- } // if(mark == 1)
-#ifdef GC_TBL_DEBUG
- else {
- // skip the whole obj
- int sindex = OBJMAPPINGINDEX((unsigned int)origptr);
- int eindex = OBJMAPPINGINDEX((unsigned int)(origptr+size));
- for(int tmpi = sindex; tmpi < eindex; tmpi++) {
- if((gcmappingtbl[tmpi] != 0) &&
- (hostcore(gcbaseva+bamboo_baseobjsize*tmpi)==BAMBOO_NUM_OF_CORE) &&
- (hostcore(gcbaseva+bamboo_baseobjsize*(tmpi+1))==BAMBOO_NUM_OF_CORE))
- {
- tprintf("Error moveobj **: %x, %x, %x, %d, (%d, %d, %x) \n",
- (int)origptr, (int)(origptr+isize),
- (int)(gcbaseva+bamboo_baseobjsize*tmpi), gcmappingtbl[tmpi], type,
- isize, ((int *)(origptr))[BAMBOOMARKBIT]);
- BAMBOO_EXIT(0xb022);
- }
- }
- }
-#endif
- GC_BAMBOO_DEBUGPRINT(0xe205);
-
- // move to next obj
- orig->ptr += isize; // size;
-
-#ifdef GC_TBL_DEBUG
- if(!ISSHAREDOBJ(orig->ptr) || !ISSHAREDOBJ(to->ptr)) {
- tprintf("Error moveobj out of boundary: %x, %x, %d, %d \n",
- (int)(orig->ptr), (int)(to->ptr), size, isize);
- BAMBOO_EXIT(0x2022);
- }
-#endif
-
- GC_BAMBOO_DEBUGPRINT_REG(isize);
- GC_BAMBOO_DEBUGPRINT_REG(size);
- GC_BAMBOO_DEBUGPRINT_REG(orig->ptr);
- GC_BAMBOO_DEBUGPRINT_REG(orig->bound);
- if(((unsigned int)(orig->ptr) > (unsigned int)(orig->bound))
- || ((unsigned int)(orig->ptr) == (unsigned int)(orig->blockbound))) {
- GC_BAMBOO_DEBUGPRINT(0xe206);
- if(!nextSBlock(orig)) {
- // finished, no more data
-#ifdef GC_TBL_DEBUG
- tprintf("DDDD %x \n", (int)(orig->ptr));
-#endif
- return true;
- }
- }
- GC_BAMBOO_DEBUGPRINT(0xe207);
- GC_BAMBOO_DEBUGPRINT_REG(orig->ptr);
- return false;
-} //bool moveobj(struct moveHelper* orig,struct moveHelper* to,int* endaddr)
-
-// should be invoked with interrupt closed
-inline int assignSpareMem_I(unsigned int sourcecore,
- unsigned int * requiredmem,
- unsigned int * tomove,
- unsigned int * startaddr) {
- unsigned int b = 0;
- BLOCKINDEX(gcloads[sourcecore], &b);
- unsigned int boundptr = (b<NUMCORES4GC) ? ((b+1)*BAMBOO_SMEM_SIZE_L)
- : (BAMBOO_LARGE_SMEM_BOUND+(b-NUMCORES4GC+1)*BAMBOO_SMEM_SIZE);
- unsigned int remain = boundptr - gcloads[sourcecore];
- unsigned int memneed = requiredmem + BAMBOO_CACHE_LINE_SIZE;
- *startaddr = gcloads[sourcecore];
- *tomove = gcfilledblocks[sourcecore] + 1;
- if(memneed < remain) {
- gcloads[sourcecore] += memneed;
- return 0;
- } else {
- // next available block
- gcfilledblocks[sourcecore] += 1;
- unsigned int newbase = 0;
- BASEPTR(sourcecore, gcfilledblocks[sourcecore], &newbase);
- gcloads[sourcecore] = newbase;
- return requiredmem-remain;
- }
-} // int assignSpareMem_I(int ,int * , int * , int * )
-
-// should be invoked with interrupt closed
-inline bool gcfindSpareMem_I(unsigned int * startaddr,
- unsigned int * tomove,
- unsigned int * dstcore,
- unsigned int requiredmem,
- unsigned int requiredcore) {
- for(int k = 0; k < NUMCORES4GC; k++) {
- if((gccorestatus[k] == 0) && (gcfilledblocks[k] < gcstopblock[k])) {
- // check if this stopped core has enough mem
- assignSpareMem_I(k, requiredmem, tomove, startaddr);
- *dstcore = k;
- return true;
- }
- }
- // if can not find spare mem right now, hold the request
- gcrequiredmems[requiredcore] = requiredmem;
- gcmovepending++;
- return false;
-} //bool gcfindSpareMem_I(int* startaddr,int* tomove,int mem,int core)
-
-inline bool compacthelper(struct moveHelper * orig,
- struct moveHelper * to,
- int * filledblocks,
- unsigned int * heaptopptr,
- bool * localcompact) {
- // scan over all objs in this block, compact the marked objs
- // loop stop when finishing either scanning all active objs or
- // fulfilled the gcstopblock
- GC_BAMBOO_DEBUGPRINT(0xe101);
- GC_BAMBOO_DEBUGPRINT_REG(gcblock2fill);
- GC_BAMBOO_DEBUGPRINT_REG(gcmarkedptrbound);
-innercompact:
- while((unsigned int)(orig->ptr) < (unsigned int)gcmarkedptrbound) {
- bool stop = moveobj(orig, to, gcblock2fill);
- if(stop) {
- break;
- }
- }
-#ifdef GC_TBL_DEBUG
- //tprintf("finish mark %x \n", (int)gcmarkedptrbound);
-#endif
-#ifdef GC_CACHE_ADAPT
- // end of an to page, wrap up its information
- samplingDataConvert(to->ptr);
-#endif // GC_CACHE_ADAPT
- // if no objs have been compact, do nothing,
- // otherwise, fill the header of this block
- if(to->offset > (unsigned int)BAMBOO_CACHE_LINE_SIZE) {
- BAMBOO_MEMSET_WH(to->base, '\0', BAMBOO_CACHE_LINE_SIZE);
- (*((int*)(to->base))) = to->offset;
- } else {
- to->offset = 0;
- to->ptr = to->base;
- to->top -= BAMBOO_CACHE_LINE_SIZE;
- } // if(to->offset > BAMBOO_CACHE_LINE_SIZE) else ...
- if(*localcompact) {
- *heaptopptr = to->ptr;
- *filledblocks = to->numblocks;
- }
- GC_BAMBOO_DEBUGPRINT(0xe102);
- GC_BAMBOO_DEBUGPRINT_REG(orig->ptr);
- GC_BAMBOO_DEBUGPRINT_REG(gcmarkedptrbound);
- GC_BAMBOO_DEBUGPRINT_REG(*heaptopptr);
- GC_BAMBOO_DEBUGPRINT_REG(*filledblocks);
- GC_BAMBOO_DEBUGPRINT_REG(gccurr_heaptop);
-
- // send msgs to core coordinator indicating that the compact is finishing
- // send compact finish message to core coordinator
- if(STARTUPCORE == BAMBOO_NUM_OF_CORE) {
- gcfilledblocks[BAMBOO_NUM_OF_CORE] = *filledblocks;
- gcloads[BAMBOO_NUM_OF_CORE] = *heaptopptr;
- if((unsigned int)(orig->ptr) < (unsigned int)gcmarkedptrbound) {
- GC_BAMBOO_DEBUGPRINT(0xe103);
- // ask for more mem
- gctomove = false;
- BAMBOO_ENTER_RUNTIME_MODE_FROM_CLIENT();
- if(gcfindSpareMem_I(&gcmovestartaddr, &gcblock2fill, &gcdstcore,
- gccurr_heaptop, BAMBOO_NUM_OF_CORE)) {
- GC_BAMBOO_DEBUGPRINT(0xe104);
- gctomove = true;
- } else {
- BAMBOO_ENTER_CLIENT_MODE_FROM_RUNTIME();
- GC_BAMBOO_DEBUGPRINT(0xe105);
- return false;
- }
- BAMBOO_ENTER_CLIENT_MODE_FROM_RUNTIME();
- } else {
- GC_BAMBOO_DEBUGPRINT(0xe106);
- gccorestatus[BAMBOO_NUM_OF_CORE] = 0;
- gctomove = false;
- return true;
- }
- } else {
- if((unsigned int)(orig->ptr) < (unsigned int)gcmarkedptrbound) {
- GC_BAMBOO_DEBUGPRINT(0xe107);
- // ask for more mem
- gctomove = false;
- send_msg_5(STARTUPCORE, GCFINISHCOMPACT, BAMBOO_NUM_OF_CORE,
- *filledblocks, *heaptopptr, gccurr_heaptop, false);
- } else {
- GC_BAMBOO_DEBUGPRINT(0xe108);
- GC_BAMBOO_DEBUGPRINT_REG(*heaptopptr);
- // finish compacting
- send_msg_5(STARTUPCORE, GCFINISHCOMPACT, BAMBOO_NUM_OF_CORE,
- *filledblocks, *heaptopptr, 0, false);
- }
- } // if(STARTUPCORE == BAMBOO_NUM_OF_CORE)
-
- if(orig->ptr < gcmarkedptrbound) {
- GC_BAMBOO_DEBUGPRINT(0xe109);
- // still have unpacked obj
- while(true) {
- if(gctomove) {
- break;
- }
- }
- ;
- gctomove = false;
- GC_BAMBOO_DEBUGPRINT(0xe10a);
-
- to->ptr = gcmovestartaddr;
- to->numblocks = gcblock2fill - 1;
- to->bound = (to->numblocks==0) ?
- BAMBOO_SMEM_SIZE_L :
- BAMBOO_SMEM_SIZE_L+BAMBOO_SMEM_SIZE*to->numblocks;
- BASEPTR(gcdstcore, to->numblocks, &(to->base));
- to->offset = to->ptr - to->base;
- to->top = (to->numblocks==0) ?
- (to->offset) : (to->bound-BAMBOO_SMEM_SIZE+to->offset);
- to->base = to->ptr;
- to->offset = BAMBOO_CACHE_LINE_SIZE;
- to->ptr += to->offset; // for header
- to->top += to->offset;
- if(gcdstcore == BAMBOO_NUM_OF_CORE) {
- *localcompact = true;
- } else {
- *localcompact = false;
- }
-#ifdef GC_CACHE_ADAPT
- // initialize the gc_cache_revise_information
- gc_cache_revise_infomation.to_page_start_va = (unsigned int)to->ptr;
- gc_cache_revise_infomation.to_page_end_va = gcbaseva+(BAMBOO_PAGE_SIZE)
- *(((unsigned int)(to->base)-gcbaseva)/(BAMBOO_PAGE_SIZE)+1);
- gc_cache_revise_infomation.to_page_index =
- ((unsigned int)(to->base)-gcbaseva)/(BAMBOO_PAGE_SIZE);
- gc_cache_revise_infomation.orig_page_start_va = orig->ptr;
- gc_cache_revise_infomation.orig_page_end_va = gcbaseva+(BAMBOO_PAGE_SIZE)
- *(((unsigned int)(orig->ptr)-gcbaseva)/(BAMBOO_PAGE_SIZE)+1);
- gc_cache_revise_infomation.orig_page_index =
- ((unsigned int)(orig->blockbase)-gcbaseva)/(BAMBOO_PAGE_SIZE);
-#endif // GC_CACHE_ADAPT
- goto innercompact;
- }
- GC_BAMBOO_DEBUGPRINT(0xe10b);
- return true;
-} // void compacthelper()
-
-inline void compact() {
- if(COMPACTPHASE != gcphase) {
- BAMBOO_EXIT(0xb023);
- }
-
- // initialize pointers for comapcting
- struct moveHelper * orig =
- (struct moveHelper *)RUNMALLOC(sizeof(struct moveHelper));
- struct moveHelper * to =
- (struct moveHelper *)RUNMALLOC(sizeof(struct moveHelper));
- if(!initOrig_Dst(orig, to)) {
- // no available data to compact
- // send compact finish msg to STARTUP core
- GC_BAMBOO_DEBUGPRINT(0xe001);
- GC_BAMBOO_DEBUGPRINT_REG(to->base);
- send_msg_5(STARTUPCORE, GCFINISHCOMPACT, BAMBOO_NUM_OF_CORE,
- 0, to->base, 0, false);
- RUNFREE(orig);
- RUNFREE(to);
- return;
- }
-#ifdef GC_CACHE_ADAPT
- gc_cache_revise_infomation.orig_page_start_va = (unsigned int)orig->ptr;
- gc_cache_revise_infomation.orig_page_end_va = gcbaseva+(BAMBOO_PAGE_SIZE)
- *(((unsigned int)(orig->ptr)-gcbaseva)/(BAMBOO_PAGE_SIZE)+1);
- gc_cache_revise_infomation.orig_page_index =
- ((unsigned int)(orig->blockbase)-gcbaseva)/(BAMBOO_PAGE_SIZE);
-#endif // GC_CACHE_ADAPT
-
- unsigned int filledblocks = 0;
- unsigned int heaptopptr = 0;
- bool localcompact = true;
- compacthelper(orig, to, &filledblocks, &heaptopptr, &localcompact);
- RUNFREE(orig);
- RUNFREE(to);
-} // compact()
-
-// if return NULL, means
-// 1. objptr is NULL
-// 2. objptr is not a shared obj
-// in these cases, remain the original value is OK
-#ifdef GC_TBL_DEBUG
-inline void * flushObj(void * objptr, int linenum, void * ptr, int tt) {
-#else
-inline void * flushObj(void * objptr) {
-#endif
- GC_BAMBOO_DEBUGPRINT(0xe401);
- if(objptr == NULL) {
- return NULL;
- }
- void * dstptr = NULL;
- if(ISSHAREDOBJ(objptr)) {
- GC_BAMBOO_DEBUGPRINT(0xe402);
- GC_BAMBOO_DEBUGPRINT_REG(objptr);
- // a shared obj ptr, change to new address
- dstptr = gcmappingtbl[OBJMAPPINGINDEX((unsigned int)objptr)];
- GC_BAMBOO_DEBUGPRINT_REG(dstptr);
-#ifdef GC_TBL_DEBUG
- if(ISSHAREDOBJ(dstptr) && ((unsigned int)(((int*)dstptr)[0]) >= (unsigned int)NUMTYPES)) {
- tprintf("Error flushObj ** : %x, %x, %d, %d, %d, %d, %x, %x, %x, %d, %x, %d %d \n",
- (int)objptr, (int)dstptr, ((int*)dstptr)[0], hostcore(objptr),
- hostcore(objptr)==BAMBOO_NUM_OF_CORE,
- OBJMAPPINGINDEX((unsigned int)objptr), (int)gcmappingtbl,
- &(gcmappingtbl[OBJMAPPINGINDEX((unsigned int)objptr)]),
- (int)gcbaseva, linenum, (int)ptr, ((int*)ptr)[0], tt);
- BAMBOO_EXIT(0xb024);
- }
-#endif
-
- if(!ISSHAREDOBJ(dstptr)) {
-#ifdef GC_TBL_DEBUG
- tprintf("Error flushObj ++ : %x, %x, %d, %d, %d, %x, %x, %x, %d, %x, %d %d \n",
- (int)objptr, (int)dstptr, hostcore(objptr),
- hostcore(objptr)==BAMBOO_NUM_OF_CORE,
- OBJMAPPINGINDEX((unsigned int)objptr), (int)gcmappingtbl,
- &(gcmappingtbl[OBJMAPPINGINDEX((unsigned int)objptr)]),
- (int)gcbaseva, linenum, (int)ptr, ((int*)ptr)[0], tt);
- tprintf("gcmappingtbl: \n");
- int tmp = OBJMAPPINGINDEX((unsigned int)objptr) - 50;
- for(int jj = 0; jj < 100; jj+=10) {
- tprintf("%8x, %8x, %8x, %8x, %8x, %8x, %8x, %8x, %8x, %8x, %d \n",
- (int)gcmappingtbl[tmp++], (int)gcmappingtbl[tmp++],
- (int)gcmappingtbl[tmp++], (int)gcmappingtbl[tmp++],
- (int)gcmappingtbl[tmp++], (int)gcmappingtbl[tmp++],
- (int)gcmappingtbl[tmp++], (int)gcmappingtbl[tmp++],
- (int)gcmappingtbl[tmp++], (int)gcmappingtbl[tmp++], tmp);
- }
- BAMBOO_EXIT(0xb025);
-#else
- // no mapping info
- GC_BAMBOO_DEBUGPRINT(0xe403);
- GC_BAMBOO_DEBUGPRINT_REG(objptr);
- GC_BAMBOO_DEBUGPRINT_REG(hostcore(objptr));
- // error! the obj is right on this core, but cannot find it
- GC_BAMBOO_DEBUGPRINT_REG(objptr);
- tprintf("Error flushObj ++ : %x, %x, %d, %d, %x, %x, %x, %x\n",
- (int)objptr, (int)dstptr, hostcore(objptr),
- hostcore(objptr)==BAMBOO_NUM_OF_CORE,
- OBJMAPPINGINDEX((unsigned int)objptr), (int)gcmappingtbl,
- &(gcmappingtbl[OBJMAPPINGINDEX((unsigned int)objptr)]),
- (int)gcbaseva);
- BAMBOO_EXIT(0xb026);
-#endif
- } // if(NULL == dstptr)
- } // if(ISSHAREDOBJ(objptr))
-#ifdef GC_TBL_DEBUG
- else {
- tprintf("Error flushObj: %x \n", (int)objptr);
- BAMBOO_EXIT(0xb027);
- }
-#endif
- // if not a shared obj, return NULL to indicate no need to flush
- GC_BAMBOO_DEBUGPRINT(0xe404);
- return dstptr;
-} // void flushObj(void * objptr)
-
-inline void flushRuntimeObj(struct garbagelist * stackptr) {
- int i,j;
- // flush current stack
- while(stackptr!=NULL) {
- for(i=0; i<stackptr->size; i++) {
- if(stackptr->array[i] != NULL) {
-#ifdef GC_TBL_DEBUG
- void * dst = flushObj(stackptr->array[i],
- __LINE__, stackptr->array[i], i);
-#else
- void * dst = flushObj(stackptr->array[i]);
-#endif
- if(dst != NULL) {
- stackptr->array[i] = dst;
- }
- }
- }
- stackptr=stackptr->next;
- }
-
- // flush static pointers global_defs_p
- if(STARTUPCORE == BAMBOO_NUM_OF_CORE) {
- struct garbagelist * staticptr=(struct garbagelist *)global_defs_p;
- for(i=0; i<staticptr->size; i++) {
- if(staticptr->array[i] != NULL) {
-#ifdef GC_TBL_DEBUG
- void * dst = flushObj(staticptr->array[i],
- __LINE__, staticptr->array[i], i);
-#else
- void * dst = flushObj(staticptr->array[i]);
-#endif
- if(dst != NULL) {
- staticptr->array[i] = dst;
- }
- }
- }
- }
-
-#ifdef TASK
- // flush objectsets
- if(BAMBOO_NUM_OF_CORE < NUMCORESACTIVE) {
- for(i=0; i<NUMCLASSES; i++) {
- struct parameterwrapper ** queues =
- objectqueues[BAMBOO_NUM_OF_CORE][i];
- int length = numqueues[BAMBOO_NUM_OF_CORE][i];
- for(j = 0; j < length; ++j) {
- struct parameterwrapper * parameter = queues[j];
- struct ObjectHash * set=parameter->objectset;
- struct ObjectNode * ptr=set->listhead;
- while(ptr!=NULL) {
-#ifdef GC_TBL_DEBUG
- void * dst = flushObj((void *)ptr->key,
- __LINE__, (void *)ptr->key, 0);
-#else
- void * dst = flushObj((void *)ptr->key);
-#endif
- if(dst != NULL) {
- ptr->key = dst;
- }
- ptr=ptr->lnext;
- }
- ObjectHashrehash(set);
- }
- }
- }
-
- // flush current task descriptor
- if(currtpd != NULL) {
- for(i=0; i<currtpd->numParameters; i++) {
-#ifdef GC_TBL_DEBUG
- void * dst = flushObj(currtpd->parameterArray[i],
- __LINE__, currtpd->parameterArray[i], i);
-#else
- void * dst = flushObj(currtpd->parameterArray[i]);
-#endif
- if(dst != NULL) {
- currtpd->parameterArray[i] = dst;
- }
- }
- }
-
- // flush active tasks
- if(activetasks != NULL) {
- struct genpointerlist * ptr=activetasks->list;
- while(ptr!=NULL) {
- struct taskparamdescriptor *tpd=ptr->src;
- int i;
- for(i=0; i<tpd->numParameters; i++) {
-#ifdef GC_TBL_DEBUG
- void * dst = flushObj(tpd->parameterArray[i],
- __LINE__, tpd->parameterArray[i], i);
-#else
- void * dst = flushObj(tpd->parameterArray[i]);
-#endif
- if(dst != NULL) {
- tpd->parameterArray[i] = dst;
- }
- }
- ptr=ptr->inext;
- }
- genrehash(activetasks);
- }
-
- // flush cached transferred obj
- struct QueueItem * tmpobjptr = getHead(&objqueue);
- while(tmpobjptr != NULL) {
- struct transObjInfo * objInfo =
- (struct transObjInfo *)(tmpobjptr->objectptr);
-#ifdef GC_TBL_DEBUG
- void * dst = flushObj(objInfo->objptr, __LINE__,
- objInfo->objptr, 0);
-#else
- void * dst = flushObj(objInfo->objptr);
-#endif
- if(dst != NULL) {
- objInfo->objptr = dst;
- }
- tmpobjptr = getNextQueueItem(tmpobjptr);
- }
-
- // flush cached objs to be transferred
- struct QueueItem * item = getHead(totransobjqueue);
- while(item != NULL) {
- struct transObjInfo * totransobj =
- (struct transObjInfo *)(item->objectptr);
-#ifdef GC_TBL_DEBUG
- void * dst = flushObj(totransobj->objptr, __LINE__,
- totransobj->objptr, 0);
-#else
- void * dst = flushObj(totransobj->objptr);
-#endif
- if(dst != NULL) {
- totransobj->objptr = dst;
- }
- item = getNextQueueItem(item);
- } // while(item != NULL)
-
- // enqueue lock related info
- for(i = 0; i < runtime_locklen; ++i) {
-#ifdef GC_TBL_DEBUG
- void * dst = flushObj(runtime_locks[i].redirectlock,
- __LINE__, runtime_locks[i].redirectlock, i);
-#else
- void * dst = flushObj(runtime_locks[i].redirectlock);
-#endif
- if(dst != NULL) {
- runtime_locks[i].redirectlock = (int)dst;
- }
- if(runtime_locks[i].value != NULL) {
-#ifdef GC_TBL_DEBUG
- void * dst=flushObj(runtime_locks[i].value,
- __LINE__, runtime_locks[i].value, i);
-#else
- void * dst=flushObj(runtime_locks[i].value);
-#endif
- if(dst != NULL) {
- runtime_locks[i].value = (int)dst;
- }
- }
- }
-#endif
-
-#ifdef MGC
- // flush the bamboo_threadlocks
- for(i = 0; i < bamboo_threadlocks.index; i++) {
-#ifdef GC_TBL_DEBUG
- void * dst = flushObj((void *)(bamboo_threadlocks.locks[i].object),
- __LINE__, (void *)(bamboo_threadlocks.locks[i].object), i);
-#else
- void * dst = flushObj((void *)(bamboo_threadlocks.locks[i].object));
-#endif
- if(dst != NULL) {
- bamboo_threadlocks.locks[i].object = (struct ___Object___ *)dst;
- }
- }
-
- // flush the bamboo_current_thread
- if(bamboo_current_thread != 0) {
-#ifdef GC_TBL_DEBUG
- bamboo_current_thread =
- (unsigned int)(flushObj((void *)bamboo_current_thread,
- __LINE__, (void *)bamboo_current_thread, 0));
-#else
- bamboo_current_thread =
- (unsigned int)(flushObj((void *)bamboo_current_thread));
-#endif
- }
-
- // flush global thread queue
- if(STARTUPCORE == BAMBOO_NUM_OF_CORE) {
- unsigned int thread_counter = *((unsigned int*)(bamboo_thread_queue+1));
- if(thread_counter > 0) {
- unsigned int start = *((unsigned int*)(bamboo_thread_queue+2));
- for(i = thread_counter; i > 0; i--) {
-#ifdef GC_TBL_DEBUG
- bamboo_thread_queue[4+start] =
- (INTPTR)(flushObj((void *)bamboo_thread_queue[4+start
- ], __LINE__, (void *)bamboo_thread_queue, 0));
-#else
- bamboo_thread_queue[4+start] =
- (INTPTR)(flushObj((void *)bamboo_thread_queue[4+start]));
-#endif
- start = (start+1)&bamboo_max_thread_num_mask;
- }
- }
- unlockthreadqueue();
- }
-#endif
-} // void flushRuntimeObj(struct garbagelist * stackptr)
-
-inline void flush(struct garbagelist * stackptr) {
-
- flushRuntimeObj(stackptr);
-
- while(true) {
- BAMBOO_ENTER_RUNTIME_MODE_FROM_CLIENT();
- bool hasItems = gc_moreItems_I();
- BAMBOO_ENTER_CLIENT_MODE_FROM_RUNTIME();
- if(!hasItems) {
- break;
- }
-
- GC_BAMBOO_DEBUGPRINT(0xe301);
- BAMBOO_ENTER_RUNTIME_MODE_FROM_CLIENT();
- void * ptr = gc_dequeue_I();
-#ifdef GC_TBL_DEBUG
- unsigned int bkptr = (unsigned int)ptr;
-#endif
- BAMBOO_ENTER_CLIENT_MODE_FROM_RUNTIME();
- if(ISSHAREDOBJ(ptr)) {
- // should be a local shared obj and should have mapping info
-#ifdef GC_TBL_DEBUG
- ptr = flushObj(ptr, __LINE__, ptr, 0);
-#else
- ptr = flushObj(ptr);
-#endif
- GC_BAMBOO_DEBUGPRINT(0xe302);
- GC_BAMBOO_DEBUGPRINT_REG(ptr);
- if(ptr == NULL) {
- BAMBOO_EXIT(0xb028);
- }
- } // if(ISSHAREDOBJ(ptr))
- if((!ISSHAREDOBJ(ptr))||(((int *)(ptr))[BAMBOOMARKBIT] == COMPACTED)) {
- int type = ((int *)(ptr))[0];
-#ifdef GC_TBL_DEBUG
- if((unsigned int)type >= (unsigned int)NUMTYPES) {
- tprintf("Error flushObj %x, %x, %d, %d \n", bkptr, (int)ptr, type,
- ((int *)(ptr))[BAMBOOMARKBIT]);
- BAMBOO_EXIT(0xb029);
- }
-#endif
- // scan all pointers in ptr
- unsigned int * pointer;
- pointer=pointerarray[type];
- GC_BAMBOO_DEBUGPRINT(0xe303);
- GC_BAMBOO_DEBUGPRINT_REG(pointer);
- if (pointer==0) {
- /* Array of primitives */
- /* Do nothing */
- } else if (((unsigned int)pointer)==1) {
- GC_BAMBOO_DEBUGPRINT(0xe304);
- /* Array of pointers */
- struct ArrayObject *ao=(struct ArrayObject *) ptr;
- int length=ao->___length___;
- int j;
- for(j=0; j<length; j++) {
- GC_BAMBOO_DEBUGPRINT(0xe305);
- void *objptr=
- ((void **)(((char *)&ao->___length___)+sizeof(int)))[j];
- GC_BAMBOO_DEBUGPRINT_REG(objptr);
- if(objptr != NULL) {
-#ifdef GC_TBL_DEBUG
- void * dst = flushObj(objptr, __LINE__, ptr, j);
-#else
- void * dst = flushObj(objptr);
-#endif
- if(dst != NULL) {
- ((void **)(((char *)&ao->___length___)+sizeof(int)))[j] = dst;
- }
- }
- }
- } else {
- GC_BAMBOO_DEBUGPRINT(0xe306);
- unsigned int size=pointer[0];
- int i;
- for(i=1; i<=size; i++) {
- GC_BAMBOO_DEBUGPRINT(0xe307);
- unsigned int offset=pointer[i];
- void * objptr=*((void **)(((char *)ptr)+offset));
- GC_BAMBOO_DEBUGPRINT_REG(objptr);
- if(objptr != NULL) {
-#ifdef GC_TBL_DEBUG
- void * dst = flushObj(objptr, __LINE__, ptr, i);
-#else
- void * dst = flushObj(objptr);
-#endif
- if(dst != NULL) {
- *((void **)(((char *)ptr)+offset)) = dst;
- }
- }
- } // for(i=1; i<=size; i++)
- } // if (pointer==0) else if (((INTPTR)pointer)==1) else ()
- {
- pointer=pointerarray[OBJECTTYPE];
- //handle object class
- unsigned int size=pointer[0];
- int i;
- for(i=1; i<=size; i++) {
- unsigned int offset=pointer[i];
- void * objptr=*((void **)(((char *)ptr)+offset));
- if(objptr != NULL) {
-#ifdef GC_TBL_DEBUG
- void * dst = flushObj(objptr, __LINE__, ptr, i);
-#else
- void * dst = flushObj(objptr);
-#endif
- if(dst != NULL) {
- *((void **)(((char *)ptr)+offset)) = dst;
- }
- }
- }
- }
- // restore the mark field, indicating that this obj has been flushed
- if(ISSHAREDOBJ(ptr)) {
- ((int *)(ptr))[BAMBOOMARKBIT] = INIT;
- }
- } //if((!ISSHAREDOBJ(ptr))||(((int *)(ptr))[BAMBOOMARKBIT] == COMPACTED))
- } // while(gc_moreItems())
- GC_BAMBOO_DEBUGPRINT(0xe308);
-
- // TODO bug here: the startup core contains all lobjs' info, thus all the
- // lobjs are flushed in sequence.
- // flush lobjs
- while(gc_lobjmoreItems_I()) {
- GC_BAMBOO_DEBUGPRINT(0xe309);
- void * ptr = gc_lobjdequeue_I(NULL, NULL);
-#ifdef GC_TBL_DEBUG
- ptr = flushObj(ptr, __LINE__, ptr, 0);
-#else
- ptr = flushObj(ptr);
-#endif
- GC_BAMBOO_DEBUGPRINT(0xe30a);
- GC_BAMBOO_DEBUGPRINT_REG(ptr);
- GC_BAMBOO_DEBUGPRINT_REG(((int *)(ptr))[0]);
- if(ptr == NULL) {
- BAMBOO_EXIT(0xb02a);
- }
- if(((int *)(ptr))[BAMBOOMARKBIT] == COMPACTED) {
- int type = ((int *)(ptr))[0];
- // scan all pointers in ptr
- unsigned int * pointer;
- pointer=pointerarray[type];
- GC_BAMBOO_DEBUGPRINT(0xe30b);
- GC_BAMBOO_DEBUGPRINT_REG(pointer);
- if (pointer==0) {
- /* Array of primitives */
- /* Do nothing */
- } else if (((unsigned int)pointer)==1) {
- GC_BAMBOO_DEBUGPRINT(0xe30c);
- /* Array of pointers */
- struct ArrayObject *ao=(struct ArrayObject *) ptr;
- int length=ao->___length___;
- int j;
- for(j=0; j<length; j++) {
- GC_BAMBOO_DEBUGPRINT(0xe30d);
- void *objptr=
- ((void **)(((char *)&ao->___length___)+sizeof(int)))[j];
- GC_BAMBOO_DEBUGPRINT_REG(objptr);
- if(objptr != NULL) {
-#ifdef GC_TBL_DEBUG
- void * dst = flushObj(objptr, __LINE__, ptr, j);
-#else
- void * dst = flushObj(objptr);
-#endif
- if(dst != NULL) {
- ((void **)(((char *)&ao->___length___)+sizeof(int)))[j] = dst;
- }
- }
- }
- } else {
- GC_BAMBOO_DEBUGPRINT(0xe30e);
- unsigned int size=pointer[0];
- int i;
- for(i=1; i<=size; i++) {
- GC_BAMBOO_DEBUGPRINT(0xe30f);
- unsigned int offset=pointer[i];
- void * objptr=*((void **)(((char *)ptr)+offset));
-
- GC_BAMBOO_DEBUGPRINT_REG(objptr);
- if(objptr != NULL) {
-#ifdef GC_TBL_DEBUG
- void * dst = flushObj(objptr, __LINE__, ptr, i);
-#else
- void * dst = flushObj(objptr);
-#endif
- if(dst != NULL) {
- *((void **)(((char *)ptr)+offset)) = dst;
- }
- }
- } // for(i=1; i<=size; i++)
- } // if (pointer==0) else if (((INTPTR)pointer)==1) else ()
- {
- pointer=pointerarray[OBJECTTYPE];
- //handle object class
- unsigned int size=pointer[0];
- int i;
- for(i=1; i<=size; i++) {
- unsigned int offset=pointer[i];
- void * objptr=*((void **)(((char *)ptr)+offset));
- if(objptr != NULL) {
-#ifdef GC_TBL_DEBUG
- void * dst = flushObj(objptr, __LINE__, ptr, i);
-#else
- void * dst = flushObj(objptr);
-#endif
- if(dst != NULL) {
- *((void **)(((char *)ptr)+offset)) = dst;
- }
- }
- }
- }
- // restore the mark field, indicating that this obj has been flushed
- ((int *)(ptr))[BAMBOOMARKBIT] = INIT;
- } // if(((int *)(ptr))[BAMBOOMARKBIT] == COMPACTED)
- } // while(gc_lobjmoreItems())
- GC_BAMBOO_DEBUGPRINT(0xe310);
-
- // send flush finish message to core coordinator
- if(STARTUPCORE == BAMBOO_NUM_OF_CORE) {
- gccorestatus[BAMBOO_NUM_OF_CORE] = 0;
- } else {
- send_msg_2(STARTUPCORE, GCFINISHFLUSH, BAMBOO_NUM_OF_CORE, false);
- }
- GC_BAMBOO_DEBUGPRINT(0xe311);
-} // flush()
-
-#ifdef GC_CACHE_ADAPT
-// prepare for cache adaption:
-// -- flush the shared heap
-// -- clean dtlb entries
-// -- change cache strategy
-void cacheAdapt_gc(bool isgccachestage) {
- // flush the shared heap
- BAMBOO_CACHE_FLUSH_L2();
-
- // clean the dtlb entries
- BAMBOO_CLEAN_DTLB();
-
- // change the cache strategy
- gccachestage = isgccachestage;
-} // cacheAdapt_gc(bool isgccachestage)
-
-// the master core decides how to adapt cache strategy for the mutator
-// according to collected statistic data
-
-// make all pages hfh
-int cacheAdapt_policy_h4h(){
- unsigned int page_index = 0;
- VA page_sva = 0;
- unsigned int page_num = (BAMBOO_SHARED_MEM_SIZE) / (BAMBOO_PAGE_SIZE);
- unsigned int numchanged = 0;
- int * tmp_p = gccachepolicytbl+1;
- for(page_index = 0; page_index < page_num; page_index++) {
- page_sva = gcbaseva + (BAMBOO_PAGE_SIZE) * page_index;
- bamboo_cache_policy_t policy = {0};
- policy.cache_mode = BAMBOO_CACHE_MODE_HASH;
- *tmp_p = page_index;
- tmp_p++;
- *tmp_p = policy.word;
- tmp_p++;
- numchanged++;
- }
-
- return numchanged;
-} // int cacheAdapt_policy_hfh()
-
-// make all pages local as non-cache-adaptable gc local mode
-int cacheAdapt_policy_local(){
- unsigned int page_index = 0;
- VA page_sva = 0;
- unsigned int page_num = (BAMBOO_SHARED_MEM_SIZE) / (BAMBOO_PAGE_SIZE);
- unsigned int numchanged = 0;
- int * tmp_p = gccachepolicytbl+1;
- for(page_index = 0; page_index < page_num; page_index++) {
- page_sva = gcbaseva + (BAMBOO_PAGE_SIZE) * page_index;
- bamboo_cache_policy_t policy = {0};
- unsigned int block = 0;
- BLOCKINDEX(page_sva, &block);
- unsigned int coren = gc_block2core[block%(NUMCORES4GC*2)];
- // locally cache the page in the hotest core
- // NOTE: (x,y) should be changed to (x+1, y+1)!!!
- policy.cache_mode = BAMBOO_CACHE_MODE_COORDS;
- policy.lotar_x = bamboo_cpu2coords[2*coren]+1;
- policy.lotar_y = bamboo_cpu2coords[2*coren+1]+1;
- *tmp_p = page_index;
- tmp_p++;
- *tmp_p = policy.word;
- tmp_p++;
- numchanged++;
- }
-
- return numchanged;
-} // int cacheAdapt_policy_local()
-
-int cacheAdapt_policy_hotest(){
- unsigned int page_index = 0;
- VA page_sva = 0;
- unsigned int page_num = (BAMBOO_SHARED_MEM_SIZE) / (BAMBOO_PAGE_SIZE);
- unsigned int numchanged = 0;
- int * tmp_p = gccachepolicytbl+1;
- for(page_index = 0; page_index < page_num; page_index++) {
- page_sva = gcbaseva + (BAMBOO_PAGE_SIZE) * page_index;
- bamboo_cache_policy_t policy = {0};
- unsigned int hotestcore = 0;
- unsigned int hotfreq = 0;
-
- int *local_tbl=&gccachesamplingtbl_r[page_index];
- for(int i = 0; i < NUMCORESACTIVE; i++) {
- int freq = *local_tbl;
- local_tbl=(int *)(((char *)local_tbl)+size_cachesamplingtbl_local_r);
-
- // check the freqency, decide if this page is hot for the core
- if(hotfreq < freq) {
- hotfreq = freq;
- hotestcore = i;
- }
- }
- // TODO
- // Decide the cache strategy for this page
- // If decide to adapt a new cache strategy, write into the shared block of
- // the gcsharedsamplingtbl. The mem recording information that has been
- // written is enough to hold the information.
- // Format: page start va + cache strategy(hfh/(host core+[x,y]))
- if(hotfreq == 0) {
- // this page has not been accessed, do not change its cache policy
- continue;
- } else {
- // locally cache the page in the hotest core
- // NOTE: (x,y) should be changed to (x+1, y+1)!!!
- policy.cache_mode = BAMBOO_CACHE_MODE_COORDS;
- policy.lotar_x = bamboo_cpu2coords[2*hotestcore]+1;
- policy.lotar_y = bamboo_cpu2coords[2*hotestcore+1]+1;
- *tmp_p = page_index;
- tmp_p++;
- *tmp_p = policy.word;
- tmp_p++;
- numchanged++;
- }
- }
-
- return numchanged;
-} // int cacheAdapt_policy_hotest()
-
-#define GC_CACHE_ADAPT_DOMINATE_THRESHOLD 50
-// cache the page on the core that accesses it the most if that core accesses
-// it more than (GC_CACHE_ADAPT_DOMINATE_THRESHOLD)% of the total. Otherwise,
-// h4h the page.
-int cacheAdapt_policy_dominate(){
- unsigned int page_index = 0;
- VA page_sva = 0;
- unsigned int page_num = (BAMBOO_SHARED_MEM_SIZE) / (BAMBOO_PAGE_SIZE);
- unsigned int numchanged = 0;
- int * tmp_p = gccachepolicytbl+1;
- for(page_index = 0; page_index < page_num; page_index++) {
- page_sva = gcbaseva + (BAMBOO_PAGE_SIZE) * page_index;
- bamboo_cache_policy_t policy = {0};
- unsigned int hotestcore = 0;
- unsigned long long totalfreq = 0;
- unsigned int hotfreq = 0;
-
- int *local_tbl=&gccachesamplingtbl_r[page_index];
- for(int i = 0; i < NUMCORESACTIVE; i++) {
- int freq = *local_tbl;
- local_tbl=(int *)(((char *)local_tbl)+size_cachesamplingtbl_local_r);
- totalfreq += freq;
- // check the freqency, decide if this page is hot for the core
- if(hotfreq < freq) {
- hotfreq = freq;
- hotestcore = i;
- }
- }
-
- // Decide the cache strategy for this page
- // If decide to adapt a new cache strategy, write into the shared block of
- // the gcpolicytbl
- // Format: page start va + cache policy
- if(hotfreq == 0) {
- // this page has not been accessed, do not change its cache policy
- continue;
- }
- totalfreq =
- (totalfreq*GC_CACHE_ADAPT_DOMINATE_THRESHOLD)/100/BAMBOO_PAGE_SIZE;
- hotfreq/=BAMBOO_PAGE_SIZE;
- if(hotfreq < totalfreq) {
- // use hfh
- policy.cache_mode = BAMBOO_CACHE_MODE_HASH;
- } else {
- // locally cache the page in the hotest core
- // NOTE: (x,y) should be changed to (x+1, y+1)!!!
- policy.cache_mode = BAMBOO_CACHE_MODE_COORDS;
- policy.lotar_x = bamboo_cpu2coords[2*hotestcore]+1;
- policy.lotar_y = bamboo_cpu2coords[2*hotestcore+1]+1;
- }
- *tmp_p = page_index;
- tmp_p++;
- *tmp_p = policy.word;
- tmp_p++;
- numchanged++;
- }
-
- return numchanged;
-} // int cacheAdapt_policy_dominate()
-
-#define GC_CACHE_ADAPT_OVERLOAD_THRESHOLD 10
-
-void gc_quicksort(unsigned long long *array,
- unsigned int left,
- unsigned int right,
- unsigned int offset) {
- unsigned int pivot = 0;;
- unsigned int leftIdx = left;
- unsigned int rightIdx = right;
- if((right-left+1) >= 1) {
- pivot = (left+right)/2;
- while((leftIdx <= pivot) && (rightIdx >= pivot)) {
- unsigned long long pivotValue = array[pivot*3-offset];
- while((array[leftIdx*3-offset] > pivotValue) && (leftIdx <= pivot)) {
- leftIdx++;
- }
- while((array[rightIdx*3-offset] < pivotValue) && (rightIdx >= pivot)) {
- rightIdx--;
- }
- // swap [leftIdx] & [rightIdx]
- for(int k = 0; k < 3; k++) {
- unsigned long long tmp = array[3*rightIdx-k];
- array[3*rightIdx-k] = array[3*leftIdx-k];
- array[3*leftIdx-k] = tmp;
- }
- leftIdx++;
- rightIdx--;
- if((leftIdx-1) == pivot) {
- pivot = rightIdx = rightIdx + 1;
- } else if((leftIdx+1) == pivot) {
- pivot = leftIdx = leftIdx-1;
- }
- }
- gc_quicksort(array, left, pivot-1, offset);
- gc_quicksort(array, pivot+1, right, offset);
- }
- return;
-} // void gc_quicksort(...)
-
// Cache-adapt policy "overload":
// Every page cached on the core that accesses it the most.
// Check to see if any core's pages total more accesses than threshold
// total_workload/GC_CACHE_ADAPT_OVERLOAD_THRESHOLD. If so, find the pages
// with the most remote accesses and hash-for-home them until we get below
// the threshold.
// Returns the number of (page_index, policy word) entries written into
// gccachepolicytbl (starting at slot [1]; slot [0] is filled by the caller).
int cacheAdapt_policy_overload(){
  unsigned int page_index = 0;
  VA page_sva = 0;
  unsigned int page_num = (BAMBOO_SHARED_MEM_SIZE) / (BAMBOO_PAGE_SIZE);
  unsigned int numchanged = 0;
  // output cursor: entries are (page_index, policy word) pairs at [1..]
  int * tmp_p = gccachepolicytbl+1;
  // per-core sum of page-size-normalized access frequencies
  unsigned long long workload[NUMCORESACTIVE];
  memset(workload, 0, NUMCORESACTIVE*sizeof(unsigned long long));
  unsigned long long total_workload = 0;
  // per-core heavy-page list: slot [0] holds the entry count, then triples
  // (policy-slot address, totalfreq, remoteaccess), one triple per page
  // whose hottest core is this core.
  // NOTE(review): this VLA is NUMCORESACTIVE*(page_num*3+1) entries on the
  // stack -- assumes the stack is large enough for big heaps; confirm.
  unsigned long long core2heavypages[NUMCORESACTIVE][page_num*3+1];
  memset(core2heavypages,0,
      sizeof(unsigned long long)*(page_num*3+1)*NUMCORESACTIVE);
  for(page_index = 0; page_index < page_num; page_index++) {
    page_sva = gcbaseva + (BAMBOO_PAGE_SIZE) * page_index;
    bamboo_cache_policy_t policy = {0};
    unsigned int hotestcore = 0;
    unsigned long long totalfreq = 0;
    unsigned int hotfreq = 0;

    // scan each core's sample counter for this page; the per-core tables
    // are laid out back to back, size_cachesamplingtbl_local_r bytes apart
    int *local_tbl=&gccachesamplingtbl_r[page_index];
    for(int i = 0; i < NUMCORESACTIVE; i++) {
      int freq = *local_tbl;
      local_tbl=(int *)(((char *)local_tbl)+size_cachesamplingtbl_local_r);
      totalfreq += freq;
      // check the frequency, decide if this page is hot for the core
      if(hotfreq < freq) {
        hotfreq = freq;
        hotestcore = i;
      }
    }
    // Decide the cache strategy for this page
    // If decide to adapt a new cache strategy, write into the shared block of
    // the gcsharedsamplingtbl. The mem recording information that has been
    // written is enough to hold the information.
    // Format: page start va + cache strategy(hfh/(host core+[x,y]))
    if(hotfreq == 0) {
      // this page has not been accessed, do not change its cache policy
      continue;
    }

    // normalize the counters to per-page units
    totalfreq/=BAMBOO_PAGE_SIZE;
    hotfreq/=BAMBOO_PAGE_SIZE;
    // locally cache the page in the hottest core
    // NOTE: (x,y) should be changed to (x+1, y+1)!!!
    policy.cache_mode = BAMBOO_CACHE_MODE_COORDS;
    policy.lotar_x = bamboo_cpu2coords[2*hotestcore]+1;
    policy.lotar_y = bamboo_cpu2coords[2*hotestcore+1]+1;
    *tmp_p = page_index;
    tmp_p++;
    *tmp_p = policy.word;
    tmp_p++;
    numchanged++;
    workload[hotestcore] += totalfreq;
    total_workload += totalfreq;
    // append this page to the hottest core's heavy-page list; (tmp_p-1)
    // is the address of the policy word just written, saved so the second
    // pass can overwrite it in place
    unsigned long long remoteaccess = totalfreq - hotfreq;
    unsigned int index = (unsigned int)core2heavypages[hotestcore][0];
    core2heavypages[hotestcore][3*index+3] = remoteaccess;
    core2heavypages[hotestcore][3*index+2] = totalfreq;
    core2heavypages[hotestcore][3*index+1] = (unsigned long long)(tmp_p-1);
    core2heavypages[hotestcore][0]++;
  }

  unsigned long long workload_threshold =
    total_workload/GC_CACHE_ADAPT_OVERLOAD_THRESHOLD;
  // Check the workload of each core
  for(int i = 0; i < NUMCORESACTIVE; i++) {
    int j = 1;
    unsigned int index = (unsigned int)core2heavypages[i][0];
    if(workload[i] > workload_threshold) {
      // sort the triples by remoteaccess, descending (key slot: offset 0)
      gc_quicksort(&core2heavypages[i][0], 1, index, 0);
      while((workload[i] > workload_threshold) && (j<index*3)) {
        // hfh those pages with more remote accesses
        bamboo_cache_policy_t policy = {0};
        policy.cache_mode = BAMBOO_CACHE_MODE_HASH;
        // overwrite the policy word emitted in the first pass; slot [j]
        // stores the address of that entry in gccachepolicytbl
        *((unsigned int*)core2heavypages[i][j]) = policy.word;
        workload[i] -= core2heavypages[i][j+1];
        j += 3;
      }
    }
  }

  return numchanged;
} // int cacheAdapt_policy_overload()
-
#define GC_CACHE_ADAPT_ACCESS_THRESHOLD 70
#define GC_CACHE_ADAPT_CROWD_THRESHOLD 20
// Cache-adapt policy "crowd":
// Every page cached on the core that accesses it the most.
// Check to see if any core's pages total more accesses than threshold
// GC_CACHE_ADAPT_OVERLOAD_THRESHOLD. If so, find the pages with the
// most remote accesses and hash-for-home them until we get below
// GC_CACHE_ADAPT_OVERLOAD_THRESHOLD.
// Sort pages based on activity....
// If more than GC_CACHE_ADAPT_ACCESS_THRESHOLD% of the accesses for a
// core's pages are from more than GC_CACHE_ADAPT_CROWD_THRESHOLD pages,
// then start hfh these pages (selecting the ones with the most remote
// accesses first or fewest local accesses) until we get below
// GC_CACHE_ADAPT_CROWD_THRESHOLD pages.
// Returns the number of (page_index, policy word) entries written into
// gccachepolicytbl.
int cacheAdapt_policy_crowd(){
  unsigned int page_index = 0;
  VA page_sva = 0;
  unsigned int page_num = (BAMBOO_SHARED_MEM_SIZE) / (BAMBOO_PAGE_SIZE);
  unsigned int numchanged = 0;
  // output cursor: entries are (page_index, policy word) pairs at [1..]
  int * tmp_p = gccachepolicytbl+1;
  // per-core sum of page-size-normalized access frequencies
  unsigned long long workload[NUMCORESACTIVE];
  memset(workload, 0, NUMCORESACTIVE*sizeof(unsigned long long));
  unsigned long long total_workload = 0;
  // per-core heavy-page list: slot [0] holds the entry count, then triples
  // (policy-slot address, totalfreq, remoteaccess) per hot page
  unsigned long long core2heavypages[NUMCORESACTIVE][page_num*3+1];
  memset(core2heavypages,0,
      sizeof(unsigned long long)*(page_num*3+1)*NUMCORESACTIVE);
  for(page_index = 0; page_index < page_num; page_index++) {
    page_sva = gcbaseva + (BAMBOO_PAGE_SIZE) * page_index;
    bamboo_cache_policy_t policy = {0};
    unsigned int hotestcore = 0;
    unsigned long long totalfreq = 0;
    unsigned int hotfreq = 0;

    // scan each core's sample counter for this page; per-core tables are
    // size_cachesamplingtbl_local_r bytes apart
    int *local_tbl=&gccachesamplingtbl_r[page_index];
    for(int i = 0; i < NUMCORESACTIVE; i++) {
      int freq = *local_tbl;
      local_tbl=(int *)(((char *)local_tbl)+size_cachesamplingtbl_local_r);
      totalfreq += freq;
      // check the frequency, decide if this page is hot for the core
      if(hotfreq < freq) {
        hotfreq = freq;
        hotestcore = i;
      }
    }
    // Decide the cache strategy for this page
    // If decide to adapt a new cache strategy, write into the shared block of
    // the gcsharedsamplingtbl. The mem recording information that has been
    // written is enough to hold the information.
    // Format: page start va + cache strategy(hfh/(host core+[x,y]))
    if(hotfreq == 0) {
      // this page has not been accessed, do not change its cache policy
      continue;
    }
    // normalize the counters to per-page units
    totalfreq/=BAMBOO_PAGE_SIZE;
    hotfreq/=BAMBOO_PAGE_SIZE;
    // locally cache the page in the hottest core
    // NOTE: (x,y) should be changed to (x+1, y+1)!!!
    policy.cache_mode = BAMBOO_CACHE_MODE_COORDS;
    policy.lotar_x = bamboo_cpu2coords[2*hotestcore]+1;
    policy.lotar_y = bamboo_cpu2coords[2*hotestcore+1]+1;
    *tmp_p = page_index;
    tmp_p++;
    *tmp_p = policy.word;
    tmp_p++;
    numchanged++;
    workload[hotestcore] += totalfreq;
    total_workload += totalfreq;
    // append this page to the hottest core's heavy-page list; (tmp_p-1) is
    // the address of the policy word just emitted
    unsigned long long remoteaccess = totalfreq - hotfreq;
    unsigned int index = (unsigned int)core2heavypages[hotestcore][0];
    core2heavypages[hotestcore][3*index+3] = remoteaccess;
    core2heavypages[hotestcore][3*index+2] = totalfreq;
    core2heavypages[hotestcore][3*index+1] = (unsigned long long)(tmp_p-1);
    core2heavypages[hotestcore][0]++;
  }

  unsigned long long workload_threshold =
    total_workload / GC_CACHE_ADAPT_OVERLOAD_THRESHOLD;
  // Check the workload of each core
  for(int i = 0; i < NUMCORESACTIVE; i++) {
    int j = 1;
    unsigned int index = (unsigned int)core2heavypages[i][0];
    if(workload[i] > workload_threshold) {
      // sort according to the remoteaccess, descending (key: offset 0)
      gc_quicksort(&core2heavypages[i][0], 1, index, 0);
      while((workload[i] > workload_threshold) && (j<index*3)) {
        // hfh those pages with more remote accesses; slot [j] stores the
        // address of the page's policy word in gccachepolicytbl
        bamboo_cache_policy_t policy = {0};
        policy.cache_mode = BAMBOO_CACHE_MODE_HASH;
        *((unsigned int*)core2heavypages[i][j]) = policy.word;
        workload[i] -= core2heavypages[i][j+1];
        j += 3;
      }
    }

    // Check if the accesses are crowded on few pages
    // sort according to the total access
inner_crowd:
    // sort the remaining (not yet hfh'ed) entries by totalfreq (offset 1)
    gc_quicksort(&core2heavypages[i][0], j/3+1, index, 1);
    unsigned long long threshold =
      GC_CACHE_ADAPT_ACCESS_THRESHOLD*workload[i]/100;
    int num_crowded = 0;
    unsigned long long t_workload = 0;
    // count how many of the busiest pages are needed to cover
    // GC_CACHE_ADAPT_ACCESS_THRESHOLD% of this core's remaining workload.
    // NOTE(review): past the last real entry this sums zero-filled slack
    // slots of the VLA; it appears to terminate because the real entries
    // sum to at least workload[i] >= threshold -- confirm.
    do {
      t_workload += core2heavypages[i][j+num_crowded*3+1];
      num_crowded++;
    } while(t_workload < threshold);
    // num_crowded <= GC_CACHE_ADAPT_CROWD_THRESHOLD and if there are enough
    // items, it is always == GC_CACHE_ADAPT_CROWD_THRESHOLD
    if(num_crowded > GC_CACHE_ADAPT_CROWD_THRESHOLD) {
      // need to hfh these pages
      // sort the pages according to remote access
      gc_quicksort(&core2heavypages[i][0], j/3+1, j/3+num_crowded, 0);
      // h4h those pages with more remote accesses
      bamboo_cache_policy_t policy = {0};
      policy.cache_mode = BAMBOO_CACHE_MODE_HASH;
      *((unsigned int*)core2heavypages[i][j]) = policy.word;
      workload[i] -= core2heavypages[i][j+1];
      t_workload -= core2heavypages[i][j+1];
      j += 3;
      threshold = GC_CACHE_ADAPT_ACCESS_THRESHOLD*workload[i]/100;
      // re-sort and re-count with this page excluded
      goto inner_crowd;
    }
  }

  return numchanged;
} // int cacheAdapt_policy_crowd()
-
// Master-side cache adaption: after the sampling data has been collected,
// run the compile-time-selected policy routine to compute new per-page
// cache policies, then publish the number of changed entries in
// gccachepolicytbl[0] for cacheAdapt_mutator() to consume.
void cacheAdapt_master() {
#ifdef GC_CACHE_ADAPT_SAMPLING_OUTPUT
  gc_output_cache_sampling_r();
#endif // GC_CACHE_ADAPT_SAMPLING_OUTPUT
  unsigned int numchanged = 0;
  // check the statistic data
  // for each page, decide the new cache strategy
  // exactly one GC_CACHE_ADAPT_POLICYn is expected to be defined; with none
  // defined, numchanged stays 0 and no policies change
#ifdef GC_CACHE_ADAPT_POLICY1
  numchanged = cacheAdapt_policy_h4h();
#elif defined GC_CACHE_ADAPT_POLICY2
  numchanged = cacheAdapt_policy_local();
#elif defined GC_CACHE_ADAPT_POLICY3
  numchanged = cacheAdapt_policy_hotest();
#elif defined GC_CACHE_ADAPT_POLICY4
  numchanged = cacheAdapt_policy_dominate();
#elif defined GC_CACHE_ADAPT_POLICY5
  numchanged = cacheAdapt_policy_overload();
#elif defined GC_CACHE_ADAPT_POLICY6
  numchanged = cacheAdapt_policy_crowd();
#endif
  // entry count read by the mutator cores
  *gccachepolicytbl = numchanged;
}
-
-// adapt the cache strategy for the mutator
-void cacheAdapt_mutator() {
- int numchanged = *gccachepolicytbl;
- // check the changes and adapt them
- int * tmp_p = gccachepolicytbl+1;
- while(numchanged--) {
- // read out the policy
- int page_index = *tmp_p;
- bamboo_cache_policy_t policy = (bamboo_cache_policy_t)(*(tmp_p+1));
- // adapt the policy
- bamboo_adapt_cache_policy(page_index*(BAMBOO_PAGE_SIZE)+gcbaseva,
- policy, BAMBOO_PAGE_SIZE);
-
- tmp_p += 2;
- }
-}
-
-void gc_output_cache_sampling() {
- unsigned int page_index = 0;
- VA page_sva = 0;
- unsigned int page_num = (BAMBOO_SHARED_MEM_SIZE) / (BAMBOO_PAGE_SIZE);
- for(page_index = 0; page_index < page_num; page_index++) {
- page_sva = gcbaseva + (BAMBOO_PAGE_SIZE) * page_index;
- unsigned int block = 0;
- BLOCKINDEX(page_sva, &block);
- unsigned int coren = gc_block2core[block%(NUMCORES4GC*2)];
- tprintf("va: %x page_index: %d host: %d\n",
- (int)page_sva, page_index, coren);
- for(int i = 0; i < NUMCORESACTIVE; i++) {
- int * local_tbl = (int *)((void *)gccachesamplingtbl
- +size_cachesamplingtbl_local*i);
- int freq = local_tbl[page_index];
- printf("%8d ",freq);
- }
- printf("\n");
- }
- printf("=================\n");
-} // gc_output_cache_sampling
-
// Dump the GC-phase cache sampling table (gccachesamplingtbl_r): one row per
// shared-memory page showing its start address, index, home core, and each
// active core's sample count divided by the page size.
void gc_output_cache_sampling_r() {
  unsigned int page_index = 0;
  VA page_sva = 0;
  unsigned int page_num = (BAMBOO_SHARED_MEM_SIZE) / (BAMBOO_PAGE_SIZE);
  for(page_index = 0; page_index < page_num; page_index++) {
    page_sva = gcbaseva + (BAMBOO_PAGE_SIZE) * page_index;
    unsigned int block = 0;
    // map the page's address to its block to find the hosting core
    BLOCKINDEX(page_sva, &block);
    unsigned int coren = gc_block2core[block%(NUMCORES4GC*2)];
    tprintf("va: %x page_index: %d host: %d\n",
	    (int)page_sva, page_index, coren);
    for(int i = 0; i < NUMCORESACTIVE; i++) {
      // per-core tables are size_cachesamplingtbl_local_r bytes apart
      int * local_tbl = (int *)((void *)gccachesamplingtbl_r
	+size_cachesamplingtbl_local_r*i);
      // NOTE(review): unlike gc_output_cache_sampling(), the count is
      // normalized by BAMBOO_PAGE_SIZE here -- presumably the GC-phase
      // counters are in different units; confirm against the sampler
      int freq = local_tbl[page_index]/BAMBOO_PAGE_SIZE;
      printf("%8d ",freq);
    }
    printf("\n");
  }
  printf("=================\n");
} // gc_output_cache_sampling_r
-#endif // GC_CACHE_ADAPT
-
-inline void gc_collect(struct garbagelist * stackptr) {
- // inform the master that this core is at a gc safe point and is ready to
- // do gc
- send_msg_4(STARTUPCORE, GCFINISHPRE, BAMBOO_NUM_OF_CORE, self_numsendobjs,
- self_numreceiveobjs, false);
-
- // core collector routine
- while(true) {
- if(INITPHASE == gcphase) {
- break;
- }
- }
-#ifdef RAWPATH // TODO GC_DEBUG
- printf("(%X,%X) Do initGC\n", udn_tile_coord_x(), udn_tile_coord_y());
-#endif
- initGC();
-#ifdef GC_CACHE_ADAPT
- // prepare for cache adaption:
- cacheAdapt_gc(true);
-#endif // GC_CACHE_ADAPT
- //send init finish msg to core coordinator
- send_msg_2(STARTUPCORE, GCFINISHINIT, BAMBOO_NUM_OF_CORE, false);
-
- while(true) {
- if(MARKPHASE == gcphase) {
- break;
- }
- }
-#ifdef RAWPATH // TODO GC_DEBUG
- printf("(%x,%x) Start mark phase\n", udn_tile_coord_x(),
- udn_tile_coord_y());
-#endif
- mark(true, stackptr);
-#ifdef RAWPATH // TODO GC_DEBUG
- printf("(%x,%x) Finish mark phase, start compact phase\n",
- udn_tile_coord_x(), udn_tile_coord_y());
-#endif
- compact();
-#ifdef RAWPATH // TODO GC_DEBUG
- printf("(%x,%x) Finish compact phase\n", udn_tile_coord_x(),
- udn_tile_coord_y());
-#endif