GC_PRINTF("Start cachepolicy phase \n");
// cache adapt phase
cacheAdapt_decision(BAMBOO_NUM_OF_CORE);
- GC_CHECK_ALL_CORE_STATUS(CACHEPOLICYPHASE==gc_status_info.gcphase);
+ GC_CHECK_ALL_CORE_STATUS();
BAMBOO_CACHE_MF();
// let all cores adopt the new policies
// cache adapt phase
cacheAdapt_mutator();
cacheAdapt_gc(false);
- GC_CHECK_ALL_CORE_STATUS(PREFINISHPHASE==gc_status_info.gcphase);
+ GC_CHECK_ALL_CORE_STATUS();
CACHEADAPT_SAMPING_RESET();
if(BAMBOO_NUM_OF_CORE < NUMCORESACTIVE) {
WAITFORGCPHASE(UPDATEPHASE);
- GC_PRINTF("Start flush phase\n");
+ GC_PRINTF("Start update phase\n");
GCPROFILE_INFO_2_MASTER();
update(stackptr);
- GC_PRINTF("Finish flush phase\n");
+ GC_PRINTF("Finish update phase\n");
CACHEADAPT_PHASE_CLIENT();
GC_PRINTF("Start mark phase\n");
mark(stackptr);
- GC_PRINTF("Finish mark phase, wait for flush\n");
+ GC_PRINTF("Finish mark phase, wait for update\n");
// non-gc core collector routine
WAITFORGCPHASE(UPDATEPHASE);
- GC_PRINTF("Start flush phase\n");
+ GC_PRINTF("Start update phase\n");
GCPROFILE_INFO_2_MASTER();
update(stackptr);
- GC_PRINTF("Finish flush phase\n");
+ GC_PRINTF("Finish update phase\n");
CACHEADAPT_PHASE_CLIENT();
gc_status_info.gcphase = UPDATEPHASE;
GC_SEND_MSG_1_TO_CLIENT(GCSTARTUPDATE);
GCPROFILE_ITEM();
- GC_PRINTF("Start flush phase \n");
- // flush phase
+ GC_PRINTF("Start update phase \n");
+ // update phase
update(stackptr);
- GC_CHECK_ALL_CORE_STATUS(UPDATEPHASE==gc_status_info.gcphase);
- GC_PRINTF("Finish flush phase \n");
+ GC_CHECK_ALL_CORE_STATUS();
+ GC_PRINTF("Finish update phase \n");
}
void master_finish() {
GC_SEND_MSG_1_TO_CLIENT(GCSTARTINIT);
CACHEADAPT_GC(true);
GC_PRINTF("Check core status \n");
- GC_CHECK_ALL_CORE_STATUS(true);
+ GC_CHECK_ALL_CORE_STATUS();
GCPROFILE_ITEM();
unsigned long long tmpt = BAMBOO_GET_EXE_TIME();
CACHEADAPT_OUTPUT_CACHE_SAMPLING();
// update the references
master_updaterefs(stackptr);
-
+ GC_PRINTF("gc master finished update \n");
// do cache adaptation
CACHEADAPT_PHASE_MASTER();
static bool gc_checkCoreStatus() {
for(int i = 0; i < NUMCORES4GC; i++) {
if(gccorestatus[i]) {
- printf("CHECK\n");
return false;
}
}
#define NEXTTOPCORE(b) (gc_block2core[((b)+1)%(NUMCORES4GC*2)])
// check if all cores are stall now
-#define GC_CHECK_ALL_CORE_STATUS(f) \
- { \
- gccorestatus[BAMBOO_NUM_OF_CORE] = 0; \
- while(f) { \
- if(gc_checkCoreStatus()) { \
- break; \
- } \
- } \
- }
+#define GC_CHECK_ALL_CORE_STATUS() gccorestatus[BAMBOO_NUM_OF_CORE] = 0; \
+ while(!gc_checkCoreStatus()) ;
// send a 1-word msg to all clients
#define GC_SEND_MSG_1_TO_CLIENT(m) \
to->bound = to->base + BLOCKSIZE(to->localblocknum);
}
+//This function is called on the master core only...and typically by
+//the message interrupt handler.
+//Records that core cnum has returned its unused shared memory: its
+//current block is marked free from heaptop upward, and (if the block is
+//the core's own) every local block above it is marked entirely free.
+//_I suffix: must run with the message interrupt masked (runtime mode).
+
+void handleReturnMem_I(unsigned int cnum, void *heaptop) {
+  unsigned int blockindex;
+  BLOCKINDEX(blockindex, heaptop);
+  unsigned INTPTR localblocknum=GLOBALBLOCK2LOCAL(blockindex);
+
+  //this core is done as far as memory usage is concerned
+  returnedmem[cnum]=0;
+
+  struct blockrecord * blockrecord=&allocationinfo.blocktable[blockindex];
+
+  blockrecord->status=BS_FREE;
+  blockrecord->usedspace=(unsigned INTPTR)(heaptop-OFFSET2BASEVA(blockindex));
+  blockrecord->freespace=BLOCKSIZE(localblocknum)-blockrecord->usedspace;
+  /* Update the lowest free block */
+  //BUGFIX: the assignment was inverted (blockindex=allocationinfo.lowestfreeblock),
+  //which made the guard a no-op and left lowestfreeblock stale.
+  if (blockindex < allocationinfo.lowestfreeblock) {
+    allocationinfo.lowestfreeblock=blockindex;
+  }
+
+  /* This is our own block...means we should mark other blocks above us as free*/
+  if (cnum==blockrecord->corenum) {
+    unsigned INTPTR nextlocalblocknum=localblocknum+1;
+    for(;nextlocalblocknum<numblockspercore;nextlocalblocknum++) {
+      unsigned INTPTR blocknum=BLOCKINDEX2(cnum, nextlocalblocknum);
+      //BUGFIX: index by blocknum (the block being walked), not blockindex;
+      //the old code rewrote the same record every iteration and left
+      //blocknum unused.
+      struct blockrecord * nextblockrecord=&allocationinfo.blocktable[blocknum];
+      nextblockrecord->status=BS_FREE;
+      nextblockrecord->usedspace=0;
+      //this is true because this cannot be the lowest block
+      nextblockrecord->freespace=BLOCKSIZE(1);
+    }
+  }
+}
+
+//Client-mode wrapper for handleReturnMem_I: switches into runtime mode
+//around the call so it runs atomically with respect to the message
+//interrupt handler (the _I suffix presumably marks interrupt-masked
+//routines -- NOTE(review): confirm against the runtime's convention).
+void handleReturnMem(unsigned int cnum, void *heaptop) {
+  BAMBOO_ENTER_RUNTIME_MODE_FROM_CLIENT();
+  handleReturnMem_I(cnum, heaptop);
+  BAMBOO_ENTER_CLIENT_MODE_FROM_RUNTIME();
+}
+
void getSpaceRemotely(struct moveHelper *to, unsigned int minimumbytes) {
//need to get another block from elsewhere
//set flag to wait for memory
- gctomove=false;
- //send request for memory
- send_msg_4(STARTUPCORE,GCFINISHCOMPACT,BAMBOO_NUM_OF_CORE, minimumbytes, gccurr_heaptop);
- //wait for flag to be set that we received message
- while(!gctomove) ;
+ if (BAMBOO_NUM_OF_CORE==STARTUPCORE) {
+ printf("A: %d\n", BAMBOO_NUM_OF_CORE);
+
+ gctomove=false;
+ BAMBOO_ENTER_RUNTIME_MODE_FROM_CLIENT();
+ void *startaddr=handlegcfinishcompact_I(BAMBOO_NUM_OF_CORE, minimumbytes, gccurr_heaptop);
+ BAMBOO_ENTER_CLIENT_MODE_FROM_RUNTIME();
+ if (startaddr) {
+ gcmovestartaddr=startaddr;
+ } else {
+ while(!gctomove) ;
+ }
+ printf("B: %d\n", BAMBOO_NUM_OF_CORE);
+ } else {
+ printf("C: %d\n", BAMBOO_NUM_OF_CORE);
+ gctomove=false;
+ //send request for memory
+ send_msg_4(STARTUPCORE,GCFINISHCOMPACT,BAMBOO_NUM_OF_CORE, minimumbytes, gccurr_heaptop);
+ //wait for flag to be set that we received message
+ while(!gctomove) ;
+ printf("D: %d\n", BAMBOO_NUM_OF_CORE);
+ }
//store pointer
to->ptr = gcmovestartaddr;
while(true) {
if ((gccurr_heaptop < ((unsigned INTPTR)(to->bound-to->ptr)))&&!senttopmessage) {
//This block is the last for this core...let the startup know
- send_msg_3(STARTUPCORE, GCRETURNMEM, BAMBOO_NUM_OF_CORE, to->ptr+gccurr_heaptop);
+ if (BAMBOO_NUM_OF_CORE==STARTUPCORE) {
+ handleReturnMem(BAMBOO_NUM_OF_CORE, to->ptr+gccurr_heaptop);
+ } else {
+ send_msg_3(STARTUPCORE, GCRETURNMEM, BAMBOO_NUM_OF_CORE, to->ptr+gccurr_heaptop);
+ }
//Only send the message once
senttopmessage=true;
}
-
unsigned int minimumbytes=compactblocks(orig, to);
if (orig->ptr==orig->bound) {
//need more data to compact
getSpace(to, minimumbytes);
}
}
-
- send_msg_4(STARTUPCORE,GCFINISHCOMPACT,BAMBOO_NUM_OF_CORE, 0, 0);
+
+ if (BAMBOO_NUM_OF_CORE==STARTUPCORE) {
+ BAMBOO_ENTER_RUNTIME_MODE_FROM_CLIENT();
+ handlegcfinishcompact_I(BAMBOO_NUM_OF_CORE, 0, 0);
+ BAMBOO_ENTER_CLIENT_MODE_FROM_RUNTIME();
+ } else {
+ send_msg_4(STARTUPCORE,GCFINISHCOMPACT,BAMBOO_NUM_OF_CORE, 0, 0);
+ }
+
}
void * checkNeighbors_I(int corenum, unsigned INTPTR requiredmem, unsigned INTPTR desiredmem) {
unsigned int compactblocks(struct moveHelper * orig, struct moveHelper * to) {
void *toptrinit=to->ptr;
- void *toptr=toptr;
+ void *toptr=toptrinit;
void *tobound=to->bound;
void *origptr=orig->ptr;
void *origbound=orig->bound;
// compact phase
compact();
/* wait for all cores to finish compacting */
+ tprintf("MASTER WAITING\n");
while(!gc_checkCoreStatus())
;
+ tprintf("POST_WAIT\n");
GCPROFILE_ITEM();
//just in case we didn't get blocks back...
#define UPDATEOBJ(obj, tt) {void *updatetmpptr=obj; if (updatetmpptr!=NULL) obj=updateObj(updatetmpptr);}
#define UPDATEOBJNONNULL(obj, tt) {void *updatetmpptr=obj; obj=updateObj(updatetmpptr);}
-
+#define dbpr() if (STARTUPCORE==BAMBOO_NUM_OF_CORE) tprintf("FL: %d\n", __LINE__);
INLINE void updategarbagelist(struct garbagelist *listptr) {
for(;listptr!=NULL; listptr=listptr->next) {
if (endtoptr>tobound||endtoptr<tobase) {
//get use the next block of memory
orig->ptr=origptr;
+ if (BAMBOO_NUM_OF_CORE==STARTUPCORE) {
+ tprintf("dstptr=%x\n",dstptr);
+ tprintf("endtoptrptr=%x\n",endtoptr);
+ tprintf("tobound=%x\n",tobound);
+ tprintf("tobase=%x\n",tobase);
+ tprintf("origptr=%x\n",origptr);
+ tprintf("length=%d\n",length);
+ }
return dstptr;
}
void updatehelper(struct moveHelper * orig,struct moveHelper * to) {
while(true) {
+ dbpr();
void *dstptr=updateblocks(orig, to);
+ dbpr();
if (dstptr) {
- //need more memory to compact into
+ dbpr();
+ printf("M: %x\n", dstptr);
+ //need more memory to compact into
block_t blockindex;
BLOCKINDEX(blockindex, dstptr);
unsigned int corenum;
;
}
}
+ dbpr();
if (orig->ptr==orig->bound) {
+ dbpr();
//inform others that we are done with previous block
updateOrigPtr(orig->bound);
update_origblockptr=orig->base;
orig->ptr=orig->base;
orig->bound = orig->base + BLOCKSIZE(orig->localblocknum);
- if (orig->base >= gcbaseva+BAMBOO_SHARED_MEM_SIZE)
+ if (orig->base >= gcbaseva+BAMBOO_SHARED_MEM_SIZE) {
+ //free our entire memory for others to use
break;
+ }
}
+ dbpr();
}
}
void updateheap() {
- BAMBOO_CACHE_MF();
-
// initialize structs for compacting
struct moveHelper orig={0,NULL,NULL,0,NULL,0,0,0,0};
struct moveHelper to={0,NULL,NULL,0,NULL,0,0,0,0};
+ dbpr();
initOrig_Dst(&orig, &to);
-
+ dbpr();
updatehelper(&orig, &to);
+ dbpr();
}
void update(struct garbagelist * stackptr) {
- BAMBOO_CACHE_MF();
-
+ dbpr();
updateRuntimePtrs(stackptr);
-
+ dbpr();
updateheap();
-
+ dbpr();
// send update finish message to core coordinator
if(STARTUPCORE == BAMBOO_NUM_OF_CORE) {
gccorestatus[BAMBOO_NUM_OF_CORE] = 0;
void * globalmalloc_I(int coren, unsigned INTPTR memcheck, int * allocsize) {
block_t firstfree=NOFREEBLOCK;
block_t lowestblock=allocationinfo.lowestfreeblock;
+
for(block_t searchblock=lowestblock;searchblock<GCNUMBLOCK;searchblock++) {
struct blockrecord * block=&allocationinfo.blocktable[searchblock];
if (block->status==BS_FREE) {
MSG_INDEXINC_I();
void * heaptop = (void *) msgdata[msgdataindex];
MSG_INDEXINC_I();
- unsigned int blockindex;
- BLOCKINDEX(blockindex, heaptop);
- unsigned INTPTR localblocknum=GLOBALBLOCK2LOCAL(blockindex);
- //this core is done as far as memory usage is concerned
- returnedmem[cnum]=0;
-
- struct blockrecord * blockrecord=&allocationinfo.blocktable[blockindex];
-
- blockrecord->status=BS_FREE;
- blockrecord->usedspace=(unsigned INTPTR)(heaptop-OFFSET2BASEVA(blockindex));
- blockrecord->freespace=BLOCKSIZE(localblocknum)-blockrecord->usedspace;
- /* Update the lowest free block */
- if (blockindex < allocationinfo.lowestfreeblock) {
- blockindex=allocationinfo.lowestfreeblock;
- }
-
- /* This is our own block...means we should mark other blocks above us as free*/
- if (cnum==blockrecord->corenum) {
- unsigned INTPTR nextlocalblocknum=localblocknum+1;
- for(;nextlocalblocknum<numblockspercore;nextlocalblocknum++) {
- unsigned INTPTR blocknum=BLOCKINDEX2(cnum, nextlocalblocknum);
- struct blockrecord * nextblockrecord=&allocationinfo.blocktable[blockindex];
- nextblockrecord->status=BS_FREE;
- nextblockrecord->usedspace=0;
- //this is true because this cannot be the lowest block
- nextblockrecord->freespace=BLOCKSIZE(1);
- }
- }
+ handleReturnMem_I(cnum, heaptop);
}
-INLINE void processmsg_gcfinishcompact_I() {
- BAMBOO_ASSERT(BAMBOO_NUM_OF_CORE == STARTUPCORE);
-
- int cnum = msgdata[msgdataindex];
- MSG_INDEXINC_I();
- unsigned int bytesneeded = msgdata[msgdataindex];
- MSG_INDEXINC_I();
- unsigned int maxbytesneeded = msgdata[msgdataindex];
- MSG_INDEXINC_I();
-
+//Master-side handler for a GCFINISHCOMPACT request from core cnum.
+//If cnum still needs bytesneeded bytes, tries to find spare memory now;
+//on success returns its start address (caller forwards it to cnum),
+//otherwise records the outstanding request for later fulfillment and
+//returns NULL.  With bytesneeded==0 the core is done compacting.
+//_I suffix: must run with the message interrupt masked (runtime mode).
+void * handlegcfinishcompact_I(int cnum, unsigned int bytesneeded, unsigned int maxbytesneeded) {
  if(bytesneeded > 0) {
    // ask for more mem
    void * startaddr = gcfindSpareMem_I(bytesneeded, maxbytesneeded, cnum);
    if(startaddr) {
      // cache the msg first
+      //(note: the caller now sends/caches the GCMOVESTART message; this
+      //function just hands the address back)
+      return startaddr;
    } else {
+      //no spare memory available right now: remember the request so it
+      //can be satisfied when another core returns memory
      maxusefulmems[cnum]=maxbytesneeded;
      gcrequiredmems[cnum]=bytesneeded;
      //done with compacting
      gccorestatus[cnum] = 0;
    }
+  return NULL;
+}
+
+//Message handler: parse a GCFINISHCOMPACT message (cnum, bytesneeded,
+//maxbytesneeded) from the shared message buffer and delegate to
+//handlegcfinishcompact_I.  If spare memory was immediately available,
+//reply to the requesting core with a GCMOVESTART message.
+void processmsg_gcfinishcompact_I() {
+  //only the master core coordinates compaction requests; this assert
+  //existed in the pre-refactor handler and was dropped by mistake
+  BAMBOO_ASSERT(BAMBOO_NUM_OF_CORE == STARTUPCORE);
+
+  int cnum = msgdata[msgdataindex];
+  MSG_INDEXINC_I();
+  unsigned int bytesneeded = msgdata[msgdataindex];
+  MSG_INDEXINC_I();
+  unsigned int maxbytesneeded = msgdata[msgdataindex];
+  MSG_INDEXINC_I();
+
+  void * startaddr=handlegcfinishcompact_I(cnum, bytesneeded, maxbytesneeded);
+  if (startaddr) {
+    //NOTE(review): BAMBOO_CHECK_SEND_MODE presumably means the reply must
+    //be buffered instead of sent directly -- confirm against msg layer
+    if(BAMBOO_CHECK_SEND_MODE()) {
+      cache_msg_2_I(cnum,GCMOVESTART,startaddr);
+    } else {
+      send_msg_2_I(cnum,GCMOVESTART,startaddr);
+    }
+  }
+}
INLINE void processmsg_gcfinishupdate_I() {
extern volatile bool gcflag;
void * mycalloc_share(struct garbagelist * stackptr, int size) {
void * p = NULL;
-
int isize = ((size-1)&(~(ALIGNMENTSIZE-1)))+ALIGNMENTSIZE;
int hasgc = 0;