From 5705dd8b8ef21f5134b1fb834383b3292ddc65da Mon Sep 17 00:00:00 2001 From: bdemsky Date: Sat, 18 Jun 2011 08:20:46 +0000 Subject: [PATCH] code changes --- Robust/src/Runtime/bamboo/multicoregarbage.c | 8 +- Robust/src/Runtime/bamboo/multicoregarbage.h | 3 - .../src/Runtime/bamboo/multicoregccompact.c | 2 +- Robust/src/Runtime/bamboo/multicoremem.c | 98 ++++--------------- Robust/src/Runtime/bamboo/multicoremem.h | 27 +---- Robust/src/Runtime/bamboo/multicoremsg.c | 25 ++--- Robust/src/Runtime/bamboo/multicoremsg.h | 12 +-- 7 files changed, 41 insertions(+), 134 deletions(-) diff --git a/Robust/src/Runtime/bamboo/multicoregarbage.c b/Robust/src/Runtime/bamboo/multicoregarbage.c index 9dfc6a01..6250a82a 100644 --- a/Robust/src/Runtime/bamboo/multicoregarbage.c +++ b/Robust/src/Runtime/bamboo/multicoregarbage.c @@ -120,8 +120,6 @@ void initmulticoregcdata() { gcmarkedptrbound = 0; gcforwardobjtbl = allocateMGCHash_I(128); gcheaptop = 0; - gctopcore = 0; - gctopblock = 0; gcmovestartaddr = 0; gctomove = false; gcmovepending = 0; @@ -162,8 +160,6 @@ void initGC() { gcnumreceiveobjs[0][i] = gcnumreceiveobjs[1][i] = 0; } gcheaptop = 0; - gctopcore = 0; - gctopblock = 0; gcnumsrobjs_index = 0; } gcself_numsendobjs = 0; @@ -284,7 +280,7 @@ void checkMarkStatus() { } // compute load balance for all cores -int loadbalance(void ** heaptop, unsigned int * topblock, unsigned int * topcore) { +int loadbalance(void ** heaptop) { // compute load balance // get the total loads unsigned int tloads = 0; @@ -299,8 +295,6 @@ int loadbalance(void ** heaptop, unsigned int * topblock, unsigned int * topcore // num of blocks per core unsigned int numbpc = (topblockindex+NUMCORES4GC-1)/NUMCORES4GC; - *topblock = topblockindex; - RESIDECORE(*heaptop, *topcore); return numbpc; } diff --git a/Robust/src/Runtime/bamboo/multicoregarbage.h b/Robust/src/Runtime/bamboo/multicoregarbage.h index e300676d..84e4f0a8 100644 --- a/Robust/src/Runtime/bamboo/multicoregarbage.h +++ 
b/Robust/src/Runtime/bamboo/multicoregarbage.h @@ -67,9 +67,6 @@ unsigned INTPTR gcloads[NUMCORES4GC]; //Top of each core's heap void * topptrs[NUMCORES4GC]; -unsigned int gctopcore; // the core host the top of the heap -unsigned int gctopblock; // the number of current top block - // compact instruction unsigned int gcmarkedptrbound; unsigned int gcblock2fill; diff --git a/Robust/src/Runtime/bamboo/multicoregccompact.c b/Robust/src/Runtime/bamboo/multicoregccompact.c index a9011b57..8d9e62e5 100644 --- a/Robust/src/Runtime/bamboo/multicoregccompact.c +++ b/Robust/src/Runtime/bamboo/multicoregccompact.c @@ -194,7 +194,7 @@ void compact() { void master_compact() { // predict number of blocks to fill for each core void * tmpheaptop = 0; - int numblockspercore = loadbalance(&tmpheaptop, &gctopblock, &gctopcore); + int numblockspercore = loadbalance(&tmpheaptop); GC_PRINTF("mark phase finished \n"); diff --git a/Robust/src/Runtime/bamboo/multicoremem.c b/Robust/src/Runtime/bamboo/multicoremem.c index 728bdc9a..a4484501 100644 --- a/Robust/src/Runtime/bamboo/multicoremem.c +++ b/Robust/src/Runtime/bamboo/multicoremem.c @@ -7,34 +7,6 @@ #include "multicorehelper.h" #include "multicoremem_helper.h" -INLINE void setupsmemmode(void) { -#ifdef SMEML - // Only allocate local mem chunks to each core. - // If a core has used up its local shared memory, start gc. - bamboo_smem_mode = SMEMLOCAL; -#elif defined SMEMF - // Allocate the local shared memory to each core with the highest priority, - // if a core has used up its local shared memory, try to allocate the - // shared memory that belong to its neighbours, if also failed, start gc. 
- bamboo_smem_mode = SMEMFIXED; -#elif defined SMEMM - // Allocate the local shared memory to each core with the highest priority, - // if a core has used up its local shared memory, try to allocate the - // shared memory that belong to its neighbours first, if failed, check - // current memory allocation rate, if it has already reached the threshold, - // start gc, otherwise, allocate the shared memory globally. If all the - // shared memory has been used up, start gc. - bamboo_smem_mode = SMEMMIXED; -#elif defined SMEMG - // Allocate all the memory chunks globally, do not consider the host cores - // When all the shared memory are used up, start gc. - bamboo_smem_mode = SMEMGLOBAL; -#else - // defaultly using local mode - bamboo_smem_mode = SMEMLOCAL; -#endif -} - INLINE void * mallocmem(int tofindb, int totest, int size, @@ -72,11 +44,9 @@ INLINE void * searchBlock4Mem(int* tofindb, if((nsize==bound)||((nsize != 0)&&(*totest != *tofindb))) { // a fully/partially occupied partition, can not be appended //the last continuous block is not big enough,check the next local block - i++; - if(2==i) { - i = 0; - j++; - } + j+=i; + i=(i+1)&1; + *tofindb=*totest=gc_core2block[2*gccorenum+i]+(NUMCORES4GC*2)*j; freeblocks--; } else { @@ -249,51 +219,28 @@ void * globalmalloc_I(int coren, return mem; } -// malloc from the shared memory -void * smemalloc_I(int coren, - int size, - int * allocsize) { - void * mem = NULL; - int isize = size+(BAMBOO_CACHE_LINE_SIZE); - - // go through the bamboo_smemtbl for suitable partitions - switch(bamboo_smem_mode) { - case SMEMLOCAL: { - mem = localmalloc_I(coren, isize, allocsize); - break; - } - - case SMEMFIXED: { -#ifdef SMEMF - mem = fixedmalloc_I(coren, isize, allocsize); -#else - // not supported yet - BAMBOO_EXIT(); -#endif - break; - } +void * smemalloc(int coren, int isize, int * allocsize) { + BAMBOO_ENTER_RUNTIME_MODE_FROM_CLIENT(); + void *retval=smemalloc_I(coren, isize, allocsize); + BAMBOO_ENTER_CLIENT_MODE_FROM_RUNTIME(); + 
return retval; +} - case SMEMMIXED: { -#ifdef SMEMM - mem = mixedmalloc_I(coren, isize, allocsize); -#else - // not supported yet - BAMBOO_EXIT(); +// malloc from the shared memory +void * smemalloc_I(int coren, int isize, int * allocsize) { +#ifdef SMEML + void *mem = localmalloc_I(coren, isize, allocsize); +#elif defined(SMEMF) + void *mem = fixedmalloc_I(coren, isize, allocsize); +#elif defined(SMEMM) + void *mem = mixedmalloc_I(coren, isize, allocsize); +#elif defined(SMEMG) + void *mem = globalmalloc_I(coren, isize, allocsize); #endif - break; - } - - case SMEMGLOBAL: { - mem = globalmalloc_I(coren, isize, allocsize); - break; - } - - default: - break; - } if(mem == NULL) { // no enough shared global memory + // trigger gc *allocsize = 0; if(!gcflag) { gcflag = true; @@ -310,11 +257,6 @@ void * smemalloc_I(int coren, } return NULL; } - /*if(coren == hostcore(mem)) { - tprintf("Allocate host mem: %d, %d, %d \n", coren, hostcore(mem), mem); - } else { - tprintf("---Allocate non-host mem: %d, %d, %d \n", coren, hostcore(mem), mem); - }*/ return mem; } #else diff --git a/Robust/src/Runtime/bamboo/multicoremem.h b/Robust/src/Runtime/bamboo/multicoremem.h index 877a36f2..86e6f289 100644 --- a/Robust/src/Runtime/bamboo/multicoremem.h +++ b/Robust/src/Runtime/bamboo/multicoremem.h @@ -106,18 +106,6 @@ volatile bool gc_localheap_s; -typedef enum { - SMEMLOCAL = 0x0,// 0x0, using local mem only - SMEMFIXED, // 0x1, use local mem in lower address space(1 block only) - // and global mem in higher address space - SMEMMIXED, // 0x2, like FIXED mode but use a threshold to control - SMEMGLOBAL, // 0x3, using global mem only - SMEMEND -} SMEMSTRATEGY; - -SMEMSTRATEGY bamboo_smem_mode; //-DSMEML: LOCAL; -DSMEMF: FIXED; - //-DSMEMM: MIXED; -DSMEMG: GLOBAL; - struct freeMemItem { unsigned int ptr; int size; @@ -132,16 +120,6 @@ struct freeMemList { // only maintain 1 freemMemItem }; -// Zero out the remaining bamboo_cur_msp. 
Only zero out the first 4 bytes -// of the remaining memory -#define BAMBOO_CLOSE_CUR_MSP() \ - { \ - if((bamboo_cur_msp!=NULL)&&(bamboo_smem_zero_top==bamboo_cur_msp) \ - &&(bamboo_smem_size>0)) { \ - *bamboo_cur_msp = NULL; \ - } \ - } - // table recording the number of allocated bytes on each block // Note: this table resides on the bottom of the shared heap for all cores // to access @@ -154,7 +132,10 @@ unsigned int bamboo_reserved_smem; // reserved blocks on the top of the shared // heap e.g. 20% of the heap and should not // be allocated otherwise gc is invoked volatile unsigned int bamboo_smem_zero_top; -#define BAMBOO_SMEM_ZERO_UNIT_SIZE ((unsigned int)(4 * 1024)) // 4KB + +//BAMBOO_SMEM_ZERO_UNIT_SIZE must evenly divide the page size and be a +//power of two(we rely on both in the allocation function) +#define BAMBOO_SMEM_ZERO_UNIT_SIZE 4096 #else //volatile mspace bamboo_free_msp; unsigned int bamboo_free_smemp; diff --git a/Robust/src/Runtime/bamboo/multicoremsg.c b/Robust/src/Runtime/bamboo/multicoremsg.c index ed4b73e5..7e17de1f 100644 --- a/Robust/src/Runtime/bamboo/multicoremsg.c +++ b/Robust/src/Runtime/bamboo/multicoremsg.c @@ -375,35 +375,29 @@ INLINE void processmsg_memrequest_I() { } INLINE void processmsg_memresponse_I() { - int data1 = msgdata[msgdataindex]; + void * memptr = msgdata[msgdataindex]; MSG_INDEXINC_I(); - int data2 = msgdata[msgdataindex]; + unsigned int numbytes = msgdata[msgdataindex]; MSG_INDEXINC_I(); // receive a shared memory response msg #ifdef MULTICORE_GC // if is currently doing gc, dump this msg if(!gc_status_info.gcprocessing) { #endif - if(data2 == 0) { + if(numbytes == 0) { #ifdef MULTICORE_GC - // Zero out the remaining memory here because for the GC_CACHE_ADAPT - // version, we need to make sure during the gcinit phase the shared heap - // is not touched. Otherwise, there would be problem when adapt the cache - // strategy. 
- BAMBOO_CLOSE_CUR_MSP(); bamboo_smem_zero_top = NULL; #endif bamboo_smem_size = 0; bamboo_cur_msp = NULL; } else { #ifdef MULTICORE_GC - //CLOSEBLOCK(data1, data2); - bamboo_smem_size = data2 - BAMBOO_CACHE_LINE_SIZE; - bamboo_cur_msp = data1 + BAMBOO_CACHE_LINE_SIZE; + bamboo_smem_size = numbytes; + bamboo_cur_msp = memptr; bamboo_smem_zero_top = bamboo_cur_msp; #else - bamboo_smem_size = data2; - bamboo_cur_msp =(void*)(data1); + bamboo_smem_size = numbytes; + bamboo_cur_msp =memptr; #endif } smemflag = true; @@ -421,7 +415,6 @@ INLINE void processmsg_gcstartpre_I() { // version, we need to make sure during the gcinit phase the shared heap // is not touched. Otherwise, there would be problem when adapt the cache // strategy. - BAMBOO_CLOSE_CUR_MSP(); bamboo_smem_size = 0; bamboo_cur_msp = NULL; smemflag = true; @@ -711,8 +704,8 @@ msg: } processmsg: // processing received msgs - int size = 0; - MSG_REMAINSIZE_I(&size); + int size; + MSG_REMAINSIZE_I(size); if(size == 0) { // not a whole msg // have new coming msg diff --git a/Robust/src/Runtime/bamboo/multicoremsg.h b/Robust/src/Runtime/bamboo/multicoremsg.h index 89f9a670..ea702bd7 100644 --- a/Robust/src/Runtime/bamboo/multicoremsg.h +++ b/Robust/src/Runtime/bamboo/multicoremsg.h @@ -27,13 +27,13 @@ volatile bool isMsgHanging; msgdata[msgdatalast] = (n); \ MSG_LASTINDEXINC_I() -#define MSG_REMAINSIZE_I(s) \ - if(msgdataindex < msgdatalast) { \ - (*(int*)s) = msgdatalast - msgdataindex; \ +#define MSG_REMAINSIZE_I(s) \ + if(msgdataindex < msgdatalast) { \ + s = msgdatalast - msgdataindex; \ } else if((msgdataindex == msgdatalast) && (!msgdatafull)) { \ - (*(int*)s) = 0; \ - } else { \ - (*(int*)s) = (BAMBOO_MSG_BUF_LENGTH) - msgdataindex + msgdatalast; \ + s = 0; \ + } else { \ + s = (BAMBOO_MSG_BUF_LENGTH) - msgdataindex + msgdatalast; \ } #define OUTMSG_INDEXINC() \ -- 2.34.1