From: jzhou
Date: Wed, 22 Sep 2010 18:55:18 +0000 (+0000)
Subject: bug fix
X-Git-Url: http://demsky.eecs.uci.edu/git/?a=commitdiff_plain;h=0373a63ecf5c26ccc41a0c6c886430befd4d3e66;p=IRC.git

bug fix
---

diff --git a/Robust/src/Runtime/bamboo/multicoregarbage.c b/Robust/src/Runtime/bamboo/multicoregarbage.c
index 381083a3..5a92e470 100644
--- a/Robust/src/Runtime/bamboo/multicoregarbage.c
+++ b/Robust/src/Runtime/bamboo/multicoregarbage.c
@@ -2043,7 +2043,7 @@ innermoveobj:
 #endif // GC_CACHE_ADAPT
     nextBlock(to);
 #ifdef GC_CACHE_ADAPT
-    if((to->base+to->bound) >= gc_cache_revise_infomation.to_page_end_va) {
+    if((to->ptr) >= gc_cache_revise_infomation.to_page_end_va) {
     // end of an to page, wrap up its information
       int tmp_factor = tmp_ptr-gc_cache_revise_infomation.to_page_start_va;
       int topage=gc_cache_revise_infomation.to_page_index;
@@ -2114,7 +2114,6 @@ innermoveobj:
     to->ptr += isize;
     to->offset += isize;
     to->top += isize;
-#if 0
 #ifdef GC_CACHE_ADAPT
     int tmp_ptr = to->ptr;
 #endif // GC_CACHE_ADAPT
@@ -2123,6 +2122,7 @@ innermoveobj:
       BAMBOO_MEMSET_WH(to->base, '\0', BAMBOO_CACHE_LINE_SIZE);
       (*((int*)(to->base))) = to->offset;
       nextBlock(to);
+#if 0
 #ifdef GC_CACHE_ADAPT
       if((to->base+to->bound) >= gc_cache_revise_infomation.to_page_end_va) {
       // end of an to page, wrap up its information
@@ -2151,8 +2151,36 @@ innermoveobj:
           (to->ptr-gcbaseva)/(BAMBOO_PAGE_SIZE);
       }
 #endif // GC_CACHE_ADAPT
-    }
 #endif
+    }
+#ifdef GC_CACHE_ADAPT
+    if((to->ptr) >= gc_cache_revise_infomation.to_page_end_va) {
+    // end of an to page, wrap up its information
+      int tmp_factor = tmp_ptr-gc_cache_revise_infomation.to_page_start_va;
+      int topage=gc_cache_revise_infomation.to_page_index;
+      int oldpage = gc_cache_revise_infomation.orig_page_index;
+      int * newtable=&gccachesamplingtbl_r[topage];
+      int * oldtable=&gccachesamplingtbl[oldpage];
+
+      for(int tt = 0; tt < NUMCORESACTIVE; tt++) {
+        (*newtable)=((*newtable)+(*oldtable)*tmp_factor);
+        newtable=(int*) (((char *)newtable)+size_cachesamplingtbl_local_r);
+        oldtable=(int*) (((char *)oldtable)+size_cachesamplingtbl_local);
+      }
+      // prepare for an new to page
+      int tmp_index = (orig->ptr-gcbaseva)/(BAMBOO_PAGE_SIZE);
+      gc_cache_revise_infomation.orig_page_start_va = orig->ptr;
+      gc_cache_revise_infomation.orig_page_end_va = gcbaseva +
+        (BAMBOO_PAGE_SIZE)*((orig->ptr-gcbaseva)/(BAMBOO_PAGE_SIZE)+1);
+      gc_cache_revise_infomation.orig_page_index =
+        (orig->ptr-gcbaseva)/(BAMBOO_PAGE_SIZE);
+      gc_cache_revise_infomation.to_page_start_va = to->ptr;
+      gc_cache_revise_infomation.to_page_end_va = gcbaseva +
+        (BAMBOO_PAGE_SIZE)*((to->ptr-gcbaseva)/(BAMBOO_PAGE_SIZE)+1);
+      gc_cache_revise_infomation.to_page_index =
+        (to->ptr-gcbaseva)/(BAMBOO_PAGE_SIZE);
+    }
+#endif // GC_CACHE_ADAPT
   } // if(mark == 1)
 #ifdef DEBUG
   BAMBOO_DEBUGPRINT(0xe205);
@@ -3344,12 +3372,19 @@ void cacheAdapt_master() {
   int numchanged = 0;
   // check the statistic data
   // for each page, decide the new cache strategy
+#ifdef GC_CACHE_ADAPT_POLICY1
   numchanged = cacheAdapt_policy_h4h();
-  //numchanged = cacheAdapt_policy_local();
-  //numchanged = cacheAdapt_policy_hotest();
-  //numchanged = cacheAdapt_policy_dominate();
-  //numchanged = cacheAdapt_policy_overload();
-  //numchanged = cacheAdapt_policy_crowd();
+#elif defined GC_CACHE_ADAPT_POLICY2
+  numchanged = cacheAdapt_policy_local();
+#elif defined GC_CACHE_ADAPT_POLICY3
+  numchanged = cacheAdapt_policy_hotest();
+#elif defined GC_CACHE_ADAPT_POLICY4
+  numchanged = cacheAdapt_policy_dominate();
+#elif defined GC_CACHE_ADAPT_POLICY5
+  numchanged = cacheAdapt_policy_overload();
+#elif defined GC_CACHE_ADAPT_POLICY6
+  numchanged = cacheAdapt_policy_crowd();
+#endif
   *gccachepolicytbl = numchanged;
   // TODO
   //if(numchanged > 0) tprintf("=================\n");
@@ -3941,6 +3976,11 @@ inline void gc_master(struct garbagelist * stackptr) {
 #endif
   // flush phase
   flush(stackptr);
+#ifdef GC_CACHE_ADAPT
+  // now the master core need to decide the new cache strategy
+  cacheAdapt_master();
+#endif // GC_CACHE_ADAPT
+
   gccorestatus[BAMBOO_NUM_OF_CORE] = 0;
   while(FLUSHPHASE == gcphase) {
     // check the status of all cores
@@ -3957,9 +3997,9 @@ inline void gc_master(struct garbagelist * stackptr) {
 #endif

 #ifdef GC_CACHE_ADAPT
-  // now the master core need to decide the new cache strategy
-  cacheAdapt_master();
-
+#ifdef GC_PROFILE
+  gc_profileItem();
+#endif
   gcphase = PREFINISHPHASE;
   gccorestatus[BAMBOO_NUM_OF_CORE] = 1;
   // Note: all cores should flush their runtime data including non-gc
@@ -3969,9 +4009,6 @@ inline void gc_master(struct garbagelist * stackptr) {
     gccorestatus[i] = 1;
     send_msg_1(i, GCSTARTPREF, false);
   }
-#ifdef GC_PROFILE
-  gc_profileItem();
-#endif
 #ifdef RAWPATH // TODO GC_DEBUG
   printf("(%x,%x) Start prefinish phase \n", udn_tile_coord_x(),
          udn_tile_coord_y());
diff --git a/Robust/src/Runtime/bamboo/multicoregarbage.h b/Robust/src/Runtime/bamboo/multicoregarbage.h
index 1189f386..5977e47a 100644
--- a/Robust/src/Runtime/bamboo/multicoregarbage.h
+++ b/Robust/src/Runtime/bamboo/multicoregarbage.h
@@ -151,7 +151,7 @@ void * gcmappingtbl[NUMCORESACTIVE][NUM_MAPPING];*/
 #ifdef GC_SMALLPAGESIZE
 #define BAMBOO_RMSP_SIZE (1024 * 1024)
 #else
-#define BAMBOO_RMSP_SIZE (BAMBOO_SMEM_SIZE) // (45 * 16 * 1024)
+#define BAMBOO_RMSP_SIZE (BAMBOO_SMEM_SIZE*2) // (45 * 16 * 1024)
 #endif
 mspace bamboo_rmsp; // shared pointer mapping tbl

diff --git a/Robust/src/Runtime/bamboo/multicoreruntime.h b/Robust/src/Runtime/bamboo/multicoreruntime.h
index 0651c7c6..e4e6615f 100644
--- a/Robust/src/Runtime/bamboo/multicoreruntime.h
+++ b/Robust/src/Runtime/bamboo/multicoreruntime.h
@@ -306,12 +306,16 @@ struct Queue * totransobjqueue; // queue to hold objs to be transferred
 #define BAMBOO_NUM_BLOCKS ((GC_BAMBOO_NUMCORES)*(2+14))
 #define BAMBOO_PAGE_SIZE (64 * 1024) // 64K
 #ifdef GC_LARGEPAGESIZE
-#define BAMBOO_SMEM_SIZE (16 * (BAMBOO_PAGE_SIZE))
+#define BAMBOO_PAGE_SIZE (4 * 64 * 1024)
+#define BAMBOO_SMEM_SIZE (4 * (BAMBOO_PAGE_SIZE))
 #elif defined GC_SMALLPAGESIZE
 #define BAMBOO_SMEM_SIZE (BAMBOO_PAGE_SIZE)
 #elif defined GC_SMALLPAGESIZE2
 #define BAMBOO_PAGE_SIZE (16 * 1024) // (4096)
 #define BAMBOO_SMEM_SIZE (BAMBOO_PAGE_SIZE)
+#elif defined GC_LARGEPAGESIZE2
+#define BAMBOO_PAGE_SIZE (4 * 64 * 1024) // 64K
+#define BAMBOO_SMEM_SIZE ((BAMBOO_PAGE_SIZE))
 #else
 #define BAMBOO_SMEM_SIZE (4 * (BAMBOO_PAGE_SIZE))
 #endif // GC_LARGEPAGESIZE
diff --git a/Robust/src/Runtime/bamboo/multicoretask.c b/Robust/src/Runtime/bamboo/multicoretask.c
index 9a40381f..c56bf727 100644
--- a/Robust/src/Runtime/bamboo/multicoretask.c
+++ b/Robust/src/Runtime/bamboo/multicoretask.c
@@ -377,12 +377,6 @@ void initruntimedata() {
   gc_localheap_s = false;
 #ifdef GC_CACHE_ADAPT
   gccachestage = false;
-  // enable the timer interrupt
-#ifdef GC_CACHE_SAMPLING
-  bamboo_tile_timer_set_next_event(GC_TILE_TIMER_EVENT_SETTING); // TODO
-  bamboo_unmask_timer_intr();
-  bamboo_dtlb_sampling_process();
-#endif // GC_CACHE_SAMPLING
 #endif // GC_CACHE_ADAPT
 #else
   // create the lock table, lockresult table and obj queue
@@ -793,6 +787,15 @@ inline void run(void * arg) {

   initialization();
   initCommunication();
+#ifdef GC_CACHE_ADAPT
+// enable the timer interrupt
+#ifdef GC_CACHE_SAMPLING
+  bamboo_tile_timer_set_next_event(GC_TILE_TIMER_EVENT_SETTING); // TODO
+  bamboo_unmask_timer_intr();
+  bamboo_dtlb_sampling_process();
+#endif // GC_CACHE_SAMPLING
+#endif // GC_CACHE_ADAPT
+
   initializeexithandler();

   // main process of the execution module
@@ -2064,7 +2067,7 @@ INLINE int checkMsgLength_I(int size) {
 #endif
   { // nonfixed size
     if(size > 1) {
-      msglength = msgdata[(msgdataindex+1)%(BAMBOO_MSG_BUF_LENGTH)];
+      msglength = msgdata[(msgdataindex+1)&(BAMBOO_MSG_BUF_MASK)/*%(BAMBOO_MSG_BUF_LENGTH)*/];
     } else {
       return -1;
     }
diff --git a/Robust/src/buildscript b/Robust/src/buildscript
index e5a5984f..1ffb3f5d 100755
--- a/Robust/src/buildscript
+++ b/Robust/src/buildscript
@@ -187,10 +187,13 @@ GCCONTROLLERNEARFLAG=false;
 GCCONTROLLERREMOTEFLAG=false;
 GCSMALLPAGESIZEFLAG=false;
 GCLARGEPAGESIZEFLAG=false;
+GCLARGEPAGESIZE2FLAG=false;
 GCLARGESHAREDHEAPFLAG=false;
 GCSMALLPAGESIZEFLAG2=false;
 GCLARGESHAREDHEAPFLAG2=false;
 GCCACHEADAPTFLAG=false
+GCCACHEADAPTPOLICYFLAG=false
+GCCACHEADAPTPOLICY=''
 GCCACHESAMPLINGFLAG=false
 USEDMALLOC=false
 THREADFLAG=false
@@ -467,6 +470,9 @@ GCSMALLPAGESIZEFLAG2=true
 elif [[ $1 = '-gclargepagesize' ]]
 then
 GCLARGEPAGESIZEFLAG=true
+elif [[ $1 = '-gclargepagesize2' ]]
+then
+GCLARGEPAGESIZE2FLAG=true
 elif [[ $1 = '-gclargesharedheap' ]]
 then
 GCLARGESHAREDHEAPFLAG=true
@@ -476,6 +482,11 @@ GCLARGESHAREDHEAPFLAG2=true
 elif [[ $1 = '-gccacheadapt' ]]
 then
 GCCACHEADAPTFLAG=true
+elif [[ $1 = '-gccacheadaptpolicy' ]]
+then
+GCCACHEADAPTPOLICYFLAG=true
+GCCACHEADAPTPOLICY="$2"
+shift
 elif [[ $1 = '-gccachesampling' ]]
 then
 GCCACHESAMPLINGFLAG=true
@@ -963,6 +974,11 @@ then # GC_LARGEPAGESIZE version
 TILERACFLAGS="${TILERACFLAGS} -DGC_LARGEPAGESIZE"
 fi

+if $GCLARGEPAGESIZE2FLAG
+then # GC_LARGEPAGESIZE2 version
+TILERACFLAGS="${TILERACFLAGS} -DGC_LARGEPAGESIZE2"
+fi
+
 if $GCLARGESHAREDHEAPFLAG
 then # GC_LARGESHAREDHEAP version
 TILERACFLAGS="${TILERACFLAGS} -DGC_LARGESHAREDHEAP"
@@ -983,6 +999,11 @@ then # GC_CACHE_ADAPT version
 TILERACFLAGS="${TILERACFLAGS} -DGC_CACHE_ADAPT"
 fi

+if $GCCACHEADAPTPOLICYFLAG
+then # GC_CACHE_ADAPT version
+TILERACFLAGS="${TILERACFLAGS} -DGC_CACHE_ADAPT_POLICY${GCCACHEADAPTPOLICY}"
+fi
+
 if $GCCACHESAMPLINGFLAG
 then # GC_CACHE_ADAPT version
 TILERACFLAGS="${TILERACFLAGS} -DGC_CACHE_SAMPLING"
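
Editor's note on the checkMsgLength_I hunk: replacing the modulo with a bitwise AND is the usual ring-buffer indexing trick for power-of-two buffers, and it is only equivalent to the old expression when BAMBOO_MSG_BUF_LENGTH is a power of two and BAMBOO_MSG_BUF_MASK equals that length minus one. The standalone sketch below is illustrative only and is not Bamboo runtime code; BUF_LENGTH and BUF_MASK are hypothetical stand-ins for the runtime macros, used here just to demonstrate that the mask and the modulo produce the same wrapped index.

/* Illustrative sketch, assuming a power-of-two buffer length and a
 * mask defined as length - 1 (mirroring the assumed relationship
 * between BAMBOO_MSG_BUF_LENGTH and BAMBOO_MSG_BUF_MASK). */
#include <assert.h>
#include <stdio.h>

#define BUF_LENGTH 1024              /* hypothetical power-of-two length */
#define BUF_MASK   (BUF_LENGTH - 1)  /* hypothetical mask, length - 1 */

int main(void) {
  /* the mask form is only valid when the length is a power of two */
  assert((BUF_LENGTH & (BUF_LENGTH - 1)) == 0);
  for (int i = 0; i < 3 * BUF_LENGTH; i++) {
    /* wrapped indices computed both ways agree */
    assert((i & BUF_MASK) == (i % BUF_LENGTH));
  }
  printf("mask and modulo agree for length %d\n", BUF_LENGTH);
  return 0;
}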
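
Usage note on the new buildscript option (the full build command is not shown in this patch, so the invocation is illustrative): -gccacheadaptpolicy consumes the next argument as the policy number, e.g. passing "-gccacheadapt -gccacheadaptpolicy 2" appends -DGC_CACHE_ADAPT_POLICY2 to TILERACFLAGS, which makes cacheAdapt_master() select cacheAdapt_policy_local(); the value 2 here is only an example, any of the six policy numbers wired up in multicoregarbage.c may be given.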