#ifdef GC_CACHE_ADAPT
#include "multicorecache.h"
#include "multicoremsg.h"
+#include "multicoregc.h"
#include "multicoregcprofile.h"
void cacheadapt_finish_compact(void *toptr) {
- unsigned int dstpage=(toptr-gcbaseva)>>BAMBOO_PAGE_SIZE_BITS;
+ unsigned int dstpage=((unsigned INTPTR)(toptr-gcbaseva))>>BAMBOO_PAGE_SIZE_BITS;
unsigned int * newtable=&gccachesamplingtbl_r[dstpage*NUMCORESACTIVE];
for(int core = 0; core < NUMCORESACTIVE; core++) {
}
void cacheadapt_finish_src_page(void *srcptr, void *tostart, void *tofinish) {
- unsigned int srcpage=(srcptr-gcbaseva)>>BAMBOO_PAGE_SIZE_BITS;
- unsigned int dstpage=(tostart-gcbaseva)>>BAMBOO_PAGE_SIZE_BITS;
+ unsigned int srcpage=((unsigned INTPTR)(srcptr-gcbaseva))>>BAMBOO_PAGE_SIZE_BITS;
+ unsigned int dstpage=((unsigned INTPTR)(tostart-gcbaseva))>>BAMBOO_PAGE_SIZE_BITS;
unsigned int numbytes=tofinish-tostart;
unsigned int * oldtable=&gccachesamplingtbl[srcpage*NUMCORESACTIVE];
void *tobound=(void *)((((unsigned INTPTR)toptr-1)&~(BAMBOO_PAGE_SIZE-1))+BAMBOO_PAGE_SIZE);
void *origbound=(void *)((((unsigned INTPTR)origptr)&~(BAMBOO_PAGE_SIZE-1))+BAMBOO_PAGE_SIZE);
- unsigned int topage=(toptr-1-gcbaseva)>>BAMBOO_PAGE_SIZE_BITS;
- unsigned int origpage=(origptr-gcbaseva)>>BAMBOO_PAGE_SIZE_BITS;
+ unsigned int topage=((unsigned INTPTR)(toptr-1-gcbaseva))>>BAMBOO_PAGE_SIZE_BITS;
+ unsigned int origpage=((unsigned INTPTR)(origptr-gcbaseva))>>BAMBOO_PAGE_SIZE_BITS;
unsigned int * totable=&gccachesamplingtbl_r[topage*NUMCORESACTIVE];
unsigned int * origtable=&gccachesamplingtbl[origpage*NUMCORESACTIVE];
bytesneeded-=remaintobytes;
topage++;//to page is definitely done
tobound+=BAMBOO_PAGE_SIZE;
- origpage=(origptr-gcbaseva)>>BAMBOO_PAGE_SIZE_BITS;//handle exact match case
+ origpage=((unsigned INTPTR)(origptr-gcbaseva))>>BAMBOO_PAGE_SIZE_BITS;//handle exact match case
origbound=(void *) ((((unsigned INTPTR)origptr)&~(BAMBOO_PAGE_SIZE-1))+BAMBOO_PAGE_SIZE);
} else {
//Finishing off orig page
// -- clean dtlb entries
// -- change cache strategy
void cacheAdapt_gc(bool isgccachestage) {
+#ifdef GC_CACHE_COHERENT_ON
+ if(!isgccachestage) {
+ // get out of GC
+#if (defined(GC_CACHE_ADAPT_POLICY4)||defined(GC_CACHE_ADAPT_POLICY3))
+ // flush the shared heap
+ BAMBOO_CACHE_FLUSH_L2();
+
+ // clean the dtlb entries
+ BAMBOO_CLEAN_DTLB();
+#endif
+ }
+#else
// flush the shared heap
BAMBOO_CACHE_FLUSH_L2();
} else {
bamboo_install_dtlb_handler_for_mutator();
}
+#endif
}
// the master core decides how to adapt cache strategy for the mutator
if(hotfreq != 0) {
// locally cache the page in the hottest core
CACHEADAPT_POLICY_SET_HOST_CORE(policy, hottestcore);
+ } else {
+ // reset it to be homed by its host core
+ unsigned int block = 0;
+ BLOCKINDEX(block, (void *) page_sva);
+ unsigned int coren = gc_block2core[block%(NUMCORES4GC*2)];
+ CACHEADAPT_POLICY_SET_HOST_CORE(policy, coren);
}
CACHEADAPT_CHANGE_POLICY_4_PAGE(tmp_p,page_index,policy);
page_sva += BAMBOO_PAGE_SIZE;
}
}
-#define GC_CACHE_ADAPT_DOMINATE_THRESHOLD 1
+#define GC_CACHE_ADAPT_DOMINATE_THRESHOLD 2
// cache the page on the core that accesses it the most if that core accesses
// it more than 1/(2^GC_CACHE_ADAPT_DOMINATE_THRESHOLD) of the total accesses
// (i.e. 25% with the current threshold of 2). Otherwise, h4h the page.
// Format: page start va + cache policy
if(hotfreq != 0) {
totalfreq=totalfreq>>GC_CACHE_ADAPT_DOMINATE_THRESHOLD;
- if((unsigned int)hotfreq < (unsigned int)totalfreq) {
+ if(hotfreq < totalfreq) {
// use hfh
policy.cache_mode = BAMBOO_CACHE_MODE_HASH;
/*unsigned int block = 0;
// locally cache the page in the hottest core
CACHEADAPT_POLICY_SET_HOST_CORE(policy, hottestcore);
}
+ } else {
+ // reset it to be homed by its host core
+ unsigned int block = 0;
+ BLOCKINDEX(block, (void *) page_sva);
+ unsigned int coren = gc_block2core[block%(NUMCORES4GC*2)];
+ CACHEADAPT_POLICY_SET_HOST_CORE(policy, coren);
}
CACHEADAPT_CHANGE_POLICY_4_PAGE(tmp_p,page_index,policy);
page_sva += BAMBOO_PAGE_SIZE;
// check the statistic data
// for each page, decide the new cache strategy
#ifdef GC_CACHE_ADAPT_POLICY1
- cacheAdapt_policy_h4h(coren);
-#elif defined GC_CACHE_ADAPT_POLICY2
- cacheAdapt_policy_local(coren);
-#elif defined GC_CACHE_ADAPT_POLICY3
+ // cacheAdapt_policy_h4h(coren);
+#elif defined(GC_CACHE_ADAPT_POLICY2)
+ //cacheAdapt_policy_local(coren);
+#elif defined(GC_CACHE_ADAPT_POLICY3)
cacheAdapt_policy_hottest(coren);
-#elif defined GC_CACHE_ADAPT_POLICY4
+#elif defined(GC_CACHE_ADAPT_POLICY4)
cacheAdapt_policy_dominate(coren);
#endif
}
// adapt the cache strategy for the mutator
void cacheAdapt_mutator() {
+#if (defined(GC_CACHE_ADAPT_POLICY4)||defined(GC_CACHE_ADAPT_POLICY3))
BAMBOO_CACHE_MF();
// check the changes and adapt them
unsigned int * tmp_p = gccachepolicytbl;
}
tmp_p += 1;
}
+#endif
}
// Cache adapt phase process for clients
//send init finish msg to core coordinator
send_msg_2(STARTUPCORE, GCFINISHPREF, BAMBOO_NUM_OF_CORE);
GC_PRINTF("Finish prefinish phase\n");
+
+#if (defined(GC_CACHE_ADAPT_POLICY4)||defined(GC_CACHE_ADAPT_POLICY3))
CACHEADAPT_SAMPLING_RESET();
if(BAMBOO_NUM_OF_CORE < NUMCORESACTIVE) {
// zero out the gccachesamplingtbl
BAMBOO_MEMSET_WH(gccachesamplingtbl_local,0,size_cachesamplingtbl_local);
BAMBOO_MEMSET_WH(gccachesamplingtbl_local_r,0,size_cachesamplingtbl_local_r);
}
+#endif
}
extern unsigned long long gc_output_cache_policy_time;
// Cache adapt phase process for the master
void cacheAdapt_phase_master() {
- GCPROFILE_ITEM();
+ GCPROFILE_ITEM_MASTER();
unsigned long long tmpt = BAMBOO_GET_EXE_TIME();
CACHEADAPT_OUTPUT_CACHE_SAMPLING_R();
gc_output_cache_policy_time += (BAMBOO_GET_EXE_TIME()-tmpt);
cacheAdapt_gc(false);
GC_CHECK_ALL_CORE_STATUS();
+#if (defined(GC_CACHE_ADAPT_POLICY4)||defined(GC_CACHE_ADAPT_POLICY3))
CACHEADAPT_SAMPLING_RESET();
if(BAMBOO_NUM_OF_CORE < NUMCORESACTIVE) {
// zero out the gccachesamplingtbl
BAMBOO_MEMSET_WH(gccachesamplingtbl_local_r,0,size_cachesamplingtbl_local_r);
BAMBOO_MEMSET_WH(gccachepolicytbl,0,size_cachepolicytbl);
}
+#endif
}
// output original cache sampling data for each page