void samplingDataReviseInit(struct moveHelper * orig,struct moveHelper * to) {
// initialize the destination page info
gc_cache_revise_information.to_page_start_va=to->ptr;
- unsigned int toindex=(unsigned INTPTR)(to->base-gcbaseva)/BAMBOO_PAGE_SIZE;
+ unsigned int toindex=(unsigned INTPTR)(to->base-gcbaseva)>>BAMBOO_PAGE_SIZE_BITS;
gc_cache_revise_information.to_page_end_va=gcbaseva+BAMBOO_PAGE_SIZE*(toindex+1);
gc_cache_revise_information.to_page_index=toindex;
// initialize the original page info
- unsigned int origindex=((unsigned INTPTR)(orig->base-gcbaseva))/BAMBOO_PAGE_SIZE;
+ unsigned int origindex=((unsigned INTPTR)(orig->base-gcbaseva))>>BAMBOO_PAGE_SIZE_BITS;
gc_cache_revise_information.orig_page_start_va=orig->ptr;
gc_cache_revise_information.orig_page_end_va=gcbaseva+BAMBOO_PAGE_SIZE*(origindex+1);
gc_cache_revise_information.orig_page_index=origindex;
// compute the impact of the original page(s) on the destination page(s)
samplingDataConvert(current_ptr);
// prepare for a new orig page
- unsigned INTPTR tmp_index=((unsigned INTPTR)(origptr-gcbaseva))/BAMBOO_PAGE_SIZE;
+ unsigned INTPTR tmp_index=((unsigned INTPTR)(origptr-gcbaseva))>>BAMBOO_PAGE_SIZE_BITS;
gc_cache_revise_information.orig_page_start_va=origptr;
gc_cache_revise_information.orig_page_end_va=gcbaseva+BAMBOO_PAGE_SIZE*(tmp_index+1);
gc_cache_revise_information.orig_page_index=tmp_index;
gc_cache_revise_information.to_page_start_va=toptr;
if(closeToPage) {
- unsigned INTPTR to_index=((unsigned INTPTR)(toptr-gcbaseva))/BAMBOO_PAGE_SIZE;
+ unsigned INTPTR to_index=((unsigned INTPTR)(toptr-gcbaseva))>>BAMBOO_PAGE_SIZE_BITS;
gc_cache_revise_information.to_page_end_va=gcbaseva+BAMBOO_PAGE_SIZE*(to_index+1);
gc_cache_revise_information.to_page_index=to_index;
}
// clean the dtlb entries
BAMBOO_CLEAN_DTLB();
- // change the cache strategy
- gccachestage = isgccachestage;
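+ // switch the cache strategy by installing the DTLB handler for the current phase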
+ if(isgccachestage) {
+ bamboo_install_dtlb_handler_for_gc();
+ } else {
+ bamboo_install_dtlb_handler_for_mutator();
+ }
}
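// Illustrative sketch, not part of the patch: the divide-to-shift rewrites
// above are only equivalent to the old code because BAMBOO_PAGE_SIZE is a
// power of two, i.e. BAMBOO_PAGE_SIZE == (1 << BAMBOO_PAGE_SIZE_BITS).
// The hypothetical helper below spells out the page-index computation for an
// offset that has already been formed as (addr - gcbaseva); with the 64K
// default, 200000/65536 and 200000>>16 both yield page index 3.
static inline unsigned int bamboo_page_index(unsigned INTPTR offset) {
  // shifting by BAMBOO_PAGE_SIZE_BITS replaces the per-call integer division
  return (unsigned int)(offset >> BAMBOO_PAGE_SIZE_BITS);
}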
// the master core decides how to adapt the cache strategy for the mutator
// make all pages h4h (hash-for-home)
void cacheAdapt_policy_h4h(int coren){
- unsigned int page_num=(BAMBOO_SHARED_MEM_SIZE)/(BAMBOO_PAGE_SIZE);
+ unsigned int page_num=(BAMBOO_SHARED_MEM_SIZE)>>(BAMBOO_PAGE_SIZE_BITS);
unsigned int page_gap=page_num/NUMCORESACTIVE;
unsigned int page_index=page_gap*coren;
unsigned int page_index_end=(coren==NUMCORESACTIVE-1)?page_num:(page_index+page_gap);
// make all pages local, as in the non-cache-adaptable gc local mode
void cacheAdapt_policy_local(int coren){
- unsigned int page_num=(BAMBOO_SHARED_MEM_SIZE)/(BAMBOO_PAGE_SIZE);
+ unsigned int page_num=(BAMBOO_SHARED_MEM_SIZE)>>(BAMBOO_PAGE_SIZE_BITS);
unsigned int page_gap=page_num/NUMCORESACTIVE;
unsigned int page_index=page_gap*coren;
unsigned int page_index_end=(coren==NUMCORESACTIVE-1)?page_num:(page_index+page_gap);
}
void cacheAdapt_policy_hottest(int coren){
- unsigned int page_num=(BAMBOO_SHARED_MEM_SIZE)/(BAMBOO_PAGE_SIZE);
+ unsigned int page_num=(BAMBOO_SHARED_MEM_SIZE)>>(BAMBOO_PAGE_SIZE_BITS);
unsigned int page_gap=page_num/NUMCORESACTIVE;
unsigned int page_index=page_gap*coren;
unsigned int page_index_end=(coren==NUMCORESACTIVE-1)?page_num:(page_index+page_gap);
// it more than (GC_CACHE_ADAPT_DOMINATE_THRESHOLD)% of the total. Otherwise,
// h4h the page.
void cacheAdapt_policy_dominate(int coren){
- unsigned int page_num=(BAMBOO_SHARED_MEM_SIZE)/(BAMBOO_PAGE_SIZE);
+ unsigned int page_num=(BAMBOO_SHARED_MEM_SIZE)>>(BAMBOO_PAGE_SIZE_BITS);
unsigned int page_gap=page_num/NUMCORESACTIVE;
unsigned int page_index=page_gap*coren;
unsigned int page_index_end=(coren==NUMCORESACTIVE-1)?page_num:(page_index+page_gap);
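// Illustrative sketch, not part of the patch: every policy above splits the
// page_num shared pages evenly across the active cores, with the last core
// also absorbing the remainder. The hypothetical helper below reproduces that
// split; e.g. with page_num = 62 and NUMCORESACTIVE = 4, cores 0-2 scan
// [0,15), [15,30), [30,45) and core 3 scans [45,62).
static void bamboo_policy_page_range(unsigned int page_num, int coren,
                                     unsigned int * start, unsigned int * end) {
  unsigned int page_gap = page_num / NUMCORESACTIVE;
  *start = page_gap * coren;
  *end = (coren == NUMCORESACTIVE - 1) ? page_num : (*start + page_gap);
}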
// output original cache sampling data for each page
void gc_output_cache_sampling() {
- //extern volatile bool gc_profile_flag;
- //if(!gc_profile_flag) return;
+ extern volatile bool gc_profile_flag;
+ if(!gc_profile_flag) return;
unsigned int page_index = 0;
VA page_sva = 0;
- unsigned int page_num = (BAMBOO_SHARED_MEM_SIZE) / (BAMBOO_PAGE_SIZE);
+ unsigned int page_num = (BAMBOO_SHARED_MEM_SIZE) >> (BAMBOO_PAGE_SIZE_BITS);
for(page_index = 0; page_index < page_num; page_index++) {
page_sva = gcbaseva + (BAMBOO_PAGE_SIZE) * page_index;
unsigned int block = 0;
// output revised cache sampling data for each page after compaction
void gc_output_cache_sampling_r() {
- //extern volatile bool gc_profile_flag;
- //if(!gc_profile_flag) return;
+ extern volatile bool gc_profile_flag;
+ if(!gc_profile_flag) return;
// TODO summary data
unsigned int sumdata[NUMCORESACTIVE][NUMCORESACTIVE];
for(int i = 0; i < NUMCORESACTIVE; i++) {
tprintf("cache sampling_r \n");
unsigned int page_index = 0;
VA page_sva = 0;
- unsigned int page_num = (BAMBOO_SHARED_MEM_SIZE) / (BAMBOO_PAGE_SIZE);
+ unsigned int page_num = (BAMBOO_SHARED_MEM_SIZE) >> (BAMBOO_PAGE_SIZE_BITS);
for(page_index = 0; page_index < page_num; page_index++) {
page_sva = gcbaseva + (BAMBOO_PAGE_SIZE) * page_index;
unsigned int block = 0;
#define BAMBOO_SHARED_RUNTIME_PAGE_SIZE (1<<24) //16M
+#ifdef GC_LARGEPAGESIZE
+#define BAMBOO_PAGE_SIZE ((unsigned int)(4 * 64 * 1024)) // 256K
+#define BAMBOO_PAGE_SIZE_BITS (18)
+#elif defined GC_LARGEPAGESIZE2
+#define BAMBOO_PAGE_SIZE ((unsigned int)(4 * 64 * 1024)) // 256K
+#define BAMBOO_PAGE_SIZE_BITS (18)
+#else
+#define BAMBOO_PAGE_SIZE ((unsigned int)(64 * 1024)) // 64K
+#define BAMBOO_PAGE_SIZE_BITS (16)
+#endif
+
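// Illustrative sketch, not part of the patch: each (BAMBOO_PAGE_SIZE,
// BAMBOO_PAGE_SIZE_BITS) pair above must satisfy
// BAMBOO_PAGE_SIZE == (1 << BAMBOO_PAGE_SIZE_BITS), otherwise the shift-based
// page-index math introduced by this patch diverges from the old division.
// A hypothetical C89-style compile-time guard (a plain #if cannot be used
// here because the size macro contains a cast):
typedef char bamboo_page_size_bits_check
    [(BAMBOO_PAGE_SIZE == (1u << BAMBOO_PAGE_SIZE_BITS)) ? 1 : -1];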
#ifdef GC_DEBUG
#include "structdefs.h"
#define BAMBOO_NUM_BLOCKS (NUMCORES4GC*(2+3))
#else
#define BAMBOO_NUM_BLOCKS ((unsigned int)((GC_BAMBOO_NUMCORES)*(2+14)))
#endif
-#define BAMBOO_PAGE_SIZE ((unsigned int)(64 * 1024)) // 64K
-#define BAMBOO_PAGE_SIZE_BITS (16)
#ifdef GC_LARGEPAGESIZE
-#define BAMBOO_PAGE_SIZE ((unsigned int)(4 * 64 * 1024))
-#define BAMBOO_PAGE_SIZE_BITS (18)
#define BAMBOO_SMEM_SIZE ((unsigned int)(4 * (BAMBOO_PAGE_SIZE)))
#elif defined GC_SMALLPAGESIZE
#define BAMBOO_SMEM_SIZE ((unsigned int)(BAMBOO_PAGE_SIZE))
#elif defined GC_SMALLPAGESIZE2
-//#define BAMBOO_PAGE_SIZE ((unsigned int)(16 * 1024)) // (4096)
#define BAMBOO_SMEM_SIZE ((unsigned int)(BAMBOO_PAGE_SIZE))
#elif defined GC_LARGEPAGESIZE2
-#define BAMBOO_PAGE_SIZE ((unsigned int)(4 * 64 * 1024)) // 64K
-#define BAMBOO_PAGE_SIZE_BITS (18)
#define BAMBOO_SMEM_SIZE ((unsigned int)(BAMBOO_PAGE_SIZE))
#elif defined MGC
#define BAMBOO_SMEM_SIZE ((unsigned int)(16*(BAMBOO_PAGE_SIZE))) // 1M
#define BAMBOO_NUM_BLOCKS ((unsigned int)((GC_BAMBOO_NUMCORES)*(2+2))) //(15 * 1024) //(64 * 4 * 0.75) //(1024 * 1024 * 3.5) 3G
#endif
#ifdef GC_LARGEPAGESIZE
-#define BAMBOO_PAGE_SIZE ((unsigned int)(4 * 1024 * 1024)) // (4096)
-#define BAMBOO_PAGE_SIZE_BITS (22)
-#define BAMBOO_SMEM_SIZE ((unsigned int)(BAMBOO_PAGE_SIZE))
+#define BAMBOO_SMEM_SIZE ((unsigned int)(BAMBOO_PAGE_SIZE*16))
#elif defined GC_SMALLPAGESIZE
#define BAMBOO_PAGE_SIZE ((unsigned int)(256 * 1024)) // 256K
#define BAMBOO_PAGE_SIZE_BITS (18)
#define BAMBOO_SMEM_SIZE ((unsigned int)(BAMBOO_PAGE_SIZE))
#else
-#define BAMBOO_PAGE_SIZE ((unsigned int)(1024 * 1024)) // (4096)
-#define BAMBOO_PAGE_SIZE_BITS (20)
-#define BAMBOO_SMEM_SIZE ((unsigned int)(BAMBOO_PAGE_SIZE))
+#define BAMBOO_SMEM_SIZE ((unsigned int)(BAMBOO_PAGE_SIZE*16))
#endif // GC_LARGEPAGESIZE
#define BAMBOO_SHARED_MEM_SIZE ((unsigned int)((BAMBOO_SMEM_SIZE) * (BAMBOO_NUM_BLOCKS))) //(1024 * 1024 * 240) //((unsigned long long int)(3.0 * 1024 * 1024 * 1024)) // 3G
#endif // GC_DEBUG
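// Illustrative arithmetic, not part of the patch: with the default 64K page
// (BAMBOO_PAGE_SIZE_BITS = 16) and BAMBOO_SMEM_SIZE = 16 pages = 1M per
// block, BAMBOO_SHARED_MEM_SIZE is BAMBOO_SMEM_SIZE * BAMBOO_NUM_BLOCKS, so
// the cache-adapt policies see
//   page_num = BAMBOO_SHARED_MEM_SIZE >> BAMBOO_PAGE_SIZE_BITS
//            = 16 * BAMBOO_NUM_BLOCKS
// sampled pages, i.e. 16 pages of cache-sampling data per shared block.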