Missing changes
[IRC.git] / Robust / src / Runtime / bamboo / multicorecache.c
index 812a77893a0a09e270e7f80d5d60bb84e059a0b4..6c41b9f49adfecd46f5d7cee91c04dfd12fb7c66 100644 (file)
 #ifdef GC_CACHE_ADAPT
 #include "multicorecache.h"
+#include "multicoremsg.h"
+#include "multicoregc.h"
+#include "multicoregcprofile.h"
+
+void cacheadapt_finish_compact(void *toptr) {
+  unsigned int dstpage=((unsigned INTPTR)(toptr-gcbaseva))>>BAMBOO_PAGE_SIZE_BITS;
+  unsigned int * newtable=&gccachesamplingtbl_r[dstpage*NUMCORESACTIVE];
+
+  for(int core = 0; core < NUMCORESACTIVE; core++) {
+    (*newtable)=(*newtable)>>6;
+    newtable++;
+  }  
+}
+
+void cacheadapt_finish_src_page(void *srcptr, void *tostart, void *tofinish) {
+  unsigned int srcpage=((unsigned INTPTR)(srcptr-gcbaseva))>>BAMBOO_PAGE_SIZE_BITS;
+  unsigned int dstpage=((unsigned INTPTR)(tostart-gcbaseva))>>BAMBOO_PAGE_SIZE_BITS;
+  unsigned int numbytes=tofinish-tostart;
+  
+  unsigned int * oldtable=&gccachesamplingtbl[srcpage*NUMCORESACTIVE];
+  unsigned int * newtable=&gccachesamplingtbl_r[dstpage*NUMCORESACTIVE];
+  
+  unsigned int page64th=numbytes>>(BAMBOO_PAGE_SIZE_BITS-6);
 
-typedef struct gc_cache_revise_info {
-  unsigned int orig_page_start_va;
-  unsigned int orig_page_end_va;
-  unsigned int orig_page_index;
-  unsigned int to_page_start_va;
-  unsigned int to_page_end_va;
-  unsigned int to_page_index;
-  unsigned int revised_sampling[NUMCORESACTIVE];
-} gc_cache_revise_info_t;
-gc_cache_revise_info_t gc_cache_revise_infomation;
-
-INLINE void samplingDataInit() {
-  gc_cache_revise_infomation.to_page_start_va = (unsigned int)to->ptr;
-  unsigned int toindex = (unsigned int)(tobase-gcbaseva)/(BAMBOO_PAGE_SIZE);
-  gc_cache_revise_infomation.to_page_end_va = gcbaseva + 
-    (BAMBOO_PAGE_SIZE)*(toindex+1);
-  gc_cache_revise_infomation.to_page_index = toindex;
-  gc_cache_revise_infomation.orig_page_start_va = (unsigned int)orig->ptr;
-  gc_cache_revise_infomation.orig_page_end_va = gcbaseva+(BAMBOO_PAGE_SIZE)
-  *(((unsigned int)(orig->ptr)-gcbaseva)/(BAMBOO_PAGE_SIZE)+1);
-  gc_cache_revise_infomation.orig_page_index = 
-    ((unsigned int)(orig->blockbase)-gcbaseva)/(BAMBOO_PAGE_SIZE);
+  for(int core = 0; core < NUMCORESACTIVE; core++) {
+    (*newtable)+=page64th*(*oldtable);
+    newtable++;
+    oldtable++;
+  }  
 }
 
-INLINE void samplingDataConvert(unsigned int current_ptr) {
-  unsigned int tmp_factor = 
-  current_ptr-gc_cache_revise_infomation.to_page_start_va;
-  unsigned int topage=gc_cache_revise_infomation.to_page_index;
-  unsigned int oldpage = gc_cache_revise_infomation.orig_page_index;
-  int * newtable=&gccachesamplingtbl_r[topage];
-  int * oldtable=&gccachesamplingtbl[oldpage];
+/* A bytesneeded value of zero is a special case: it means we should finish off the dst page */
+
+void cacheadapt_finish_dst_page(void *origptr, void *tostart, void *toptr, unsigned int bytesneeded) {
+  unsigned int numbytes=toptr-tostart;
+
+  void *tobound=(void *)((((unsigned INTPTR)toptr-1)&~(BAMBOO_PAGE_SIZE-1))+BAMBOO_PAGE_SIZE);
+  void *origbound=(void *)((((unsigned INTPTR)origptr)&~(BAMBOO_PAGE_SIZE-1))+BAMBOO_PAGE_SIZE);
   
-  for(int tt = 0; tt < NUMCORESACTIVE; tt++) {
-    (*newtable) = ((*newtable)+(*oldtable)*tmp_factor);
-    newtable=(int*)(((char *)newtable)+size_cachesamplingtbl_local_r);
-    oldtable=(int*) (((char *)oldtable)+size_cachesamplingtbl_local);
-  }
-} 
+  unsigned int topage=((unsigned INTPTR)(toptr-1-gcbaseva))>>BAMBOO_PAGE_SIZE_BITS; 
+  unsigned int origpage=((unsigned INTPTR)(origptr-gcbaseva))>>BAMBOO_PAGE_SIZE_BITS;
 
-INLINE void completePageConvert(struct moveHelper * orig,
-                                struct moveHelper * to,
-                                unsigned int current_ptr,
-                                bool closeToPage) {
-  unsigned int ptr = 0;
-  unsigned int tocompare = 0;
-  if(closeToPage) {
-    ptr = to->ptr;
-    tocompare = gc_cache_revise_infomation.to_page_end_va;
-  } else {
-    ptr = orig->ptr;
-    tocompare = gc_cache_revise_infomation.orig_page_end_va;
-  }
-  if((unsigned int)ptr >= (unsigned int)tocompare) {
-    // end of an orig/to page
-    // compute the impact of this page for the new page
-    samplingDataConvert(current_ptr);
-    // prepare for an new orig page
-    unsigned int tmp_index = 
-      (unsigned int)((unsigned int)orig->ptr-gcbaseva)/(BAMBOO_PAGE_SIZE);
-    gc_cache_revise_infomation.orig_page_start_va = orig->ptr;
-    gc_cache_revise_infomation.orig_page_end_va = gcbaseva + 
-      (BAMBOO_PAGE_SIZE)*(unsigned int)(tmp_index+1);
-    gc_cache_revise_infomation.orig_page_index = tmp_index;
-    gc_cache_revise_infomation.to_page_start_va = to->ptr;
-    if(closeToPage) {
-      gc_cache_revise_infomation.to_page_end_va = gcbaseva+(BAMBOO_PAGE_SIZE)
-        *(((unsigned int)(to->ptr)-gcbaseva)/(BAMBOO_PAGE_SIZE)+1);
-      gc_cache_revise_infomation.to_page_index = 
-        ((unsigned int)(to->ptr)-gcbaseva)/(BAMBOO_PAGE_SIZE);
+  unsigned int * totable=&gccachesamplingtbl_r[topage*NUMCORESACTIVE];
+  unsigned int * origtable=&gccachesamplingtbl[origpage*NUMCORESACTIVE];
+
+  // remaining bytes to the next page boundary on the to/orig side
+  unsigned int remaintobytes=(bytesneeded==0)?0:(tobound-toptr);
+  unsigned int remainorigbytes=origbound-origptr;
+
+  do {
+    //round source bytes down....don't want to close out page if not necessary
+    remainorigbytes=(remainorigbytes>bytesneeded)?bytesneeded:remainorigbytes;
+
+    if (remaintobytes<=remainorigbytes) {
+      //Need to close out to page
+
+      numbytes+=remaintobytes;
+      unsigned int page64th=numbytes>>(BAMBOO_PAGE_SIZE_BITS-6);
+
+      for(int core = 0; core < NUMCORESACTIVE; core++) {
+       (*totable)=(*totable+page64th*(*origtable))>>6;
+       totable++;
+       origtable++;
+      }
+      toptr+=remaintobytes;
+      origptr+=remaintobytes;
+      bytesneeded-=remaintobytes;
+      topage++;//to page is definitely done
+      tobound+=BAMBOO_PAGE_SIZE;
+      origpage=((unsigned INTPTR)(origptr-gcbaseva))>>BAMBOO_PAGE_SIZE_BITS;//handle exact match case
+      origbound=(void *) ((((unsigned INTPTR)origptr)&~(BAMBOO_PAGE_SIZE-1))+BAMBOO_PAGE_SIZE);
+    } else {
+      //Finishing off orig page
+
+      numbytes+=remainorigbytes;
+      unsigned int page64th=numbytes>>(BAMBOO_PAGE_SIZE_BITS-6);
+      
+      for(int core = 0; core < NUMCORESACTIVE; core++) {
+       (*totable)+=page64th*(*origtable);
+       totable++;
+       origtable++;
+      }
+      toptr+=remainorigbytes;
+      origptr+=remainorigbytes;
+      bytesneeded-=remainorigbytes;
+      origpage++;//just orig page is done
+      origbound+=BAMBOO_PAGE_SIZE;
     }
-  }
-} 
+    totable=&gccachesamplingtbl_r[topage*NUMCORESACTIVE];
+    origtable=&gccachesamplingtbl[origpage*NUMCORESACTIVE];
+    
+    remaintobytes=tobound-toptr;
+    remainorigbytes=origbound-origptr;
+    
+    numbytes=0;
+  } while(bytesneeded!=0);
+}
 
 // prepare for cache adaption:
 //   -- flush the shared heap
 //   -- clean dtlb entries
 //   -- change cache strategy
 void cacheAdapt_gc(bool isgccachestage) {
+#ifdef GC_CACHE_COHERENT_ON
+  if(!isgccachestage) {
+    // get out of GC
+#if (defined(GC_CACHE_ADAPT_POLICY4)||defined(GC_CACHE_ADAPT_POLICY3))
+    // flush the shared heap
+    BAMBOO_CACHE_FLUSH_L2();
+
+    // clean the dtlb entries
+    BAMBOO_CLEAN_DTLB();
+#endif
+  } 
+#else
   // flush the shared heap
   BAMBOO_CACHE_FLUSH_L2();
 
   // clean the dtlb entries
   BAMBOO_CLEAN_DTLB();
 
-  // change the cache strategy
-  gccachestage = isgccachestage;
+  if(isgccachestage) {
+    bamboo_install_dtlb_handler_for_gc();
+  } else {
+    bamboo_install_dtlb_handler_for_mutator();
+  }
+#endif
 } 
 
 // the master core decides how to adapt cache strategy for the mutator 
 // according to collected statistic data
 
+// find the core that accesses the page #page_index most
+#define CACHEADAPT_FIND_HOTTEST_CORE(page_index,hottestcore,hotfreq) \
+  { \
+    unsigned int *local_tbl=&gccachesamplingtbl_r[page_index*NUMCORESACTIVE];  \
+    for(int i = 0; i < NUMCORESACTIVE; i++) { \
+      int freq = *local_tbl; \
+      local_tbl++; \
+      if(hotfreq < freq) { \
+        hotfreq = freq; \
+        hottestcore = i; \
+      } \
+    } \
+  }
+// find the core that accesses the page #page_index most and compute the total
+// access count of the page at the same time
+#define CACHEADAPT_FIND_HOTTEST_CORE_W_TOTALFREQ(page_index,hottestcore,hotfreq,totalfreq) \
+  { \
+    unsigned int *local_tbl=&gccachesamplingtbl_r[page_index*NUMCORESACTIVE];  \
+    for(int i = 0; i < NUMCORESACTIVE; i++) { \
+      int freq = *local_tbl; \
+      local_tbl++; \
+      totalfreq += freq; \
+      if(hotfreq < freq) { \
+        hotfreq = freq; \
+        hottestcore = i; \
+      } \
+    } \
+  }
+// Set the policy as hosted by coren
+// NOTE: (x,y) should be changed to (x+1, y+1)!!!
+#define CACHEADAPT_POLICY_SET_HOST_CORE(policy, coren) \
+  { \
    (policy).cache_mode = BAMBOO_CACHE_MODE_COORDS; \
+    (policy).lotar_x = bamboo_cpu2coords[2*(coren)]+1; \
+    (policy).lotar_y = bamboo_cpu2coords[2*(coren)+1]+1; \
+  }
+// store the new policy information at tmp_p in gccachepolicytbl
+#define CACHEADAPT_CHANGE_POLICY_4_PAGE(tmp_p,page_index,policy) \
+  { \
+    ((int*)(tmp_p))[page_index] = (policy).word; \
+  }
+
 // make all pages hfh
-int cacheAdapt_policy_h4h(){
-  unsigned int page_index = 0;
-  VA page_sva = 0;
-  unsigned int page_num = (BAMBOO_SHARED_MEM_SIZE) / (BAMBOO_PAGE_SIZE);
-  unsigned int numchanged = 0;
-  int * tmp_p = gccachepolicytbl+1;
-  for(page_index = 0; page_index < page_num; page_index++) {
-    page_sva = gcbaseva + (BAMBOO_PAGE_SIZE) * page_index;
+void cacheAdapt_policy_h4h(int coren){
+  unsigned int page_num=(BAMBOO_SHARED_MEM_SIZE)>>(BAMBOO_PAGE_SIZE_BITS);
+  unsigned int page_gap=page_num/NUMCORESACTIVE;
+  unsigned int page_index=page_gap*coren;
+  unsigned int page_index_end=(coren==NUMCORESACTIVE-1)?page_num:(page_index+page_gap);
+  VA page_sva = gcbaseva+(BAMBOO_PAGE_SIZE)*page_index;
+  unsigned int * tmp_p = gccachepolicytbl;
+  for(; page_index < page_index_end; page_index++) {
     bamboo_cache_policy_t policy = {0};
     policy.cache_mode = BAMBOO_CACHE_MODE_HASH;
-    *tmp_p = page_index;
-    tmp_p++;
-    *tmp_p = policy.word;
-    tmp_p++;
-    numchanged++;
+    CACHEADAPT_CHANGE_POLICY_4_PAGE(tmp_p,page_index,policy);
+    page_sva += BAMBOO_PAGE_SIZE;
   }
-
-  return numchanged;
 } 
 
 // make all pages local as non-cache-adaptable gc local mode
-int cacheAdapt_policy_local(){
-  unsigned int page_index = 0;
-  VA page_sva = 0;
-  unsigned int page_num = (BAMBOO_SHARED_MEM_SIZE) / (BAMBOO_PAGE_SIZE);
-  unsigned int numchanged = 0;
-  int * tmp_p = gccachepolicytbl+1;
-  for(page_index = 0; page_index < page_num; page_index++) {
-    page_sva = gcbaseva + (BAMBOO_PAGE_SIZE) * page_index;
+void cacheAdapt_policy_local(int coren){
+  unsigned int page_num=(BAMBOO_SHARED_MEM_SIZE)>>(BAMBOO_PAGE_SIZE_BITS);
+  unsigned int page_gap=page_num/NUMCORESACTIVE;
+  unsigned int page_index=page_gap*coren;
+  unsigned int page_index_end=(coren==NUMCORESACTIVE-1)?page_num:(page_index+page_gap);
+  VA page_sva = gcbaseva+(BAMBOO_PAGE_SIZE)*page_index;
+  unsigned int * tmp_p = gccachepolicytbl;
+  for(; page_index < page_index_end; page_index++) {
     bamboo_cache_policy_t policy = {0};
     unsigned int block = 0;
-    BLOCKINDEX(page_sva, &block);
+    BLOCKINDEX(block, (void *) page_sva);
     unsigned int coren = gc_block2core[block%(NUMCORES4GC*2)];
-    // locally cache the page in the hotest core
-    // NOTE: (x,y) should be changed to (x+1, y+1)!!!
-    policy.cache_mode = BAMBOO_CACHE_MODE_COORDS;
-    policy.lotar_x = bamboo_cpu2coords[2*coren]+1;
-    policy.lotar_y = bamboo_cpu2coords[2*coren+1]+1;
-    *tmp_p = page_index;
-    tmp_p++;
-    *tmp_p = policy.word;
-    tmp_p++;
-    numchanged++;
+    CACHEADAPT_POLICY_SET_HOST_CORE(policy, coren);
+    CACHEADAPT_CHANGE_POLICY_4_PAGE(tmp_p,page_index,policy);
+    page_sva += BAMBOO_PAGE_SIZE;
   }
-
-  return numchanged;
 } 
 
-int cacheAdapt_policy_hotest(){
-  unsigned int page_index = 0;
-  VA page_sva = 0;
-  unsigned int page_num = (BAMBOO_SHARED_MEM_SIZE) / (BAMBOO_PAGE_SIZE);
-  unsigned int numchanged = 0;
-  int * tmp_p = gccachepolicytbl+1;
-  for(page_index = 0; page_index < page_num; page_index++) {
-    page_sva = gcbaseva + (BAMBOO_PAGE_SIZE) * page_index;
+void cacheAdapt_policy_hottest(int coren){
+  unsigned int page_num=(BAMBOO_SHARED_MEM_SIZE)>>(BAMBOO_PAGE_SIZE_BITS);
+  unsigned int page_gap=page_num/NUMCORESACTIVE;
+  unsigned int page_index=page_gap*coren;
+  unsigned int page_index_end=(coren==NUMCORESACTIVE-1)?page_num:(page_index+page_gap);
+  VA page_sva = gcbaseva+(BAMBOO_PAGE_SIZE)*page_index;
+  unsigned int * tmp_p = gccachepolicytbl;
+  for(; page_index < page_index_end; page_index++) {
     bamboo_cache_policy_t policy = {0};
-    unsigned int hotestcore = 0;
+    unsigned int hottestcore = 0;
     unsigned int hotfreq = 0;
-
-    int *local_tbl=&gccachesamplingtbl_r[page_index];
-    for(int i = 0; i < NUMCORESACTIVE; i++) {
-      int freq = *local_tbl;
-      local_tbl=(int *)(((char *)local_tbl)+size_cachesamplingtbl_local_r);
-
-      // check the freqency, decide if this page is hot for the core
-      if(hotfreq < freq) {
-        hotfreq = freq;
-        hotestcore = i;
-      }
-    }
+    CACHEADAPT_FIND_HOTTEST_CORE(page_index,hottestcore,hotfreq);
     // TODO
     // Decide the cache strategy for this page
     // If decide to adapt a new cache strategy, write into the shared block of
     // the gcsharedsamplingtbl. The mem recording information that has been 
     // written is enough to hold the information.
     // Format: page start va + cache strategy(hfh/(host core+[x,y]))
-    if(hotfreq == 0) {
-      // this page has not been accessed, do not change its cache policy
-      continue;
+    if(hotfreq != 0) {
+      // locally cache the page in the hottest core
+      CACHEADAPT_POLICY_SET_HOST_CORE(policy, hottestcore);
     } else {
-      // locally cache the page in the hotest core
-      // NOTE: (x,y) should be changed to (x+1, y+1)!!!
-      policy.cache_mode = BAMBOO_CACHE_MODE_COORDS;
-      policy.lotar_x = bamboo_cpu2coords[2*hotestcore]+1;
-      policy.lotar_y = bamboo_cpu2coords[2*hotestcore+1]+1;
-      *tmp_p = page_index;
-      tmp_p++;
-      *tmp_p = policy.word;
-      tmp_p++;
-      numchanged++;
+      // reset it to be homed by its host core
+      unsigned int block = 0;
+      BLOCKINDEX(block, (void *) page_sva);
+      unsigned int coren = gc_block2core[block%(NUMCORES4GC*2)];
+      CACHEADAPT_POLICY_SET_HOST_CORE(policy, coren);
     }
+    CACHEADAPT_CHANGE_POLICY_4_PAGE(tmp_p,page_index,policy);
+    page_sva += BAMBOO_PAGE_SIZE;
   }
-
-  return numchanged;
 } 
 
-#define GC_CACHE_ADAPT_DOMINATE_THRESHOLD  50
+#define GC_CACHE_ADAPT_DOMINATE_THRESHOLD  2
 // cache the page on the core that accesses it the most if that core accesses 
 // it more than (GC_CACHE_ADAPT_DOMINATE_THRESHOLD)% of the total.  Otherwise,
 // h4h the page.
-int cacheAdapt_policy_dominate(){
-  unsigned int page_index = 0;
-  VA page_sva = 0;
-  unsigned int page_num = (BAMBOO_SHARED_MEM_SIZE) / (BAMBOO_PAGE_SIZE);
-  unsigned int numchanged = 0;
-  int * tmp_p = gccachepolicytbl+1;
-  for(page_index = 0; page_index < page_num; page_index++) {
-    page_sva = gcbaseva + (BAMBOO_PAGE_SIZE) * page_index;
+void cacheAdapt_policy_dominate(int coren){
+  unsigned int page_num=(BAMBOO_SHARED_MEM_SIZE)>>(BAMBOO_PAGE_SIZE_BITS);
+  unsigned int page_gap=page_num/NUMCORESACTIVE;
+  unsigned int page_index=page_gap*coren;
+  unsigned int page_index_end=(coren==NUMCORESACTIVE-1)?page_num:(page_index+page_gap);
+  VA page_sva = gcbaseva+(BAMBOO_PAGE_SIZE)*page_index;
+  unsigned int * tmp_p = gccachepolicytbl;
+  for(; page_index < page_index_end; page_index++) {
     bamboo_cache_policy_t policy = {0};
-    unsigned int hotestcore = 0;
-    unsigned long long totalfreq = 0;
+    unsigned int hottestcore = 0;
+    unsigned int totalfreq = 0;
     unsigned int hotfreq = 0;
-  
-    int *local_tbl=&gccachesamplingtbl_r[page_index];
-    for(int i = 0; i < NUMCORESACTIVE; i++) {
-      int freq = *local_tbl;
-      local_tbl=(int *)(((char *)local_tbl)+size_cachesamplingtbl_local_r);
-      totalfreq += freq;
-      // check the freqency, decide if this page is hot for the core
-      if(hotfreq < freq) {
-        hotfreq = freq;
-        hotestcore = i;
-      }
-    }
-
+    CACHEADAPT_FIND_HOTTEST_CORE_W_TOTALFREQ(page_index,hottestcore,hotfreq,totalfreq);
     // Decide the cache strategy for this page
     // If decide to adapt a new cache strategy, write into the shared block of
     // the gcpolicytbl 
     // Format: page start va + cache policy
-    if(hotfreq == 0) {
-      // this page has not been accessed, do not change its cache policy
-      continue;
-    }
-    totalfreq = 
-      (totalfreq*GC_CACHE_ADAPT_DOMINATE_THRESHOLD)/100/BAMBOO_PAGE_SIZE;
-    hotfreq/=BAMBOO_PAGE_SIZE;
-    if(hotfreq < totalfreq) {
-      // use hfh
-      policy.cache_mode = BAMBOO_CACHE_MODE_HASH;
-    } else {
-      // locally cache the page in the hotest core
-      // NOTE: (x,y) should be changed to (x+1, y+1)!!!
-      policy.cache_mode = BAMBOO_CACHE_MODE_COORDS;
-      policy.lotar_x = bamboo_cpu2coords[2*hotestcore]+1;
-      policy.lotar_y = bamboo_cpu2coords[2*hotestcore+1]+1;
-    }
-    *tmp_p = page_index;
-    tmp_p++;
-    *tmp_p = policy.word;    
-    tmp_p++;
-    numchanged++;
-  }
-
-  return numchanged;
-}
-
-#define GC_CACHE_ADAPT_OVERLOAD_THRESHOLD 10
-
-void gc_quicksort(unsigned long long *array,
-                  unsigned int left,
-                  unsigned int right,
-                  unsigned int offset) {
-  unsigned int pivot = 0;;
-  unsigned int leftIdx = left;
-  unsigned int rightIdx = right;
-  if((right-left+1) >= 1) {
-    pivot = (left+right)/2;
-    while((leftIdx <= pivot) && (rightIdx >= pivot)) {
-      unsigned long long pivotValue = array[pivot*3-offset];
-      while((array[leftIdx*3-offset] > pivotValue) && (leftIdx <= pivot)) {
-        leftIdx++;
-      }
-      while((array[rightIdx*3-offset] < pivotValue) && (rightIdx >= pivot)) {
-        rightIdx--;
-      }
-      // swap [leftIdx] & [rightIdx]
-      for(int k = 0; k < 3; k++) {
-        unsigned long long tmp = array[3*rightIdx-k];
-        array[3*rightIdx-k] = array[3*leftIdx-k];
-        array[3*leftIdx-k] = tmp;
-      }
-      leftIdx++;
-      rightIdx--;
-      if((leftIdx-1) == pivot) {
-        pivot = rightIdx = rightIdx + 1;
-      } else if((leftIdx+1) == pivot) {
-        pivot = leftIdx = leftIdx-1;
-      }
-    }
-    gc_quicksort(array, left, pivot-1, offset);
-    gc_quicksort(array, pivot+1, right, offset);
-  }
-  return;
-}
-
-// Every page cached on the core that accesses it the most. 
-// Check to see if any core's pages total more accesses than threshold 
-// GC_CACHE_ADAPT_OVERLOAD_THRESHOLD.  If so, find the pages with the 
-// most remote accesses and hash for home them until we get below 
-// GC_CACHE_ADAPT_OVERLOAD_THRESHOLD
-int cacheAdapt_policy_overload(){
-  unsigned int page_index = 0;
-  VA page_sva = 0;
-  unsigned int page_num = (BAMBOO_SHARED_MEM_SIZE) / (BAMBOO_PAGE_SIZE);
-  unsigned int numchanged = 0;
-  int * tmp_p = gccachepolicytbl+1;
-  unsigned long long workload[NUMCORESACTIVE];
-  memset(workload, 0, NUMCORESACTIVE*sizeof(unsigned long long));
-  unsigned long long total_workload = 0;
-  unsigned long long core2heavypages[NUMCORESACTIVE][page_num*3+1];
-  memset(core2heavypages,0,
-      sizeof(unsigned long long)*(page_num*3+1)*NUMCORESACTIVE);
-  for(page_index = 0; page_index < page_num; page_index++) {
-    page_sva = gcbaseva + (BAMBOO_PAGE_SIZE) * page_index;
-    bamboo_cache_policy_t policy = {0};
-    unsigned int hotestcore = 0;
-    unsigned long long totalfreq = 0;
-    unsigned int hotfreq = 0;
-  
-    int *local_tbl=&gccachesamplingtbl_r[page_index];
-    for(int i = 0; i < NUMCORESACTIVE; i++) {
-      int freq = *local_tbl;
-      local_tbl=(int *)(((char *)local_tbl)+size_cachesamplingtbl_local_r);
-      totalfreq += freq;
-      // check the freqency, decide if this page is hot for the core
-      if(hotfreq < freq) {
-        hotfreq = freq;
-        hotestcore = i;
-      }
-    }
-    // Decide the cache strategy for this page
-    // If decide to adapt a new cache strategy, write into the shared block of
-    // the gcsharedsamplingtbl. The mem recording information that has been 
-    // written is enough to hold the information.
-    // Format: page start va + cache strategy(hfh/(host core+[x,y]))
-    if(hotfreq == 0) {
-      // this page has not been accessed, do not change its cache policy
-      continue;
-    }
-
-    totalfreq/=BAMBOO_PAGE_SIZE;
-    hotfreq/=BAMBOO_PAGE_SIZE;
-    // locally cache the page in the hotest core
-    // NOTE: (x,y) should be changed to (x+1, y+1)!!!
-    policy.cache_mode = BAMBOO_CACHE_MODE_COORDS;
-    policy.lotar_x = bamboo_cpu2coords[2*hotestcore]+1;
-    policy.lotar_y = bamboo_cpu2coords[2*hotestcore+1]+1;
-    *tmp_p = page_index;
-    tmp_p++;
-    *tmp_p = policy.word;
-    tmp_p++;
-    numchanged++;
-    workload[hotestcore] += totalfreq;
-    total_workload += totalfreq;
-    // insert into core2heavypages using quicksort
-    unsigned long long remoteaccess = totalfreq - hotfreq;
-    unsigned int index = (unsigned int)core2heavypages[hotestcore][0];
-    core2heavypages[hotestcore][3*index+3] = remoteaccess;
-    core2heavypages[hotestcore][3*index+2] = totalfreq;
-    core2heavypages[hotestcore][3*index+1] = (unsigned long long)(tmp_p-1);
-    core2heavypages[hotestcore][0]++;
-  }
-
-  unsigned long long workload_threshold = 
-  total_workload/GC_CACHE_ADAPT_OVERLOAD_THRESHOLD;
-  // Check the workload of each core
-  for(int i = 0; i < NUMCORESACTIVE; i++) {
-    int j = 1;
-    unsigned int index = (unsigned int)core2heavypages[i][0];
-    if(workload[i] > workload_threshold) {
-      // sort according to the remoteaccess
-      gc_quicksort(&core2heavypages[i][0], 1, index, 0);
-      while((workload[i] > workload_threshold) && (j<index*3)) {
-        // hfh those pages with more remote accesses 
-        bamboo_cache_policy_t policy = {0};
+    if(hotfreq != 0) {
+      totalfreq=totalfreq>>GC_CACHE_ADAPT_DOMINATE_THRESHOLD;
+      if(hotfreq < totalfreq) {
+        // use hfh
         policy.cache_mode = BAMBOO_CACHE_MODE_HASH;
-        *((unsigned int*)core2heavypages[i][j]) = policy.word;
-        workload[i] -= core2heavypages[i][j+1];
-        j += 3;
-      }
+        /*unsigned int block = 0;
+        BLOCKINDEX(block, (void *) page_sva);
+        unsigned int coren = gc_block2core[block%(NUMCORES4GC*2)];
+        CACHEADAPT_POLICY_SET_HOST_CORE(policy, coren);*/
+      } else {
+        // locally cache the page in the hottest core
+        CACHEADAPT_POLICY_SET_HOST_CORE(policy, hottestcore);
+      }     
+    } else {
+      // reset it to be homed by its host core
+      unsigned int block = 0;
+      BLOCKINDEX(block, (void *) page_sva);
+      unsigned int coren = gc_block2core[block%(NUMCORES4GC*2)];
+      CACHEADAPT_POLICY_SET_HOST_CORE(policy, coren);
     }
+    CACHEADAPT_CHANGE_POLICY_4_PAGE(tmp_p,page_index,policy);
+    page_sva += BAMBOO_PAGE_SIZE;
   }
-
-  return numchanged;
 }
 
-#define GC_CACHE_ADAPT_ACCESS_THRESHOLD 70
-#define GC_CACHE_ADAPT_CROWD_THRESHOLD  20
-// Every page cached on the core that accesses it the most. 
-// Check to see if any core's pages total more accesses than threshold 
-// GC_CACHE_ADAPT_OVERLOAD_THRESHOLD.  If so, find the pages with the 
-// most remote accesses and hash for home them until we get below 
-// GC_CACHE_ADAPT_OVERLOAD_THRESHOLD.  
-// Sort pages based on activity.... 
-// If more then GC_CACHE_ADAPT_ACCESS_THRESHOLD% of the accesses for a
-// core's pages are from more than GC_CACHE_ADAPT_CROWD_THRESHOLD pages, 
-// then start hfh these pages(selecting the ones with the most remote 
-// accesses first or fewest local accesses) until we get below 
-// GC_CACHE_ADAPT_CROWD_THRESHOLD pages.
-int cacheAdapt_policy_crowd(){
-  unsigned int page_index = 0;
-  VA page_sva = 0;
-  unsigned int page_num = (BAMBOO_SHARED_MEM_SIZE) / (BAMBOO_PAGE_SIZE);
-  unsigned int numchanged = 0;
-  int * tmp_p = gccachepolicytbl+1;
-  unsigned long long workload[NUMCORESACTIVE];
-  memset(workload, 0, NUMCORESACTIVE*sizeof(unsigned long long));
-  unsigned long long total_workload = 0;
-  unsigned long long core2heavypages[NUMCORESACTIVE][page_num*3+1];
-  memset(core2heavypages,0,
-    sizeof(unsigned long long)*(page_num*3+1)*NUMCORESACTIVE);
-  for(page_index = 0; page_index < page_num; page_index++) {
-    page_sva = gcbaseva + (BAMBOO_PAGE_SIZE) * page_index;
-    bamboo_cache_policy_t policy = {0};
-    unsigned int hotestcore = 0;
-    unsigned long long totalfreq = 0;
-    unsigned int hotfreq = 0;
-  
-    int *local_tbl=&gccachesamplingtbl_r[page_index];
-    for(int i = 0; i < NUMCORESACTIVE; i++) {
-      int freq = *local_tbl;
-      local_tbl=(int *)(((char *)local_tbl)+size_cachesamplingtbl_local_r);
-      totalfreq += freq;
-      // check the freqency, decide if this page is hot for the core
-      if(hotfreq < freq) {
-        hotfreq = freq;
-        hotestcore = i;
-      }
-    }
-    // Decide the cache strategy for this page
-    // If decide to adapt a new cache strategy, write into the shared block of
-    // the gcsharedsamplingtbl. The mem recording information that has been 
-    // written is enough to hold the information.
-    // Format: page start va + cache strategy(hfh/(host core+[x,y]))
-    if(hotfreq == 0) {
-      // this page has not been accessed, do not change its cache policy
-      continue;
-    }
-    totalfreq/=BAMBOO_PAGE_SIZE;
-    hotfreq/=BAMBOO_PAGE_SIZE;
-    // locally cache the page in the hotest core
-    // NOTE: (x,y) should be changed to (x+1, y+1)!!!
-    policy.cache_mode = BAMBOO_CACHE_MODE_COORDS;
-    policy.lotar_x = bamboo_cpu2coords[2*hotestcore]+1;
-    policy.lotar_y = bamboo_cpu2coords[2*hotestcore+1]+1;
-    *tmp_p = page_index;
-    tmp_p++;
-    *tmp_p = policy.word;
-    tmp_p++;
-    numchanged++;
-    workload[hotestcore] += totalfreq;
-    total_workload += totalfreq;
-    // insert into core2heavypages using quicksort
-    unsigned long long remoteaccess = totalfreq - hotfreq;
-    unsigned int index = (unsigned int)core2heavypages[hotestcore][0];
-    core2heavypages[hotestcore][3*index+3] = remoteaccess;
-    core2heavypages[hotestcore][3*index+2] = totalfreq;
-    core2heavypages[hotestcore][3*index+1] = (unsigned long long)(tmp_p-1);
-    core2heavypages[hotestcore][0]++;
-  }
-
-  unsigned long long workload_threshold = 
-  total_workload / GC_CACHE_ADAPT_OVERLOAD_THRESHOLD;
-  // Check the workload of each core
-  for(int i = 0; i < NUMCORESACTIVE; i++) {
-    int j = 1;
-    unsigned int index = (unsigned int)core2heavypages[i][0];  
-    if(workload[i] > workload_threshold) {
-      // sort according to the remoteaccess
-      gc_quicksort(&core2heavypages[i][0], 1, index, 0);
-      while((workload[i] > workload_threshold) && (j<index*3)) {
-        // hfh those pages with more remote accesses 
-        bamboo_cache_policy_t policy = {0};
-        policy.cache_mode = BAMBOO_CACHE_MODE_HASH;
-        *((unsigned int*)core2heavypages[i][j]) = policy.word;
-        workload[i] -= core2heavypages[i][j+1];
-        j += 3;
-      }
-    }
-
-    // Check if the accesses are crowded on few pages
-    // sort according to the total access
-inner_crowd:
-    gc_quicksort(&core2heavypages[i][0], j/3+1, index, 1);
-    unsigned long long threshold = 
-      GC_CACHE_ADAPT_ACCESS_THRESHOLD*workload[i]/100;
-    int num_crowded = 0;
-    unsigned long long t_workload = 0;
-    do {
-      t_workload += core2heavypages[i][j+num_crowded*3+1];
-      num_crowded++;
-    } while(t_workload < threshold);
-    // num_crowded <= GC_CACHE_ADAPT_CROWD_THRESHOLD and if there are enough 
-    // items, it is always == GC_CACHE_ADAPT_CROWD_THRESHOLD
-    if(num_crowded > GC_CACHE_ADAPT_CROWD_THRESHOLD) {
-      // need to hfh these pages
-      // sort the pages according to remote access
-      gc_quicksort(&core2heavypages[i][0], j/3+1, j/3+num_crowded, 0);
-      // h4h those pages with more remote accesses 
-      bamboo_cache_policy_t policy = {0};
-      policy.cache_mode = BAMBOO_CACHE_MODE_HASH;
-      *((unsigned int*)core2heavypages[i][j]) = policy.word;
-      workload[i] -= core2heavypages[i][j+1];
-      t_workload -= core2heavypages[i][j+1];
-      j += 3;
-      threshold = GC_CACHE_ADAPT_ACCESS_THRESHOLD*workload[i]/100;
-      goto inner_crowd;
-    }
-  }
-
-  return numchanged;
-} 
-
-void cacheAdapt_master() {
-  CACHEADAPT_OUTPUT_CACHE_SAMPLING_R();
-  unsigned int numchanged = 0;
+unsigned int cacheAdapt_decision(int coren) {
+  BAMBOO_CACHE_MF();
   // check the statistic data
   // for each page, decide the new cache strategy
 #ifdef GC_CACHE_ADAPT_POLICY1
-  numchanged = cacheAdapt_policy_h4h();
-#elif defined GC_CACHE_ADAPT_POLICY2
-  numchanged = cacheAdapt_policy_local();
-#elif defined GC_CACHE_ADAPT_POLICY3
-  numchanged = cacheAdapt_policy_hotest();
-#elif defined GC_CACHE_ADAPT_POLICY4
-  numchanged = cacheAdapt_policy_dominate();
-#elif defined GC_CACHE_ADAPT_POLICY5
-  numchanged = cacheAdapt_policy_overload();
-#elif defined GC_CACHE_ADAPT_POLICY6
-  numchanged = cacheAdapt_policy_crowd();
+  //  cacheAdapt_policy_h4h(coren);
+#elif defined(GC_CACHE_ADAPT_POLICY2)
+  //cacheAdapt_policy_local(coren);
+#elif defined(GC_CACHE_ADAPT_POLICY3)
+  cacheAdapt_policy_hottest(coren);
+#elif defined(GC_CACHE_ADAPT_POLICY4)
+  cacheAdapt_policy_dominate(coren);
 #endif
-  *gccachepolicytbl = numchanged;
 }
 
 // adapt the cache strategy for the mutator
 void cacheAdapt_mutator() {
-  int numchanged = *gccachepolicytbl;
+#if (defined(GC_CACHE_ADAPT_POLICY4)||defined(GC_CACHE_ADAPT_POLICY3))
+  BAMBOO_CACHE_MF();
   // check the changes and adapt them
-  int * tmp_p = gccachepolicytbl+1;
-  while(numchanged--) {
+  unsigned int * tmp_p = gccachepolicytbl;
+  unsigned int page_sva = gcbaseva;
+  for(; page_sva<gctopva; page_sva+=BAMBOO_PAGE_SIZE) {
     // read out the policy
-    int page_index = *tmp_p;
-    bamboo_cache_policy_t policy = (bamboo_cache_policy_t)(*(tmp_p+1));
+    bamboo_cache_policy_t policy = (bamboo_cache_policy_t)(*(tmp_p));
     // adapt the policy
-    bamboo_adapt_cache_policy(page_index*(BAMBOO_PAGE_SIZE)+gcbaseva, 
-        policy, BAMBOO_PAGE_SIZE);
-
-    tmp_p += 2;
+    if(policy.word != 0) {
+      bamboo_adapt_cache_policy(page_sva,policy,BAMBOO_PAGE_SIZE);
+    }
+    tmp_p += 1;
   }
+#endif
 }
 
+// Cache adapt phase process for clients
 void cacheAdapt_phase_client() {
-  WAITFORGCPHASE(PREFINISHPHASE);
+  WAITFORGCPHASE(CACHEPOLICYPHASE);
+  GC_PRINTF("Start cachepolicy phase\n");
+  cacheAdapt_decision(BAMBOO_NUM_OF_CORE);
+  //send init finish msg to core coordinator
+  send_msg_2(STARTUPCORE, GCFINISHCACHEPOLICY, BAMBOO_NUM_OF_CORE);
+  GC_PRINTF("Finish cachepolicy phase\n");
 
+  WAITFORGCPHASE(PREFINISHPHASE);
   GC_PRINTF("Start prefinish phase\n");
   // cache adapt phase
   cacheAdapt_mutator();
@@ -555,75 +339,145 @@ void cacheAdapt_phase_client() {
   //send init finish msg to core coordinator
   send_msg_2(STARTUPCORE, GCFINISHPREF, BAMBOO_NUM_OF_CORE);
   GC_PRINTF("Finish prefinish phase\n");
-  CACHEADAPT_SAMPING_RESET();
+
+#if (defined(GC_CACHE_ADAPT_POLICY4)||defined(GC_CACHE_ADAPT_POLICY3))
+  CACHEADAPT_SAMPLING_RESET();
   if(BAMBOO_NUM_OF_CORE < NUMCORESACTIVE) {
     // zero out the gccachesamplingtbl
     BAMBOO_MEMSET_WH(gccachesamplingtbl_local,0,size_cachesamplingtbl_local);  
-    BAMBOO_MEMSET_WH(gccachesamplingtbl_local_r,0,
-        size_cachesamplingtbl_local_r);
+    BAMBOO_MEMSET_WH(gccachesamplingtbl_local_r,0,size_cachesamplingtbl_local_r);
   }
+#endif
 }
 
+extern unsigned long long gc_output_cache_policy_time;
+
+// Cache adapt phase process for the master
 void cacheAdapt_phase_master() {
-  GCPROFILEITEM();
-  gcphase = PREFINISHPHASE;
+  GCPROFILE_ITEM_MASTER();
+  unsigned long long tmpt = BAMBOO_GET_EXE_TIME();
+  CACHEADAPT_OUTPUT_CACHE_SAMPLING_R();
+  gc_output_cache_policy_time += (BAMBOO_GET_EXE_TIME()-tmpt);
+  // let all cores process the revised profile data in parallel and decide
+  // the cache policy for each page
+  gc_status_info.gcphase = CACHEPOLICYPHASE;
+  GC_SEND_MSG_1_TO_CLIENT(GCSTARTCACHEPOLICY);
+  GC_PRINTF("Start cachepolicy phase \n");
+  // cache adapt phase
+  cacheAdapt_decision(BAMBOO_NUM_OF_CORE);
+  GC_CHECK_ALL_CORE_STATUS();
+  BAMBOO_CACHE_MF();
+
+  // let all cores adopt the new policies
+  gc_status_info.gcphase = PREFINISHPHASE;
   // Note: all cores should flush their runtime data including non-gc cores
   GC_SEND_MSG_1_TO_CLIENT(GCSTARTPREF);
   GC_PRINTF("Start prefinish phase \n");
   // cache adapt phase
   cacheAdapt_mutator();
-  CACHEADPAT_OUTPUT_CACHE_POLICY();
   cacheAdapt_gc(false);
-
-  GC_CHECK_ALL_CORE_STATUS(PREFINISHPHASE == gcphase);
-
-  CACHEADAPT_SAMPING_RESET();
+  GC_CHECK_ALL_CORE_STATUS();
+  
+#if (defined(GC_CACHE_ADAPT_POLICY4)||defined(GC_CACHE_ADAPT_POLICY3))
+  CACHEADAPT_SAMPLING_RESET();
   if(BAMBOO_NUM_OF_CORE < NUMCORESACTIVE) {
     // zero out the gccachesamplingtbl
     BAMBOO_MEMSET_WH(gccachesamplingtbl_local,0,size_cachesamplingtbl_local);
     BAMBOO_MEMSET_WH(gccachesamplingtbl_local_r,0,size_cachesamplingtbl_local_r);
     BAMBOO_MEMSET_WH(gccachepolicytbl,0,size_cachepolicytbl);
   }
+#endif
 }
 
+// output original cache sampling data for each page
 void gc_output_cache_sampling() {
+  extern volatile bool gc_profile_flag;
+  if(!gc_profile_flag) return;
   unsigned int page_index = 0;
   VA page_sva = 0;
-  unsigned int page_num = (BAMBOO_SHARED_MEM_SIZE) / (BAMBOO_PAGE_SIZE);
+  unsigned int page_num = (BAMBOO_SHARED_MEM_SIZE) >> (BAMBOO_PAGE_SIZE_BITS);
   for(page_index = 0; page_index < page_num; page_index++) {
     page_sva = gcbaseva + (BAMBOO_PAGE_SIZE) * page_index;
     unsigned int block = 0;
-    BLOCKINDEX(page_sva, &block);
+    BLOCKINDEX(block, (void *) page_sva);
     unsigned int coren = gc_block2core[block%(NUMCORES4GC*2)];
-    tprintf("va: %x page_index: %d host: %d\n",(int)page_sva,page_index,coren);
+    //printf("%x,  %d,  %d,  ",(int)page_sva,page_index,coren);
+    unsigned int * local_tbl = &gccachesamplingtbl[page_index*NUMCORESACTIVE];
+    int accesscore = 0;
     for(int i = 0; i < NUMCORESACTIVE; i++) {
-      int * local_tbl = (int *)((void *)gccachesamplingtbl
-          +size_cachesamplingtbl_local*i);
-      int freq = local_tbl[page_index];
-      printf("%8d ",freq);
+      int freq = *local_tbl;
+      local_tbl++;
+      if(freq != 0) {
+        accesscore++;
+        //printf("%d,  ", freq);
+      }
     }
-    printf("\n");
+    if(accesscore!=0) {
+      printf("%x,  %d,  %d,  ",(int)page_sva,page_index,coren);
+      unsigned int * local_tbl = &gccachesamplingtbl[page_index*NUMCORESACTIVE];
+      for(int i = 0; i < NUMCORESACTIVE; i++) {
+        unsigned int freq = *local_tbl;
+        local_tbl++;
+        printf("%u,  ", freq);
+      }
+      printf("\n");
+    }
+    //printf("\n");
   }
   printf("=================\n");
 } 
 
+// output revised cache sampling data for each page after compaction
 void gc_output_cache_sampling_r() {
+  extern volatile bool gc_profile_flag;
+  if(!gc_profile_flag) return;
+  // TODO summary data
+  unsigned int sumdata[NUMCORESACTIVE][NUMCORESACTIVE];
+  for(int i = 0; i < NUMCORESACTIVE; i++) {
+    for(int j = 0; j < NUMCORESACTIVE; j++) {
+      sumdata[i][j] = 0;
+    }
+  }
+  tprintf("cache sampling_r \n");
   unsigned int page_index = 0;
   VA page_sva = 0;
-  unsigned int page_num = (BAMBOO_SHARED_MEM_SIZE) / (BAMBOO_PAGE_SIZE);
+  unsigned int page_num = (BAMBOO_SHARED_MEM_SIZE) >> (BAMBOO_PAGE_SIZE_BITS);
   for(page_index = 0; page_index < page_num; page_index++) {
     page_sva = gcbaseva + (BAMBOO_PAGE_SIZE) * page_index;
     unsigned int block = 0;
-    BLOCKINDEX(page_sva, &block);
+    BLOCKINDEX(block, (void *)page_sva);
     unsigned int coren = gc_block2core[block%(NUMCORES4GC*2)];
-    tprintf("va: %x page_index: %d host: %d\n",(int)page_sva,page_index,coren);
+    //printf("%x,  %d,  %d,  ",(int)page_sva,page_index,coren);
+    int accesscore = 0; // TODO
+    unsigned int * local_tbl = &gccachesamplingtbl_r[page_index*NUMCORESACTIVE];
     for(int i = 0; i < NUMCORESACTIVE; i++) {
-      int * local_tbl = (int *)((void *)gccachesamplingtbl_r
-          +size_cachesamplingtbl_local_r*i);
-      int freq = local_tbl[page_index]/BAMBOO_PAGE_SIZE;
-      printf("%8d ",freq);
+      unsigned int freq = *local_tbl; 
+      //printf("%d,  ", freq);
+      if(freq != 0) {
+        accesscore++;// TODO
+      }
+      local_tbl++;
+    }
+    if(accesscore!=0) {
+      printf("%x,  %d,  %d,  ",(int)page_sva,page_index,coren);
+      unsigned int * local_tbl = &gccachesamplingtbl_r[page_index*NUMCORESACTIVE];
+      for(int i = 0; i < NUMCORESACTIVE; i++) {
+        unsigned int freq = *local_tbl;
+        printf("%u,  ", freq);
+        sumdata[accesscore-1][i]+=freq;
+        local_tbl++;
+      }
+      printf("\n");
+    }  
+    //printf("\n");
+  }
+  printf("+++++\n");
+  // TODO printout the summary data
+  for(int i = 0; i < NUMCORESACTIVE; i++) {
+    printf("%d  ", i);
+    for(int j = 0; j < NUMCORESACTIVE; j++) {
+      printf(" %u  ", sumdata[j][i]);
     }
-  
     printf("\n");
   }
   printf("=================\n");