2 #include "multicorecache.h"
// Bookkeeping for revising per-core cache-sampling data while the compacting
// GC moves objects: tracks the source ("orig") page and destination ("to")
// page currently being processed.  NOTE(review): field roles are inferred
// from the visible update sites (samplingDataInit / completePageConvert);
// several surrounding source lines are elided in this chunk -- confirm
// against the full file.
4 typedef struct gc_cache_revise_info {
// VA bounds and heap-page index of the page objects are being copied FROM
5 unsigned int orig_page_start_va;
6 unsigned int orig_page_end_va;
7 unsigned int orig_page_index;
// VA bounds and heap-page index of the page objects are being copied TO
8 unsigned int to_page_start_va;
9 unsigned int to_page_end_va;
10 unsigned int to_page_index;
// per-core revised sampling counters (not referenced in this chunk)
11 unsigned int revised_sampling[NUMCORESACTIVE];
12 } gc_cache_revise_info_t;
// Single global instance.  "infomation" is a long-standing spelling of
// "information"; renaming would break other translation units, so keep as-is.
13 gc_cache_revise_info_t gc_cache_revise_infomation;
// (Re)initialize gc_cache_revise_infomation for the current (orig, to) page
// pair at the start of a compaction step.  Page-end VAs are computed as
// gcbaseva + BAMBOO_PAGE_SIZE*(index+1), i.e. one byte past the page.
// NOTE(review): `to`, `tobase` and `orig` are not declared in the visible
// lines -- presumably parameters or globals on elided lines; confirm.
15 INLINE void samplingDataInit() {
16 gc_cache_revise_infomation.to_page_start_va = (unsigned int)to->ptr;
// index of the destination page within the shared heap
17 unsigned int toindex = (unsigned int)(tobase-gcbaseva)/(BAMBOO_PAGE_SIZE);
18 gc_cache_revise_infomation.to_page_end_va = gcbaseva +
19 (BAMBOO_PAGE_SIZE)*(toindex+1);
20 gc_cache_revise_infomation.to_page_index = toindex;
21 gc_cache_revise_infomation.orig_page_start_va = (unsigned int)orig->ptr;
22 gc_cache_revise_infomation.orig_page_end_va = gcbaseva+(BAMBOO_PAGE_SIZE)
23 *(((unsigned int)(orig->ptr)-gcbaseva)/(BAMBOO_PAGE_SIZE)+1);
// NOTE(review): orig_page_index is derived from orig->blockbase while
// orig_page_start_va uses orig->ptr -- verify this asymmetry is intended.
24 gc_cache_revise_infomation.orig_page_index =
25 ((unsigned int)(orig->blockbase)-gcbaseva)/(BAMBOO_PAGE_SIZE);
// Fold the finished orig page's sampling counts into the current to page:
// for each active core, new += old * tmp_factor, where tmp_factor is the
// number of bytes copied into the to page so far (the weight of this page's
// contribution).  Both sampling tables are laid out as NUMCORESACTIVE
// consecutive per-core slabs, hence the char* stride arithmetic below.
28 INLINE void samplingDataConvert(unsigned int current_ptr) {
29 unsigned int tmp_factor =
30 current_ptr-gc_cache_revise_infomation.to_page_start_va;
31 unsigned int topage=gc_cache_revise_infomation.to_page_index;
32 unsigned int oldpage = gc_cache_revise_infomation.orig_page_index;
33 int * newtable=&gccachesamplingtbl_r[topage];
34 int * oldtable=&gccachesamplingtbl[oldpage];
// advance each cursor by one per-core slab per iteration
36 for(int tt = 0; tt < NUMCORESACTIVE; tt++) {
37 (*newtable) = ((*newtable)+(*oldtable)*tmp_factor);
38 newtable=(int*)(((char *)newtable)+size_cachesamplingtbl_local_r);
39 oldtable=(int*) (((char *)oldtable)+size_cachesamplingtbl_local);
// Called as the compactor advances: when the tracked pointer crosses the end
// of the active orig/to page, fold that page's samples into the destination
// table (samplingDataConvert) and re-arm gc_cache_revise_infomation for the
// next (orig, to) page pair.
// NOTE(review): the tail of the parameter list, the branch that decides
// whether `tocompare` is the to-page end or the orig-page end, and the
// declaration of `ptr` all sit on elided source lines -- confirm against the
// full file before relying on the exact trigger condition.
43 INLINE void completePageConvert(struct moveHelper * orig,
44 struct moveHelper * to,
45 unsigned int current_ptr,
48 unsigned int tocompare = 0;
// (elided condition) boundary is the end of the current to page ...
51 tocompare = gc_cache_revise_infomation.to_page_end_va;
// ... or (elided condition) the end of the current orig page
54 tocompare = gc_cache_revise_infomation.orig_page_end_va;
56 if((unsigned int)ptr >= (unsigned int)tocompare) {
57 // end of an orig/to page
58 // compute the impact of this page for the new page
59 samplingDataConvert(current_ptr);
60 // prepare for an new orig page
61 unsigned int tmp_index =
62 (unsigned int)((unsigned int)orig->ptr-gcbaseva)/(BAMBOO_PAGE_SIZE);
63 gc_cache_revise_infomation.orig_page_start_va = orig->ptr;
64 gc_cache_revise_infomation.orig_page_end_va = gcbaseva +
65 (BAMBOO_PAGE_SIZE)*(unsigned int)(tmp_index+1);
66 gc_cache_revise_infomation.orig_page_index = tmp_index;
67 gc_cache_revise_infomation.to_page_start_va = to->ptr;
69 gc_cache_revise_infomation.to_page_end_va = gcbaseva+(BAMBOO_PAGE_SIZE)
70 *(((unsigned int)(to->ptr)-gcbaseva)/(BAMBOO_PAGE_SIZE)+1);
71 gc_cache_revise_infomation.to_page_index =
72 ((unsigned int)(to->ptr)-gcbaseva)/(BAMBOO_PAGE_SIZE);
77 // prepare for cache adaption:
78 // -- flush the shared heap
79 // -- clean dtlb entries
80 // -- change cache strategy
// Switch this core between GC-time and mutator-time cache configuration.
// isgccachestage=true enters the GC cache strategy; false restores the
// mutator strategy.  NOTE(review): the dtlb-cleaning code announced by the
// comment below is on elided source lines.
81 void cacheAdapt_gc(bool isgccachestage) {
82 // flush the shared heap
83 BAMBOO_CACHE_FLUSH_L2();
85 // clean the dtlb entries
88 // change the cache strategy
89 gccachestage = isgccachestage;
92 // the master core decides how to adapt cache strategy for the mutator
93 // according to collected statistic data
// Policy 1: hash-for-home every page of the shared heap (spread each page's
// cache lines across cores; no locality bias).  Walks all pages and emits
// policy records into gccachepolicytbl starting at slot 1 (slot 0 holds the
// record count, filled in by cacheAdapt_master).  NOTE(review): the lines
// that record the page identity, advance tmp_p, bump numchanged and return
// are elided from this chunk.
96 int cacheAdapt_policy_h4h(){
97 unsigned int page_index = 0;
99 unsigned int page_num = (BAMBOO_SHARED_MEM_SIZE) / (BAMBOO_PAGE_SIZE);
100 unsigned int numchanged = 0;
101 int * tmp_p = gccachepolicytbl+1;
102 for(page_index = 0; page_index < page_num; page_index++) {
103 page_sva = gcbaseva + (BAMBOO_PAGE_SIZE) * page_index;
104 bamboo_cache_policy_t policy = {0};
105 policy.cache_mode = BAMBOO_CACHE_MODE_HASH;
108 *tmp_p = policy.word;
116 // make all pages local as non-cache-adaptable gc local mode
// Policy 2: home every page on the core that owns its heap block
// (gc_block2core), ignoring sampling data entirely.  Tile coordinates from
// bamboo_cpu2coords are biased by +1 per the hardware convention noted
// inline.  NOTE(review): record-advance and return lines are elided.
117 int cacheAdapt_policy_local(){
118 unsigned int page_index = 0;
120 unsigned int page_num = (BAMBOO_SHARED_MEM_SIZE) / (BAMBOO_PAGE_SIZE);
121 unsigned int numchanged = 0;
122 int * tmp_p = gccachepolicytbl+1;
123 for(page_index = 0; page_index < page_num; page_index++) {
124 page_sva = gcbaseva + (BAMBOO_PAGE_SIZE) * page_index;
125 bamboo_cache_policy_t policy = {0};
126 unsigned int block = 0;
127 BLOCKINDEX(page_sva, &block);
// owning core for this block (blocks are striped over 2*NUMCORES4GC)
128 unsigned int coren = gc_block2core[block%(NUMCORES4GC*2)];
129 // locally cache the page in the hotest core
130 // NOTE: (x,y) should be changed to (x+1, y+1)!!!
131 policy.cache_mode = BAMBOO_CACHE_MODE_COORDS;
132 policy.lotar_x = bamboo_cpu2coords[2*coren]+1;
133 policy.lotar_y = bamboo_cpu2coords[2*coren+1]+1;
136 *tmp_p = policy.word;
// Policy 3: home each page on the single core that accessed it the most
// ("hotest"), according to the revised sampling table gccachesamplingtbl_r.
// NOTE(review): the statements that update hotestcore/hotfreq inside the
// per-core scan, the unaccessed-page early-out, and the record-advance /
// return lines are elided from this chunk.
144 int cacheAdapt_policy_hotest(){
145 unsigned int page_index = 0;
147 unsigned int page_num = (BAMBOO_SHARED_MEM_SIZE) / (BAMBOO_PAGE_SIZE);
148 unsigned int numchanged = 0;
149 int * tmp_p = gccachepolicytbl+1;
150 for(page_index = 0; page_index < page_num; page_index++) {
151 page_sva = gcbaseva + (BAMBOO_PAGE_SIZE) * page_index;
152 bamboo_cache_policy_t policy = {0};
153 unsigned int hotestcore = 0;
154 unsigned int hotfreq = 0;
// scan this page's access count for every active core (slab-strided table)
156 int *local_tbl=&gccachesamplingtbl_r[page_index];
157 for(int i = 0; i < NUMCORESACTIVE; i++) {
158 int freq = *local_tbl;
159 local_tbl=(int *)(((char *)local_tbl)+size_cachesamplingtbl_local_r);
161 // check the freqency, decide if this page is hot for the core
168 // Decide the cache strategy for this page
169 // If decide to adapt a new cache strategy, write into the shared block of
170 // the gcsharedsamplingtbl. The mem recording information that has been
171 // written is enough to hold the information.
172 // Format: page start va + cache strategy(hfh/(host core+[x,y]))
174 // this page has not been accessed, do not change its cache policy
177 // locally cache the page in the hotest core
178 // NOTE: (x,y) should be changed to (x+1, y+1)!!!
179 policy.cache_mode = BAMBOO_CACHE_MODE_COORDS;
180 policy.lotar_x = bamboo_cpu2coords[2*hotestcore]+1;
181 policy.lotar_y = bamboo_cpu2coords[2*hotestcore+1]+1;
184 *tmp_p = policy.word;
193 #define GC_CACHE_ADAPT_DOMINATE_THRESHOLD 50
194 // cache the page on the core that accesses it the most if that core accesses
195 // it more than (GC_CACHE_ADAPT_DOMINATE_THRESHOLD)% of the total. Otherwise,
// Policy 4: "dominate" -- home a page on its hottest core only when that core
// dominates (> threshold% of total accesses); otherwise hash-for-home it.
// NOTE(review): the per-core scan's hotestcore/totalfreq/hotfreq updates and
// the assignment target of the threshold expression are on elided lines; the
// comparison below is between per-page-normalized frequencies.
197 int cacheAdapt_policy_dominate(){
198 unsigned int page_index = 0;
200 unsigned int page_num = (BAMBOO_SHARED_MEM_SIZE) / (BAMBOO_PAGE_SIZE);
201 unsigned int numchanged = 0;
202 int * tmp_p = gccachepolicytbl+1;
203 for(page_index = 0; page_index < page_num; page_index++) {
204 page_sva = gcbaseva + (BAMBOO_PAGE_SIZE) * page_index;
205 bamboo_cache_policy_t policy = {0};
206 unsigned int hotestcore = 0;
207 unsigned long long totalfreq = 0;
208 unsigned int hotfreq = 0;
// scan this page's access count for every active core (slab-strided table)
210 int *local_tbl=&gccachesamplingtbl_r[page_index];
211 for(int i = 0; i < NUMCORESACTIVE; i++) {
212 int freq = *local_tbl;
213 local_tbl=(int *)(((char *)local_tbl)+size_cachesamplingtbl_local_r);
215 // check the freqency, decide if this page is hot for the core
222 // Decide the cache strategy for this page
223 // If decide to adapt a new cache strategy, write into the shared block of
225 // Format: page start va + cache policy
227 // this page has not been accessed, do not change its cache policy
// dominance cutoff: threshold% of total accesses, normalized by page size
231 (totalfreq*GC_CACHE_ADAPT_DOMINATE_THRESHOLD)/100/BAMBOO_PAGE_SIZE;
232 hotfreq/=BAMBOO_PAGE_SIZE;
233 if(hotfreq < totalfreq) {
// hottest core does not dominate -> hash the page across all cores
235 policy.cache_mode = BAMBOO_CACHE_MODE_HASH;
237 // locally cache the page in the hotest core
238 // NOTE: (x,y) should be changed to (x+1, y+1)!!!
239 policy.cache_mode = BAMBOO_CACHE_MODE_COORDS;
240 policy.lotar_x = bamboo_cpu2coords[2*hotestcore]+1;
241 policy.lotar_y = bamboo_cpu2coords[2*hotestcore+1]+1;
245 *tmp_p = policy.word;
253 #define GC_CACHE_ADAPT_OVERLOAD_THRESHOLD 10
// Quicksort over the (slot, totalfreq, remoteaccess) triples stored inline in
// a core2heavypages row; `offset` selects which member of each triple is the
// sort key (array[i*3-offset]).  The scan directions (skip while > pivot on
// the left, < pivot on the right) give DESCENDING order of the key.
// NOTE(review): the `left`/`right` parameter declarations and the leftIdx++ /
// rightIdx-- statements inside the inner loops are on elided lines.  The
// stray second semicolon after `pivot = 0` is harmless.
255 void gc_quicksort(unsigned long long *array,
258 unsigned int offset) {
259 unsigned int pivot = 0;;
260 unsigned int leftIdx = left;
261 unsigned int rightIdx = right;
262 if((right-left+1) >= 1) {
263 pivot = (left+right)/2;
264 while((leftIdx <= pivot) && (rightIdx >= pivot)) {
265 unsigned long long pivotValue = array[pivot*3-offset];
266 while((array[leftIdx*3-offset] > pivotValue) && (leftIdx <= pivot)) {
269 while((array[rightIdx*3-offset] < pivotValue) && (rightIdx >= pivot)) {
272 // swap [leftIdx] & [rightIdx]
// swap the whole triple, one member at a time
273 for(int k = 0; k < 3; k++) {
274 unsigned long long tmp = array[3*rightIdx-k];
275 array[3*rightIdx-k] = array[3*leftIdx-k];
276 array[3*leftIdx-k] = tmp;
// keep the pivot index consistent after the swap crossed it
280 if((leftIdx-1) == pivot) {
281 pivot = rightIdx = rightIdx + 1;
282 } else if((leftIdx+1) == pivot) {
283 pivot = leftIdx = leftIdx-1;
// recurse on both partitions around the pivot
286 gc_quicksort(array, left, pivot-1, offset);
287 gc_quicksort(array, pivot+1, right, offset);
292 // Every page cached on the core that accesses it the most.
293 // Check to see if any core's pages total more accesses than threshold
294 // GC_CACHE_ADAPT_OVERLOAD_THRESHOLD. If so, find the pages with the
295 // most remote accesses and hash for home them until we get below
296 // GC_CACHE_ADAPT_OVERLOAD_THRESHOLD
// Policy 5: "overload".  Phase 1 homes every page on its hottest core while
// accumulating per-core workload and a per-core list of heavy pages
// (core2heavypages rows of (policy-slot, totalfreq, remoteaccess) triples,
// element [0] = triple count).  Phase 2 sorts each overloaded core's pages
// by remote accesses and converts the worst ones to hash-for-home until the
// core drops below total_workload/GC_CACHE_ADAPT_OVERLOAD_THRESHOLD.
// NOTE(review): the per-core scan updates, the loop variable `j`'s
// declaration/increment, the record-advance and return lines are elided.
// Also note core2heavypages is a VLA of page_num*3+1 entries per core --
// large heaps put substantial data on the stack.
297 int cacheAdapt_policy_overload(){
298 unsigned int page_index = 0;
300 unsigned int page_num = (BAMBOO_SHARED_MEM_SIZE) / (BAMBOO_PAGE_SIZE);
301 unsigned int numchanged = 0;
302 int * tmp_p = gccachepolicytbl+1;
303 unsigned long long workload[NUMCORESACTIVE];
304 memset(workload, 0, NUMCORESACTIVE*sizeof(unsigned long long));
305 unsigned long long total_workload = 0;
306 unsigned long long core2heavypages[NUMCORESACTIVE][page_num*3+1];
307 memset(core2heavypages,0,
308 sizeof(unsigned long long)*(page_num*3+1)*NUMCORESACTIVE);
309 for(page_index = 0; page_index < page_num; page_index++) {
310 page_sva = gcbaseva + (BAMBOO_PAGE_SIZE) * page_index;
311 bamboo_cache_policy_t policy = {0};
312 unsigned int hotestcore = 0;
313 unsigned long long totalfreq = 0;
314 unsigned int hotfreq = 0;
// scan this page's access count for every active core (slab-strided table)
316 int *local_tbl=&gccachesamplingtbl_r[page_index];
317 for(int i = 0; i < NUMCORESACTIVE; i++) {
318 int freq = *local_tbl;
319 local_tbl=(int *)(((char *)local_tbl)+size_cachesamplingtbl_local_r);
321 // check the freqency, decide if this page is hot for the core
327 // Decide the cache strategy for this page
328 // If decide to adapt a new cache strategy, write into the shared block of
329 // the gcsharedsamplingtbl. The mem recording information that has been
330 // written is enough to hold the information.
331 // Format: page start va + cache strategy(hfh/(host core+[x,y]))
333 // this page has not been accessed, do not change its cache policy
// normalize frequencies by page size before comparing/accumulating
337 totalfreq/=BAMBOO_PAGE_SIZE;
338 hotfreq/=BAMBOO_PAGE_SIZE;
339 // locally cache the page in the hotest core
340 // NOTE: (x,y) should be changed to (x+1, y+1)!!!
341 policy.cache_mode = BAMBOO_CACHE_MODE_COORDS;
342 policy.lotar_x = bamboo_cpu2coords[2*hotestcore]+1;
343 policy.lotar_y = bamboo_cpu2coords[2*hotestcore+1]+1;
346 *tmp_p = policy.word;
// accumulate workload and append this page's triple to the hottest core's row
349 workload[hotestcore] += totalfreq;
350 total_workload += totalfreq;
351 // insert into core2heavypages using quicksort
352 unsigned long long remoteaccess = totalfreq - hotfreq;
353 unsigned int index = (unsigned int)core2heavypages[hotestcore][0];
354 core2heavypages[hotestcore][3*index+3] = remoteaccess;
355 core2heavypages[hotestcore][3*index+2] = totalfreq;
// NOTE(review): a policy-table slot pointer is stored as an integer here and
// cast back in the fixup loop -- assumes pointer fits in unsigned long long
356 core2heavypages[hotestcore][3*index+1] = (unsigned long long)(tmp_p-1);
357 core2heavypages[hotestcore][0]++;
360 unsigned long long workload_threshold =
361 total_workload/GC_CACHE_ADAPT_OVERLOAD_THRESHOLD;
362 // Check the workload of each core
363 for(int i = 0; i < NUMCORESACTIVE; i++) {
365 unsigned int index = (unsigned int)core2heavypages[i][0];
366 if(workload[i] > workload_threshold) {
367 // sort according to the remoteaccess
368 gc_quicksort(&core2heavypages[i][0], 1, index, 0);
369 while((workload[i] > workload_threshold) && (j<index*3)) {
370 // hfh those pages with more remote accesses
371 bamboo_cache_policy_t policy = {0};
372 policy.cache_mode = BAMBOO_CACHE_MODE_HASH;
// rewrite the previously-emitted record for this page to hash-for-home
373 *((unsigned int*)core2heavypages[i][j]) = policy.word;
374 workload[i] -= core2heavypages[i][j+1];
383 #define GC_CACHE_ADAPT_ACCESS_THRESHOLD 70
384 #define GC_CACHE_ADAPT_CROWD_THRESHOLD 20
385 // Every page cached on the core that accesses it the most.
386 // Check to see if any core's pages total more accesses than threshold
387 // GC_CACHE_ADAPT_OVERLOAD_THRESHOLD. If so, find the pages with the
388 // most remote accesses and hash for home them until we get below
389 // GC_CACHE_ADAPT_OVERLOAD_THRESHOLD.
390 // Sort pages based on activity....
391 // If more then GC_CACHE_ADAPT_ACCESS_THRESHOLD% of the accesses for a
392 // core's pages are from more than GC_CACHE_ADAPT_CROWD_THRESHOLD pages,
393 // then start hfh these pages(selecting the ones with the most remote
394 // accesses first or fewest local accesses) until we get below
395 // GC_CACHE_ADAPT_CROWD_THRESHOLD pages.
// Policy 6: "crowd" -- extends policy 5 (overload) with an extra pass that
// detects accesses crowded onto too many pages of one core and hash-for-homes
// them.  Shares the core2heavypages triple layout and gc_quicksort helper.
// NOTE(review): as in policy 5, the per-core scan updates, `j`'s declaration
// and increments, the do-loop header for num_crowded, and the record-advance
// and return lines are elided from this chunk -- confirm in the full file.
396 int cacheAdapt_policy_crowd(){
397 unsigned int page_index = 0;
399 unsigned int page_num = (BAMBOO_SHARED_MEM_SIZE) / (BAMBOO_PAGE_SIZE);
400 unsigned int numchanged = 0;
401 int * tmp_p = gccachepolicytbl+1;
402 unsigned long long workload[NUMCORESACTIVE];
403 memset(workload, 0, NUMCORESACTIVE*sizeof(unsigned long long));
404 unsigned long long total_workload = 0;
// VLA: (policy-slot, totalfreq, remoteaccess) triples + count per core
405 unsigned long long core2heavypages[NUMCORESACTIVE][page_num*3+1];
406 memset(core2heavypages,0,
407 sizeof(unsigned long long)*(page_num*3+1)*NUMCORESACTIVE);
408 for(page_index = 0; page_index < page_num; page_index++) {
409 page_sva = gcbaseva + (BAMBOO_PAGE_SIZE) * page_index;
410 bamboo_cache_policy_t policy = {0};
411 unsigned int hotestcore = 0;
412 unsigned long long totalfreq = 0;
413 unsigned int hotfreq = 0;
// scan this page's access count for every active core (slab-strided table)
415 int *local_tbl=&gccachesamplingtbl_r[page_index];
416 for(int i = 0; i < NUMCORESACTIVE; i++) {
417 int freq = *local_tbl;
418 local_tbl=(int *)(((char *)local_tbl)+size_cachesamplingtbl_local_r);
420 // check the freqency, decide if this page is hot for the core
426 // Decide the cache strategy for this page
427 // If decide to adapt a new cache strategy, write into the shared block of
428 // the gcsharedsamplingtbl. The mem recording information that has been
429 // written is enough to hold the information.
430 // Format: page start va + cache strategy(hfh/(host core+[x,y]))
432 // this page has not been accessed, do not change its cache policy
// normalize frequencies by page size before comparing/accumulating
435 totalfreq/=BAMBOO_PAGE_SIZE;
436 hotfreq/=BAMBOO_PAGE_SIZE;
437 // locally cache the page in the hotest core
438 // NOTE: (x,y) should be changed to (x+1, y+1)!!!
439 policy.cache_mode = BAMBOO_CACHE_MODE_COORDS;
440 policy.lotar_x = bamboo_cpu2coords[2*hotestcore]+1;
441 policy.lotar_y = bamboo_cpu2coords[2*hotestcore+1]+1;
444 *tmp_p = policy.word;
// accumulate workload and append this page's triple to the hottest core's row
447 workload[hotestcore] += totalfreq;
448 total_workload += totalfreq;
449 // insert into core2heavypages using quicksort
450 unsigned long long remoteaccess = totalfreq - hotfreq;
451 unsigned int index = (unsigned int)core2heavypages[hotestcore][0];
452 core2heavypages[hotestcore][3*index+3] = remoteaccess;
453 core2heavypages[hotestcore][3*index+2] = totalfreq;
// NOTE(review): policy-table slot pointer stored as an integer, cast back in
// the fixup loops below -- assumes pointer fits in unsigned long long
454 core2heavypages[hotestcore][3*index+1] = (unsigned long long)(tmp_p-1);
455 core2heavypages[hotestcore][0]++;
458 unsigned long long workload_threshold =
459 total_workload / GC_CACHE_ADAPT_OVERLOAD_THRESHOLD;
460 // Check the workload of each core
461 for(int i = 0; i < NUMCORESACTIVE; i++) {
463 unsigned int index = (unsigned int)core2heavypages[i][0];
464 if(workload[i] > workload_threshold) {
465 // sort according to the remoteaccess
466 gc_quicksort(&core2heavypages[i][0], 1, index, 0);
467 while((workload[i] > workload_threshold) && (j<index*3)) {
468 // hfh those pages with more remote accesses
469 bamboo_cache_policy_t policy = {0};
470 policy.cache_mode = BAMBOO_CACHE_MODE_HASH;
// rewrite the previously-emitted record for this page to hash-for-home
471 *((unsigned int*)core2heavypages[i][j]) = policy.word;
472 workload[i] -= core2heavypages[i][j+1];
477 // Check if the accesses are crowded on few pages
478 // sort according to the total access
// offset=1 sorts the remaining triples by totalfreq (descending)
480 gc_quicksort(&core2heavypages[i][0], j/3+1, index, 1);
481 unsigned long long threshold =
482 GC_CACHE_ADAPT_ACCESS_THRESHOLD*workload[i]/100;
484 unsigned long long t_workload = 0;
// count how many of the hottest pages it takes to reach threshold% of work
486 t_workload += core2heavypages[i][j+num_crowded*3+1];
488 } while(t_workload < threshold);
489 // num_crowded <= GC_CACHE_ADAPT_CROWD_THRESHOLD and if there are enough
490 // items, it is always == GC_CACHE_ADAPT_CROWD_THRESHOLD
491 if(num_crowded > GC_CACHE_ADAPT_CROWD_THRESHOLD) {
492 // need to hfh these pages
493 // sort the pages according to remote access
494 gc_quicksort(&core2heavypages[i][0], j/3+1, j/3+num_crowded, 0);
495 // h4h those pages with more remote accesses
496 bamboo_cache_policy_t policy = {0};
497 policy.cache_mode = BAMBOO_CACHE_MODE_HASH;
498 *((unsigned int*)core2heavypages[i][j]) = policy.word;
499 workload[i] -= core2heavypages[i][j+1];
500 t_workload -= core2heavypages[i][j+1];
// recompute the cutoff against the reduced workload
502 threshold = GC_CACHE_ADAPT_ACCESS_THRESHOLD*workload[i]/100;
// Master-core entry point for cache adaptation: run the compile-time-selected
// policy (exactly one GC_CACHE_ADAPT_POLICYn is expected to be defined),
// which fills gccachepolicytbl from slot 1 onward, then store the number of
// emitted records into slot 0 for the clients (cacheAdapt_mutator) to read.
// NOTE(review): the #endif closing this #ifdef chain and the function's
// closing brace are on elided lines.
510 void cacheAdapt_master() {
511 CACHEADAPT_OUTPUT_CACHE_SAMPLING_R();
512 unsigned int numchanged = 0;
513 // check the statistic data
514 // for each page, decide the new cache strategy
515 #ifdef GC_CACHE_ADAPT_POLICY1
516 numchanged = cacheAdapt_policy_h4h();
517 #elif defined GC_CACHE_ADAPT_POLICY2
518 numchanged = cacheAdapt_policy_local();
519 #elif defined GC_CACHE_ADAPT_POLICY3
520 numchanged = cacheAdapt_policy_hotest();
521 #elif defined GC_CACHE_ADAPT_POLICY4
522 numchanged = cacheAdapt_policy_dominate();
523 #elif defined GC_CACHE_ADAPT_POLICY5
524 numchanged = cacheAdapt_policy_overload();
525 #elif defined GC_CACHE_ADAPT_POLICY6
526 numchanged = cacheAdapt_policy_crowd();
// record count goes in slot 0; records start at slot 1
528 *gccachepolicytbl = numchanged;
531 // adapt the cache strategy for the mutator
// Apply the policy records produced by cacheAdapt_master: slot 0 of
// gccachepolicytbl holds the record count; each record pairs a page index
// with a policy word, which is programmed into the hardware for that page.
// NOTE(review): the statement advancing tmp_p to the next record is on an
// elided line -- presumably tmp_p += 2 given the (index, policy) layout.
532 void cacheAdapt_mutator() {
533 int numchanged = *gccachepolicytbl;
534 // check the changes and adapt them
535 int * tmp_p = gccachepolicytbl+1;
536 while(numchanged--) {
537 // read out the policy
538 int page_index = *tmp_p;
539 bamboo_cache_policy_t policy = (bamboo_cache_policy_t)(*(tmp_p+1));
// apply to the page's VA range in the shared heap
541 bamboo_adapt_cache_policy(page_index*(BAMBOO_PAGE_SIZE)+gcbaseva,
542 policy, BAMBOO_PAGE_SIZE);
// Client-core side of the PREFINISH GC phase: wait for the phase, apply the
// master's policy records, switch back to the mutator cache strategy, report
// completion to the coordinator, then clear local sampling state for the
// next cycle.  NOTE(review): CACHEADAPT_SAMPING_RESET spelling ("SAMPING")
// matches the macro's definition elsewhere -- do not "fix" it locally.
548 void cacheAdapt_phase_client() {
549 WAITFORGCPHASE(PREFINISHPHASE);
551 GC_PRINTF("Start prefinish phase\n");
// apply policy records, then restore mutator-time cache configuration
553 cacheAdapt_mutator();
554 cacheAdapt_gc(false);
555 //send init finish msg to core coordinator
556 send_msg_2(STARTUPCORE, GCFINISHPREF, BAMBOO_NUM_OF_CORE);
557 GC_PRINTF("Finish prefinish phase\n");
558 CACHEADAPT_SAMPING_RESET();
559 if(BAMBOO_NUM_OF_CORE < NUMCORESACTIVE) {
560 // zero out the gccachesamplingtbl
561 BAMBOO_MEMSET_WH(gccachesamplingtbl_local,0,size_cachesamplingtbl_local);
562 BAMBOO_MEMSET_WH(gccachesamplingtbl_local_r,0,
563 size_cachesamplingtbl_local_r);
// Master-core side of the PREFINISH GC phase: broadcast phase start to all
// cores, apply the policy records locally, restore the mutator cache
// strategy, wait for every core to finish, then clear the sampling and
// policy tables for the next cycle.  NOTE(review): CACHEADPAT_OUTPUT_... and
// CACHEADAPT_SAMPING_RESET spellings match the macros' definitions elsewhere
// -- do not "fix" them locally.
567 void cacheAdapt_phase_master() {
569 gcphase = PREFINISHPHASE;
570 // Note: all cores should flush their runtime data including non-gc cores
571 GC_SEND_MSG_1_TO_CLIENT(GCSTARTPREF);
572 GC_PRINTF("Start prefinish phase \n");
// apply policy records, then restore mutator-time cache configuration
574 cacheAdapt_mutator();
575 CACHEADPAT_OUTPUT_CACHE_POLICY();
576 cacheAdapt_gc(false);
// barrier: wait until every core has reported GCFINISHPREF
578 GC_CHECK_ALL_CORE_STATUS(PREFINISHPHASE == gcphase);
580 CACHEADAPT_SAMPING_RESET();
581 if(BAMBOO_NUM_OF_CORE < NUMCORESACTIVE) {
582 // zero out the gccachesamplingtbl
583 BAMBOO_MEMSET_WH(gccachesamplingtbl_local,0,size_cachesamplingtbl_local);
584 BAMBOO_MEMSET_WH(gccachesamplingtbl_local_r,0,size_cachesamplingtbl_local_r);
585 BAMBOO_MEMSET_WH(gccachepolicytbl,0,size_cachepolicytbl);
// Debug dump of the raw (pre-revision) sampling table gccachesamplingtbl:
// for every heap page, print its VA, index and owning core, then each active
// core's access count.  NOTE(review): the statement printing `freq` and the
// loop-closing lines are elided from this chunk.
589 void gc_output_cache_sampling() {
590 unsigned int page_index = 0;
592 unsigned int page_num = (BAMBOO_SHARED_MEM_SIZE) / (BAMBOO_PAGE_SIZE);
593 for(page_index = 0; page_index < page_num; page_index++) {
594 page_sva = gcbaseva + (BAMBOO_PAGE_SIZE) * page_index;
595 unsigned int block = 0;
596 BLOCKINDEX(page_sva, &block);
// owning core for this page's block
597 unsigned int coren = gc_block2core[block%(NUMCORES4GC*2)];
598 tprintf("va: %x page_index: %d host: %d\n",(int)page_sva,page_index,coren);
599 for(int i = 0; i < NUMCORESACTIVE; i++) {
// per-core slab i of the sampling table, indexed by page
600 int * local_tbl = (int *)((void *)gccachesamplingtbl
601 +size_cachesamplingtbl_local*i);
602 int freq = local_tbl[page_index];
607 printf("=================\n");
// Debug dump of the revised sampling table gccachesamplingtbl_r; same layout
// as gc_output_cache_sampling, but counts are normalized by page size since
// samplingDataConvert scales them by bytes copied.  NOTE(review): the
// statement printing `freq` and the loop-closing lines are elided.
610 void gc_output_cache_sampling_r() {
611 unsigned int page_index = 0;
613 unsigned int page_num = (BAMBOO_SHARED_MEM_SIZE) / (BAMBOO_PAGE_SIZE);
614 for(page_index = 0; page_index < page_num; page_index++) {
615 page_sva = gcbaseva + (BAMBOO_PAGE_SIZE) * page_index;
616 unsigned int block = 0;
617 BLOCKINDEX(page_sva, &block);
// owning core for this page's block
618 unsigned int coren = gc_block2core[block%(NUMCORES4GC*2)];
619 tprintf("va: %x page_index: %d host: %d\n",(int)page_sva,page_index,coren);
620 for(int i = 0; i < NUMCORESACTIVE; i++) {
// per-core slab i of the revised table, indexed by page
621 int * local_tbl = (int *)((void *)gccachesamplingtbl_r
622 +size_cachesamplingtbl_local_r*i);
// undo the byte-weighted scaling applied by samplingDataConvert
623 int freq = local_tbl[page_index]/BAMBOO_PAGE_SIZE;
629 printf("=================\n");
631 #endif // GC_CACHE_ADAPT