#ifdef MULTICORE_GC
#ifdef GC_CACHE_ADAPT
-#define GC_TILE_TIMER_EVENT_SETTING 100000000 // should be consistent with
- // runtime_arch.h
+#define GC_CACHE_SAMPLING_UNIT 100000000
+#define GC_TILE_TIMER_EVENT_SETTING 100000000
#define GC_NUM_SAMPLING 24
#define GC_CACHE_ADAPT_HOTPAGE_THRESHOLD 1000
#define GC_CACHE_ADAPT_ACCESS_THRESHOLD 30
// the master core decides how to adapt cache strategy for the mutator
// according to collected statistic data
extern int gc_num_sampling;
+
+bool cacheAdapt_policy_d(VA page_sva,
+ bamboo_cache_policy_t* policy,
+ int page_num,
+ int page_index){
+ int hottestcore = 0;
+ int num_hotcore = 0;
+ int hotfreq = 0;
+
+ for(int i = 0; i < NUMCORESACTIVE; i++) {
+ int * local_tbl = (int *)((void *)gccachesamplingtbl_r
+ +page_num*sizeof(float)*i);
+ int freq = local_tbl[page_index];
+    // TODO(review): the stride uses sizeof(float) but entries are read
+    // through an int* — confirm the sampling-table element type matches
+/* if(page_sva == 0xd180000) {
+ tprintf("%x %d %d\n", (int)page_sva, i, (int)(freq*100000));
+ }*/
+ // TODO
+    // check the frequency, decide if this page is hot for the core
+ if(hotfreq < freq) {
+ hotfreq = freq;
+ hottestcore = i;
+ }
+ if(freq > GC_CACHE_ADAPT_HOTPAGE_THRESHOLD) {
+ num_hotcore++;
+ }
+ }
+ // TODO
+ // Decide the cache strategy for this page
+  // If we decide to adopt a new cache strategy, write it into the shared
+  // block of the gcsharedsamplingtbl; the memory reserved for recording
+  // is large enough to hold this information.
+ // Format: page start va + cache strategy(hfh/(host core+[x,y]))
+ if(hotfreq == 0) {
+ // this page has not been accessed, do not change its cache policy
+ return false;
+ }
+ if(num_hotcore > GC_CACHE_ADAPT_ACCESS_THRESHOLD) {
+ // use hfh
+ policy->cache_mode = BAMBOO_CACHE_MODE_HASH;
+ } else {
+ // locally cache the page in the hottest core
+ // NOTE: (x,y) should be changed to (x+1, y+1)!!!
+ policy->cache_mode = BAMBOO_CACHE_MODE_COORDS;
+ policy->lotar_x = bamboo_cpu2coords[2*hottestcore]+1;
+ policy->lotar_y = bamboo_cpu2coords[2*hottestcore+1]+1;
+ }
+ return true;
+}
+
void cacheAdapt_master() {
// check the statistic data
// for each page, decide the new cache strategy
int num_hotcore = 0;
int hotfreq = 0;
for(page_index = 0; page_index < page_num; page_index++) {
- hottestcore = 0;
- num_hotcore = 0;
- hotfreq = 0;
page_sva = gcbaseva + (BAMBOO_PAGE_SIZE) * page_index;
- for(int i = 0; i < NUMCORESACTIVE; i++) {
- int * local_tbl = (int *)((void *)gccachesamplingtbl_r
- +page_num*sizeof(float)*i);
- int freq = local_tbl[page_index];
- // TODO
-/* if(page_sva == 0xd180000) {
- tprintf("%x %d %d\n", (int)page_sva, i, (int)(freq*100000));
- }*/
- // TODO
- // check the freqency, decide if this page is hot for the core
- if(hotfreq < freq) {
- hotfreq = freq;
- hottestcore = i;
- }
- if(freq > GC_CACHE_ADAPT_HOTPAGE_THRESHOLD) {
- num_hotcore++;
- }
- }
- // TODO
- // Decide the cache strategy for this page
- // If decide to adapt a new cache strategy, write into the shared block of
- // the gcsharedsamplingtbl. The mem recording information that has been
- // written is enough to hold the information.
- // Format: page start va + cache strategy(hfh/(host core+[x,y]))
- if(hotfreq == 0) {
- // this page has not been accessed, do not change its cache strategy
- continue;
- }
bamboo_cache_policy_t policy = {0};
- if(num_hotcore > GC_CACHE_ADAPT_ACCESS_THRESHOLD) {
- // use hfh
- policy.cache_mode = BAMBOO_CACHE_MODE_HASH;
- } else {
- // locally cache the page in the hottest core
- // NOTE: (x,y) should be changed to (x+1, y+1)!!!
- policy.cache_mode = BAMBOO_CACHE_MODE_COORDS;
- policy.lotar_x = bamboo_cpu2coords[2*hottestcore]+1;
- policy.lotar_y = bamboo_cpu2coords[2*hottestcore+1]+1;
+ bool ischange=cacheAdapt_policy_d(page_sva, &policy, page_num, page_index);
+ if(ischange) {
+ *tmp_p = page_index;
+ tmp_p++;
+ *tmp_p = policy.word;
+ tmp_p++;
+ numchanged++;
}
- *tmp_p = page_index;
- tmp_p++;
- *tmp_p = policy.word;
- tmp_p++;
- numchanged++;
}
*gccachepolicytbl = numchanged;
}
#define BAMBOO_PAGE_SIZE (64 * 64)
#define BAMBOO_SMEM_SIZE (64 * 64) // (BAMBOO_PAGE_SIZE)
#define BAMBOO_SHARED_MEM_SIZE ((BAMBOO_PAGE_SIZE) *(BAMBOO_NUM_PAGES))
+
+#elif defined GC_CACHE_ADAPT
+#define BAMBOO_NUM_PAGES ((GC_BAMBOO_NUMCORES)*(2+14))
+#ifdef GC_LARGEPAGESIZE
+#define BAMBOO_PAGE_SIZE (1024 * 1024) // (4096)
+#define BAMBOO_SMEM_SIZE (1024 * 1024)
+#elif defined GC_SMALLPAGESIZE
+#define BAMBOO_PAGE_SIZE (64 * 1024) // (4096)
+#define BAMBOO_SMEM_SIZE (64 * 1024)
+#elif defined GC_SMALLPAGESIZE2
+#define BAMBOO_PAGE_SIZE (16 * 1024) // (4096)
+#define BAMBOO_SMEM_SIZE (16 * 1024)
#else
+#define BAMBOO_PAGE_SIZE (256 * 1024) // (4096)
+#define BAMBOO_SMEM_SIZE (256 * 1024)
+#endif // GC_LARGEPAGESIZE
+#define BAMBOO_SHARED_MEM_SIZE ((BAMBOO_PAGE_SIZE) * (BAMBOO_NUM_PAGES))
+
+#else // GC_DEBUG
#ifdef GC_LARGESHAREDHEAP
#define BAMBOO_NUM_PAGES ((GC_BAMBOO_NUMCORES)*(2+2))
#elif defined GC_LARGESHAREDHEAP2
}
return NULL;
#else
- BAMBOO_DEBUGPRINT(0xa001);
- BAMBOO_EXIT(0xa001);
+ BAMBOO_DEBUGPRINT(0xe003);
+ BAMBOO_EXIT(0xe003);
#endif
}
return mem;
while(i-- > 0) {
BAMBOO_DEBUGPRINT(msgdata[msgdataindex+i]);
}
- BAMBOO_EXIT(0xd005);
+ BAMBOO_EXIT(0xe004);
break;
}
}
#ifndef CLOSE_PRINT
BAMBOO_DEBUGPRINT_REG(msgdata[msgdataindex] /*[2]*/);
#endif
- BAMBOO_EXIT(0xa002);
+ BAMBOO_EXIT(0xe005);
}
// store the object and its corresponding queue info, enqueue it later
transObj->objptr = (void *)msgdata[msgdataindex]; //[2]
#ifndef CLOSE_PRINT
BAMBOO_DEBUGPRINT_REG(msgdata[msgdataindex] /*[1]*/);
#endif
- BAMBOO_EXIT(0xa003);
+ BAMBOO_EXIT(0xe006);
}
int num_core = msgdata[msgdataindex]; //[1]
MSG_INDEXINC_I();
#ifndef CLOSE_PRINT
BAMBOO_DEBUGPRINT_REG(msgdata[msgdataindex] /*[2]*/);
#endif
- BAMBOO_EXIT(0xa004);
+ BAMBOO_EXIT(0xe007);
}
int data2 = msgdata[msgdataindex];
MSG_INDEXINC_I();
#ifndef CLOSE_PRINT
BAMBOO_DEBUGPRINT_REG(data2);
#endif
- BAMBOO_EXIT(0xa005);
+ BAMBOO_EXIT(0xe008);
}
}
#ifndef CLOSE_PRINT
BAMBOO_DEBUGPRINT_REG(data2);
#endif
- BAMBOO_EXIT(0xa006);
+ BAMBOO_EXIT(0xe009);
}
if((lockobj == data2) && (lock2require == data3)) {
#ifdef DEBUG
#ifndef CLOSE_PRINT
BAMBOO_DEBUGPRINT_REG(data2);
#endif
- BAMBOO_EXIT(0xa007);
+ BAMBOO_EXIT(0xe00a);
}
}
#ifndef CLOSE_PRINT
BAMBOO_DEBUGPRINT_REG(data2);
#endif
- BAMBOO_EXIT(0xa00a);
+ BAMBOO_EXIT(0xe00b);
}
if(lockobj == data2) {
#ifdef DEBUG
#ifndef CLOSE_PRINT
BAMBOO_DEBUGPRINT_REG(data2);
#endif
- BAMBOO_EXIT(0xa00b);
+ BAMBOO_EXIT(0xe00c);
}
}
#ifndef CLOSE_PRINT
BAMBOO_DEBUGPRINT_REG(data2);
#endif
- BAMBOO_EXIT(0xa00c);
+ BAMBOO_EXIT(0xe00d);
}
if(lockobj == data2) {
#ifdef DEBUG
#ifndef CLOSE_PRINT
BAMBOO_DEBUGPRINT_REG(data2);
#endif
- BAMBOO_EXIT(0xa00d);
+ BAMBOO_EXIT(0xe00e);
}
}
INLINE void processmsg_profileoutput_I() {
if(BAMBOO_NUM_OF_CORE == STARTUPCORE) {
// startup core can not receive profile output finish msg
- BAMBOO_EXIT(0xa008);
+ BAMBOO_EXIT(0xe00f);
}
#ifdef DEBUG
#ifndef CLOSE_PRINT
#ifndef CLOSE_PRINT
BAMBOO_DEBUGPRINT_REG(msgdata[msgdataindex /*1*/]);
#endif
- BAMBOO_EXIT(0xa009);
+ BAMBOO_EXIT(0xe010);
}
#ifdef DEBUG
#ifndef CLOSE_PRINT
if((BAMBOO_NUM_OF_CORE == STARTUPCORE)
|| (BAMBOO_NUM_OF_CORE > NUMCORESACTIVE - 1)) {
// wrong core to receive such msg
- BAMBOO_EXIT(0xa00e);
+ BAMBOO_EXIT(0xe011);
} else {
// send response msg
#ifdef DEBUG
#ifndef CLOSE_PRINT
BAMBOO_DEBUGPRINT_REG(data2);
#endif
- BAMBOO_EXIT(0xa00f);
+ BAMBOO_EXIT(0xe012);
} else {
#ifdef DEBUG
#ifndef CLOSE_PRINT
#ifndef CLOSE_PRINT
BAMBOO_DEBUGPRINT_REG(data2);
#endif
- BAMBOO_EXIT(0xa010);
+ BAMBOO_EXIT(0xe013);
} else {
#ifdef DEBUG
#ifndef CLOSE_PRINT
#ifndef CLOSE_PRINT
BAMBOO_DEBUGPRINT_REG(data1);
#endif
- BAMBOO_EXIT(0xb000);
+ BAMBOO_EXIT(0xe014);
}
// All cores should do init GC
if(!gcprecheck) {
#ifndef CLOSE_PRINT
BAMBOO_DEBUGPRINT_REG(data1);
#endif
- BAMBOO_EXIT(0xb001);
+ BAMBOO_EXIT(0xe015);
}
#ifdef DEBUG
BAMBOO_DEBUGPRINT(0xe88c);
#ifndef CLOSE_PRINT
BAMBOO_DEBUGPRINT_REG(data1);
#endif
- BAMBOO_EXIT(0xb002);
+ BAMBOO_EXIT(0xe016);
}
// all cores should do mark
if(data1 < NUMCORESACTIVE) {
#ifndef CLOSE_PRINT
BAMBOO_DEBUGPRINT_REG(msgdata[msgdataindex] /*[1]*/);
#endif
- BAMBOO_EXIT(0xb003);
+ BAMBOO_EXIT(0xe017);
}
int cnum = msgdata[msgdataindex];
MSG_INDEXINC_I(); //msgdata[1];
#ifndef CLOSE_PRINT
BAMBOO_DEBUGPRINT_REG(data1);
#endif
- BAMBOO_EXIT(0xb004);
+ BAMBOO_EXIT(0xe018);
}
// all cores should do flush
if(data1 < NUMCORES4GC) {
#ifndef CLOSE_PRINT
BAMBOO_DEBUGPRINT_REG(data1);
#endif
- BAMBOO_EXIT(0xb005);
+ BAMBOO_EXIT(0xe019);
}
// all cores should do flush
if(data1 < NUMCORESACTIVE) {
if((BAMBOO_NUM_OF_CORE == STARTUPCORE)
|| (BAMBOO_NUM_OF_CORE > NUMCORESACTIVE - 1)) {
// wrong core to receive such msg
- BAMBOO_EXIT(0xb006);
+ BAMBOO_EXIT(0xe01a);
} else {
// send response msg, cahce the msg first
if(BAMBOO_CHECK_SEND_MODE()) {
#ifndef CLOSE_PRINT
BAMBOO_DEBUGPRINT_REG(data2);
#endif
- BAMBOO_EXIT(0xb007);
+ BAMBOO_EXIT(0xe01b);
} else {
int entry_index = 0;
if(waitconfirm) {
BAMBOO_DEBUGPRINT_REG(data1);
BAMBOO_DEBUGPRINT_REG(data2);
#endif
- BAMBOO_EXIT(0xb008);
+ BAMBOO_EXIT(0xe01c);
//assume that the object was not moved, use the original address
/*if(isMsgSending) {
cache_msg_3(msgdata[2], GCMAPINFO, msgdata[1], msgdata[1]);
#ifndef CLOSE_PRINT
BAMBOO_DEBUGPRINT_REG(data2);
#endif
- BAMBOO_EXIT(0xb009);
+ BAMBOO_EXIT(0xe01d);
}
// store the mark result info
int cnum = data2;
#ifndef CLOSE_PRINT
BAMBOO_DEBUGPRINT_REG(data1);
#endif
- BAMBOO_EXIT(0xb00a);
+ BAMBOO_EXIT(0xe01e);
}
// all cores should do flush
if(data1 < NUMCORESACTIVE) {
// reside on this core
if(!RuntimeHashcontainskey(locktbl, reallock)) {
// no locks for this object, something is wrong
- BAMBOO_EXIT(0xa00b);
+ BAMBOO_EXIT(0xe01f);
} else {
int rwlock_obj = 0;
struct LockValue * lockvalue = NULL;