/* MGCHASH ********************************************************/
-struct MGCHash * allocateMGCHash(int size,
- int conflicts) {
+//Must be a power of 2
+struct MGCHash * allocateMGCHash(int size) {
  struct MGCHash *thisvar;
- if (size <= 0) {
-#ifdef MULTICORE
- BAMBOO_EXIT();
-#else
- printf("Negative Hashtable size Exception\n");
- exit(-1);
-#endif
- }
+
  thisvar=(struct MGCHash *)RUNMALLOC(sizeof(struct MGCHash));
  thisvar->size = size;
- thisvar->bucket=(struct MGCNode *) RUNMALLOC(sizeof(struct MGCNode)*size);
+ thisvar->mask = ((size>>1)-1)<<1;
+ thisvar->bucket=(int *) RUNCALLOC(sizeof(unsigned int)*size);
-  //Set data counts
-  thisvar->num4conflicts = conflicts;
+  // num4conflicts is gone along with the chained-node implementation; the
+  // rewritten table is a flat, zero-initialized array of 2-slot sets.
  return thisvar;
}
void freeMGCHash(struct MGCHash *thisvar) {
- int i = 0;
- for(i=thisvar->size-1; i>=0; i--) {
- struct MGCNode *ptr;
- for(ptr=thisvar->bucket[i].next; ptr!=NULL; ) {
- struct MGCNode * nextptr=ptr->next;
- RUNFREE(ptr);
- ptr=nextptr;
- }
- }
+  // bucket is now a flat int array (no chained MGCNodes), so there is
+  // nothing to walk: free the array, then the table itself.
  RUNFREE(thisvar->bucket);
  RUNFREE(thisvar);
}
-int MGCHashadd(struct MGCHash * thisvar, int data) {
+// Clear every bucket slot to 0 (0 == "empty" marker) so the table can be
+// reused without reallocating.
+void MGCHashreset(struct MGCHash *thisvar) {
+  for(int i=0;i<thisvar->size;i++)
+    thisvar->bucket[i]=0;
+}
+
+int MGCHashadd(struct MGCHash * thisvar, unsigned INTPTR data) {
+  // 2-way set-associative insert: (data>>GC_SHIFT_BITS)&mask picks an even
+  // base slot (mask has bit 0 clear), and (slot, slot+1) form one set.
+  // Returns 1 if data was stored (second slot is overwritten on conflict),
+  // 0 if data was already present in either slot.
+  // NOTE(review): 0 doubles as the empty marker, so data==0 is rewritten on
+  // every call -- assumes callers only insert non-null values; confirm.
+  // NOTE(review): slots are int but data is unsigned INTPTR; the stored
+  // value is truncated on LP64 targets -- verify INTPTR width here.
  // Rehash code
-  unsigned int hashkey;
-  struct MGCNode *ptr;
-
-  int mask = (thisvar->size << (GC_SHIFT_BITS))-1;
-  hashkey = (((unsigned INTPTR)data)&mask)>>(GC_SHIFT_BITS);
-  ptr = &thisvar->bucket[hashkey];
-
-  struct MGCNode * prev = NULL;
-  if(ptr->data < thisvar->num4conflicts) {
-    struct MGCNode *node=RUNMALLOC(sizeof(struct MGCNode));
-    node->data=data;
-    node->next=(ptr->next);
-    ptr->next=node;
-    ptr->data++;
-  } else {
-    while (ptr->next!=NULL) {
-      prev = ptr;
-      ptr = ptr->next;
-    }
-    ptr->data = data;
-    ptr->next = thisvar->bucket[hashkey].next;
-    thisvar->bucket[hashkey].next = ptr;
-    prev->next = NULL;
+
+  unsigned int hashkey = (data>>GC_SHIFT_BITS)&thisvar->mask;
+  int * ptr = &thisvar->bucket[hashkey];
+  int ptrval= *ptr;
+  if (ptrval == 0) {
+    *ptr=data;
+    return 1;
+  } else if (ptrval==data) {
+    return 0;
  }
+  ptr++;
-  return 1;
+  if (*ptr == data) {
+    return 0;
+  } else {
+    *ptr=data;
+    return 1;
+  }
}
+
#ifdef MULTICORE
+// Interrupt-safe variant; size must be a power of 2 (see allocateMGCHash).
struct MGCHash * allocateMGCHash_I(int size,int conflicts) {
  struct MGCHash *thisvar;
- if (size <= 0) {
-#ifdef MULTICORE
- BAMBOO_EXIT();
-#else
- printf("Negative Hashtable size Exception\n");
- exit(-1);
-#endif
- }
+
  thisvar=(struct MGCHash *)RUNMALLOC_I(sizeof(struct MGCHash));
  thisvar->size = size;
- thisvar->bucket=(struct MGCNode *) RUNMALLOC_I(sizeof(struct MGCNode)*size);
+ thisvar->mask = ((size>>1)-1)<<1;
+ thisvar->bucket=(int *) RUNCALLOC_I(sizeof(unsigned int)*size);
  //Set data counts
  thisvar->num4conflicts = conflicts;
  return thisvar;
}
-int MGCHashadd_I(struct MGCHash * thisvar, int data) {
-  // Rehash code
-  unsigned int hashkey;
-  struct MGCNode *ptr;
-
-  int mask = (thisvar->size << (GC_SHIFT_BITS))-1;
-  hashkey = (((unsigned INTPTR)data)&mask)>>(GC_SHIFT_BITS);
-  ptr = &thisvar->bucket[hashkey];
-
-  struct MGCNode * prev = NULL;
-  if(ptr->data < thisvar->num4conflicts) {
-    struct MGCNode *node=RUNMALLOC_I(sizeof(struct MGCNode));
-    node->data=data;
-    node->next=(ptr->next);
-    ptr->next=node;
-    ptr->data++;
-  } else {
-    while (ptr->next!=NULL) {
-      prev = ptr;
-      ptr = ptr->next;
-    }
-    ptr->data = data;
-    ptr->next = thisvar->bucket[hashkey].next;
-    thisvar->bucket[hashkey].next = ptr;
-    prev->next = NULL;
-  }
-
-  return 1;
+int MGCHashadd_I(struct MGCHash * thisvar, unsigned INTPTR data) {
+  // Safe to delegate even in interrupt context: the rewritten MGCHashadd
+  // performs no allocation (the old chained version needed RUNMALLOC_I).
+  return MGCHashadd(thisvar, data);
}
#endif
-int MGCHashcontains(struct MGCHash *thisvar, int data) {
-  int mask = (thisvar->size << (GC_SHIFT_BITS))-1;
-  unsigned int hashkey = (((unsigned INTPTR)data)&mask)>>(GC_SHIFT_BITS);
-
-  struct MGCNode *ptr = thisvar->bucket[hashkey].next;
-  struct MGCNode *prev = NULL;
-  while (ptr!=NULL) {
-    if (ptr->data == data) {
-      if(prev != NULL) {
-        prev->next = NULL;
-        ptr->next = thisvar->bucket[hashkey].next;
-        thisvar->bucket[hashkey].next = ptr;
-      }
+int MGCHashcontains(struct MGCHash *thisvar, unsigned INTPTR data) {
+  // Probe both slots of the 2-slot set selected by
+  // (data>>GC_SHIFT_BITS)&mask; mask keeps the base index even, so the
+  // second probe (ptr+1) stays inside the bucket array.
+  // NOTE(review): 0 marks an empty slot, so a stored value of 0 is
+  // indistinguishable from "absent".
  // Rehash code
-      return 1;   // success
-    }
-    prev = ptr;
-    ptr = ptr->next;
-  }
+  unsigned int hashkey = (data>>GC_SHIFT_BITS)&thisvar->mask;
+  int * ptr = &thisvar->bucket[hashkey];
-  return 0;   // failure
+  if (*ptr==data) {
+    return 1;
+  }
+  ptr++;
+
+  return (*ptr == data);
}
+
// according to collected statistic data
// find the core that accesses the page #page_index most
-#define CACHEADAPT_FIND_HOTEST_CORE(page_index,hotestcore,hotfreq) \
+#define CACHEADAPT_FIND_HOTTEST_CORE(page_index,hottestcore,hotfreq) \
{ \
int *local_tbl=&gccachesamplingtbl_r[page_index]; \
for(int i = 0; i < NUMCORESACTIVE; i++) { \
local_tbl=(int *)(((char *)local_tbl)+size_cachesamplingtbl_local_r); \
if(hotfreq < freq) { \
hotfreq = freq; \
- hotestcore = i; \
+ hottestcore = i; \
} \
} \
}
// find the core that accesses the page #page_index most and comput the total
// access time of the page at the same time
-#define CACHEADAPT_FIND_HOTEST_CORE_W_TOTALFREQ(page_index,hotestcore,hotfreq,totalfreq) \
+#define CACHEADAPT_FIND_HOTTEST_CORE_W_TOTALFREQ(page_index,hottestcore,hotfreq,totalfreq) \
{ \
int *local_tbl=&gccachesamplingtbl_r[page_index]; \
for(int i = 0; i < NUMCORESACTIVE; i++) { \
totalfreq += freq; \
if(hotfreq < freq) { \
hotfreq = freq; \
- hotestcore = i; \
+ hottestcore = i; \
} \
} \
}
}
}
-void cacheAdapt_policy_hotest(int coren){
+void cacheAdapt_policy_hottest(int coren){
unsigned int page_num=(BAMBOO_SHARED_MEM_SIZE)/(BAMBOO_PAGE_SIZE);
unsigned int page_gap=page_num/NUMCORESACTIVE;
unsigned int page_index=page_gap*coren;
int * tmp_p = gccachepolicytbl;
for(; page_index < page_index_end; page_index++) {
bamboo_cache_policy_t policy = {0};
- unsigned int hotestcore = 0;
+ unsigned int hottestcore = 0;
unsigned int hotfreq = 0;
- CACHEADAPT_FIND_HOTEST_CORE(page_index,hotestcore,hotfreq);
+ CACHEADAPT_FIND_HOTTEST_CORE(page_index,hottestcore,hotfreq);
// TODO
// Decide the cache strategy for this page
// If decide to adapt a new cache strategy, write into the shared block of
// written is enough to hold the information.
// Format: page start va + cache strategy(hfh/(host core+[x,y]))
if(hotfreq != 0) {
- // locally cache the page in the hotest core
- CACHEADAPT_POLICY_SET_HOST_CORE(policy, hotestcore);
+ // locally cache the page in the hottest core
+ CACHEADAPT_POLICY_SET_HOST_CORE(policy, hottestcore);
}
CACHEADAPT_CHANGE_POLICY_4_PAGE(tmp_p,page_index,policy);
page_sva += BAMBOO_PAGE_SIZE;
int * tmp_p = gccachepolicytbl;
for(; page_index < page_index_end; page_index++) {
bamboo_cache_policy_t policy = {0};
- unsigned int hotestcore = 0;
+ unsigned int hottestcore = 0;
unsigned long long totalfreq = 0;
unsigned int hotfreq = 0;
- CACHEADAPT_FIND_HOTEST_CORE_W_TOTALFREQ(page_index,hotestcore,hotfreq,totalfreq);
+ CACHEADAPT_FIND_HOTTEST_CORE_W_TOTALFREQ(page_index,hottestcore,hotfreq,totalfreq);
// Decide the cache strategy for this page
// If decide to adapt a new cache strategy, write into the shared block of
// the gcpolicytbl
// use hfh
policy.cache_mode = BAMBOO_CACHE_MODE_HASH;
} else {
- // locally cache the page in the hotest core
- CACHEADAPT_POLICY_SET_HOST_CORE(policy, hotestcore);
+ // locally cache the page in the hottest core
+ CACHEADAPT_POLICY_SET_HOST_CORE(policy, hottestcore);
}
}
CACHEADAPT_CHANGE_POLICY_4_PAGE(tmp_p,page_index,policy);
#if 0
#define GC_CACHE_ADAPT_OVERLOAD_THRESHOLD 10
-// record the worklocad of the hotestcore into core2heavypages
-#define CACHEADAPT_RECORD_PAGE_WORKLOAD(hotestcore,totalfreq,hotfreq,remoteaccess,tmp_p) \
+// record the workload of the hottestcore into core2heavypages
+#define CACHEADAPT_RECORD_PAGE_WORKLOAD(hottestcore,totalfreq,hotfreq,remoteaccess,tmp_p) \
{ \
- workload[hotestcore] += (totalfreq); \
+ workload[hottestcore] += (totalfreq); \
total_workload += (totalfreq); \
unsigned long long remoteaccess = (totalfreq) - (hotfreq); \
- unsigned int index = (unsigned int)core2heavypages[hotestcore][0]; \
- core2heavypages[hotestcore][3*index+3] = (remoteaccess); \
- core2heavypages[hotestcore][3*index+2] = (totalfreq); \
- core2heavypages[hotestcore][3*index+1] = (unsigned long long)((tmp_p)-1); \
- core2heavypages[hotestcore][0]++; \
+ unsigned int index = (unsigned int)core2heavypages[hottestcore][0]; \
+ core2heavypages[hottestcore][3*index+3] = (remoteaccess); \
+ core2heavypages[hottestcore][3*index+2] = (totalfreq); \
+ core2heavypages[hottestcore][3*index+1] = (unsigned long long)((tmp_p)-1); \
+ core2heavypages[hottestcore][0]++; \
}
void gc_quicksort(unsigned long long *array,unsigned int left,unsigned int right,unsigned int offset) {
memset(core2heavypages,0,sizeof(unsigned long long)*(page_num*3+1)*NUMCORESACTIVE);
for(page_index = 0; page_sva < gctopva; page_index++) {
bamboo_cache_policy_t policy = {0};
- unsigned int hotestcore = 0;
+ unsigned int hottestcore = 0;
unsigned long long totalfreq = 0;
unsigned int hotfreq = 0;
- CACHEADAPT_FIND_HOTEST_CORE_W_TOTALFREQ(page_index,hotestcore,hotfreq,totalfreq);
+ CACHEADAPT_FIND_HOTTEST_CORE_W_TOTALFREQ(page_index,hottestcore,hotfreq,totalfreq);
// Decide the cache strategy for this page
// If decide to adapt a new cache strategy, write into the shared block of
// the gcsharedsamplingtbl. The mem recording information that has been
if(hotfreq != 0) {
totalfreq/=BAMBOO_PAGE_SIZE;
hotfreq/=BAMBOO_PAGE_SIZE;
- // locally cache the page in the hotest core
- CACHEADAPT_POLICY_SET_HOST_CORE(policy, hotestcore);
+ // locally cache the page in the hottest core
+ CACHEADAPT_POLICY_SET_HOST_CORE(policy, hottestcore);
CACHEADAPT_CHANGE_POLICY_4_PAGE(tmp_p,page_index,policy,numchanged);
- CACHEADAPT_RECORD_PAGE_WORKLOAD(hotestcore,totalfreq,hotfreq,remoteaccess,tmp_p);
+ CACHEADAPT_RECORD_PAGE_WORKLOAD(hottestcore,totalfreq,hotfreq,remoteaccess,tmp_p);
}
page_sva += BAMBOO_PAGE_SIZE;
}
memset(core2heavypages,0,sizeof(unsigned long long)*(page_num*3+1)*NUMCORESACTIVE);
for(page_index = 0; page_sva < gctopva; page_index++) {
bamboo_cache_policy_t policy = {0};
- unsigned int hotestcore = 0;
+ unsigned int hottestcore = 0;
unsigned long long totalfreq = 0;
unsigned int hotfreq = 0;
- CACHEADAPT_FIND_HOTEST_CORE_W_TOTALFREQ(page_index,hotestcore,hotfreq,totalfreq);
+ CACHEADAPT_FIND_HOTTEST_CORE_W_TOTALFREQ(page_index,hottestcore,hotfreq,totalfreq);
// Decide the cache strategy for this page
// If decide to adapt a new cache strategy, write into the shared block of
// the gcsharedsamplingtbl. The mem recording information that has been
if(hotfreq != 0) {
totalfreq/=BAMBOO_PAGE_SIZE;
hotfreq/=BAMBOO_PAGE_SIZE;
- // locally cache the page in the hotest core
- CACHEADAPT_POLICY_SET_HOST_CORE(policy, hotestcore);
+ // locally cache the page in the hottest core
+ CACHEADAPT_POLICY_SET_HOST_CORE(policy, hottestcore);
CACHEADAPT_CHANGE_POLICY_4_PAGE(tmp_p,page_index,policy,numchanged);
- CACHEADAPT_RECORD_PAGE_WORKLOAD(hotestcore,totalfreq,hotfreq,remoteaccess,tmp_p);
+ CACHEADAPT_RECORD_PAGE_WORKLOAD(hottestcore,totalfreq,hotfreq,remoteaccess,tmp_p);
}
page_sva += BAMBOO_PAGE_SIZE;
}
#elif defined GC_CACHE_ADAPT_POLICY2
cacheAdapt_policy_local(coren);
#elif defined GC_CACHE_ADAPT_POLICY3
- cacheAdapt_policy_hotest(coren);
+ cacheAdapt_policy_hottest(coren);
#elif defined GC_CACHE_ADAPT_POLICY4
cacheAdapt_policy_dominate(coren);
//#elif defined GC_CACHE_ADAPT_POLICY5