#include "multicoregcprofile.h"
#include "gcqueue.h"
-#ifdef SMEMM
-extern unsigned int gcmem_mixed_threshold;
-extern unsigned int gcmem_mixed_usedmem;
-#endif // SMEMM
-
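+// gcflag signals that a collection has been requested; gc_status_info
+// records whether a collection is running and which phase it is in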
volatile bool gcflag;
gc_status_t gc_status_info;
#endif
void initmulticoregcdata() {
- if(STARTUPCORE == BAMBOO_NUM_OF_CORE) {
- // startup core to initialize corestatus[]
- for(int i = 0; i < NUMCORESACTIVE; i++) {
- gccorestatus[i] = 1;
- gcnumsendobjs[0][i] = gcnumsendobjs[1][i] = 0;
- gcnumreceiveobjs[0][i] = gcnumreceiveobjs[1][i] = 0;
- }
- for(int i = 0; i < NUMCORES4GC; i++) {
- gcloads[i] = 0;
- gcrequiredmems[i] = 0;
- gcstopblock[i] = 0;
- gcfilledblocks[i] = 0;
- }
- }
-
bamboo_smem_zero_top = NULL;
gcflag = false;
gc_status_info.gcprocessing = false;
gc_status_info.gcphase = FINISHPHASE;
gcprecheck = true;
- gccurr_heaptop = 0;
- gcself_numsendobjs = 0;
- gcself_numreceiveobjs = 0;
- gcmarkedptrbound = 0;
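+  // hash table holding forwarding information for shared objects
+  // (initial size 128)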
gcforwardobjtbl = allocateMGCHash_I(128);
gcheaptop = 0;
- gcmovestartaddr = 0;
- gctomove = false;
- gcmovepending = 0;
- gcblock2fill = 0;
-#ifdef SMEMM
- gcmem_mixed_threshold=(unsigned int)((BAMBOO_SHARED_MEM_SIZE-bamboo_reserved_smem*BAMBOO_SMEM_SIZE)*0.8);
- gcmem_mixed_usedmem = 0;
-#endif
#ifdef MGC_SPEC
gc_profile_flag = false;
#endif
return numbpc;
}
-
-// update the bmmboo_smemtbl to record current shared mem usage
-void updateSmemTbl(unsigned int coren, void * localtop) {
- unsigned int ltopcore = 0;
- unsigned int bound = BAMBOO_SMEM_SIZE_L;
- BLOCKINDEX(ltopcore, localtop);
- if((unsigned int)localtop>=(unsigned int)(gcbaseva+BAMBOO_LARGE_SMEM_BOUND)){
- bound = BAMBOO_SMEM_SIZE;
- }
- unsigned int load = (unsigned INTPTR)(localtop-gcbaseva)%(unsigned int)bound;
- unsigned int toset = 0;
- for(int j=0; 1; j++) {
- for(int i=0; i<2; i++) {
- toset = gc_core2block[2*coren+i]+(unsigned int)(NUMCORES4GC*2)*j;
- if(toset < ltopcore) {
- bamboo_smemtbl[toset]=BLOCKSIZE(toset<NUMCORES4GC);
-#ifdef SMEMM
- gcmem_mixed_usedmem += bamboo_smemtbl[toset];
-#endif
- } else if(toset == ltopcore) {
- bamboo_smemtbl[toset] = load;
-#ifdef SMEMM
- gcmem_mixed_usedmem += bamboo_smemtbl[toset];
-#endif
- return;
- } else {
- return;
- }
- }
- }
-}
-
void gc_collect(struct garbagelist * stackptr) {
gc_status_info.gcprocessing = true;
// inform the master that this core is at a gc safe point and is ready to
SUBTLECOMPACTPHASE, // 0x3
MAPPHASE, // 0x4
UPDATEPHASE, // 0x5
-#ifdef GC_CACHE_ADAPT
CACHEPOLICYPHASE, // 0x6
PREFINISHPHASE, // 0x7
-#endif
-  FINISHPHASE // 0x6/0x8
+  FINISHPHASE // 0x8
} GCPHASETYPE;
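+// the master core advances gcphase through these values in order; worker
+// cores wait for each phase (see WAITFORGCPHASE) before doing their part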
-unsigned int gcfilledblocks[NUMCORES4GC]; //indicate how many blocks have been fulfilled
+unsigned int gcfilledblocks[NUMCORES4GC]; //records how many blocks each core has filled
// move instruction;
-unsigned int gcmovestartaddr;
+//points to the start of the memory block handed to this core by the master
+volatile unsigned int gcmovestartaddr;
+//this flag tells the core that it is okay to start compacting
volatile bool gctomove;
+
+//keeps track of memory requests the master was not able to serve
unsigned int gcrequiredmems[NUMCORES4GC]; //record pending mem requests
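+//number of move operations that are still pending (presumably those whose
+//memory requests above could not be served immediately)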
volatile unsigned int gcmovepending;
-// The bottom of the shared memory = sbstart tbl + smemtbl + bamboo_rmsp
-// These three types of table are always reside at the bottom of the shared
-// memory and will never be moved or garbage collected
+// The tables below always reside at the bottom of the shared memory and
+// are never moved or garbage collected
+//gcmappingtbl gives the new location of each moved pointer
void ** gcmappingtbl;
+//number of bytes in the mapping table
unsigned int bamboo_rmsp_size;
+//mark table: keeps track of the mark bits
unsigned int * gcmarktbl;
-// table recording the starting address of each small block
-// (size is BAMBOO_SMEM_SIZE)
-// Note: 1. this table always resides on the very bottom of the shared memory
-int * gcsbstarttbl;
-#ifdef GC_TBL_DEBUG
-unsigned int gcsbstarttbl_len;
-#endif
unsigned int gcnumblock; // number of total blocks in the shared mem
void * gcbaseva; // base va for shared memory without reserved sblocks
#define WAITFORGCPHASE(phase) while(gc_status_info.gcphase != phase) ;
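+// usage sketch: a worker core spins until the master advances the phase,
+// e.g. WAITFORGCPHASE(UPDATEPHASE); before starting its update pass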
-
-
#define ISSHAREDOBJ(p) \
((((unsigned int)p)>=gcbaseva)&&(((unsigned int)p)<(gcbaseva+(BAMBOO_SHARED_MEM_SIZE))))
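+// i.e. p is a shared object iff it lies in [gcbaseva, gcbaseva+BAMBOO_SHARED_MEM_SIZE)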
-
#define MAXBLOCK 0x4fffffff //local block number that can never be reached...
void * array[];
};
-struct listitem {
- struct listitem * prev;
- struct listitem * next;
- struct garbagelist * stackptr;
-};
-
#endif // BAMBOO_MULTICORE_GC_H
extern struct global_defs_t * global_defs_p;
-#ifdef SMEMM
-extern unsigned int gcmem_mixed_threshold;
-extern unsigned int gcmem_mixed_usedmem;
-#endif
-
#ifdef MGC
extern struct lockvector bamboo_threadlocks;
#endif
int * allocsize) {
void * mem = NULL;
// find suitable block
- mem=gcbaseva+bamboo_smemtbl[tofindb]+OFFSET2BASEVA(tofindb);
- *allocsize = size;
- // set bamboo_smemtbl
- for(int i = tofindb; i <= totest; i++) {
- bamboo_smemtbl[i]=BLOCKSIZE(i<NUMCORES4GC);
- }
- if(tofindb == bamboo_free_block) {
- bamboo_free_block = totest+1;
- }
return mem;
}
int isize,
int * allocsize,
int minremain) {
- int i=0;
- int j=0;
- int size = 0;
- int bound = BAMBOO_SMEM_SIZE_L;
- int freeblocks=(gcnumblock-bamboo_reserved_smem-1)/NUMCORES4GC+1;
- while((*totest<(gcnumblock-bamboo_reserved_smem))&&(freeblocks>minremain)) {
- bound = BLOCKSIZE(*totest<NUMCORES4GC);
- int nsize = bamboo_smemtbl[*totest];
- if((nsize==bound)||((nsize != 0)&&(*totest != *tofindb))) {
- // a fully/partially occupied partition, can not be appended
- //the last continuous block is not big enough,check the next local block
- j+=i;
- i=(i+1)&1;
- *tofindb=*totest=gc_core2block[2*gccorenum+i]+(NUMCORES4GC*2)*j;
- freeblocks--;
- } else {
- // an empty block or a partially occupied block that can be set as the
- // first block
- if(*totest == *tofindb) {
- // the first partition
- size = bound - nsize;
- } else if(nsize == 0) {
- // an empty partition, can be appended
- size += bound;
- }
- if(size >= isize) {
- // have enough space in the block, malloc
- return mallocmem(*tofindb, *totest, size, allocsize);
- } else {
- // no enough space yet, try to append next continuous block
- *totest = *totest + 1;
- }
- }
- }
return NULL;
}
int* totest,
int isize,
int * allocsize) {
- int size = 0;
- int bound = BAMBOO_SMEM_SIZE_L;
- while(*totest<(gcnumblock-bamboo_reserved_smem)) {
- bound = BLOCKSIZE(*totest<NUMCORES4GC);
- int nsize = bamboo_smemtbl[*totest];
- if((nsize==bound)||((nsize != 0)&&(*totest != *tofindb))) {
- // a fully/partially occupied partition, can not be appended
- // set the next block as a new start
- *totest = *totest+1;
- *tofindb = *totest;
- } else {
- // an empty block or a partially occupied block that can be set as the
- // first block
- if(*totest == *tofindb) {
- // the first partition
- size = bound - nsize;
- } else if(nsize == 0) {
- // an empty partition, can be appended
- size += bound;
- }
- if(size >= isize) {
- // have enough space in the block, malloc
- return mallocmem(*tofindb, *totest, size, allocsize);
- } else {
- // no enough space yet, try to append next continuous block
- *totest = *totest + 1;
- }
- }
- }
+
return NULL;
}
tofindb=totest=gc_core2block[2*core2test[gccorenum][k]];
mem=searchBlock4Mem(&tofindb,&totest,core2test[gccorenum][k],isize,allocsize,(k==0)?0:((gcnumblock/NUMCORES4GC)>>LOCALMEMRESERVATION));
if(mem!=NULL) {
- gcmem_mixed_usedmem+=size;
return mem;
}
}
- if(gcmem_mixed_usedmem>=gcmem_mixed_threshold) {
- // no more memory available on either coren or its neighbour cores
- *allocsize = 0;
- return NULL;
- } else {
- // try allocate globally
- mem=globalmalloc_I(coren,isize,allocsize);
- if(mem!=NULL) {
- gcmem_mixed_usedmem+=size;
- }
- return mem;
- }
+
+  // no space on this core or its neighbours; try to allocate globally
+ mem=globalmalloc_I(coren,isize,allocsize);
+ return mem;
}
#endif
void * globalmalloc_I(int coren,
int isize,
int * allocsize) {
- void * mem = NULL;
- int tofindb = bamboo_free_block;
- int totest = tofindb;
- if(tofindb > gcnumblock-1-bamboo_reserved_smem) {
- // Out of shared memory
- *allocsize = 0;
- return NULL;
- }
- mem=searchBlock4Mem_global(&tofindb, &totest, isize, allocsize);
- if(mem == NULL) {
- *allocsize = 0;
- }
- return mem;
+ return NULL;
}
void * smemalloc(int coren, int isize, int * allocsize) {
#define BAMBOO_THREAD_QUEUE_SIZE (BAMBOO_SMEM_SIZE) // (45 * 16 * 1024)
#endif // GC_SMALLPAGESIZE
-volatile bool gc_localheap_s;
-
-struct freeMemItem {
- unsigned int ptr;
- int size;
- int startblock;
- int endblock;
- struct freeMemItem * next;
-};
-
-struct freeMemList {
- struct freeMemItem * head;
- struct freeMemItem * backuplist; // hold removed freeMemItem for reuse;
- // only maintain 1 freemMemItem
-};
-
-// table recording the number of allocated bytes on each block
-// Note: this table resides on the bottom of the shared heap for all cores
-// to access
-volatile unsigned int * bamboo_smemtbl;
-#ifdef GC_TBL_DEBUG
-// the length of the bamboo_smemtbl is gcnumblock
-#endif
-volatile unsigned int bamboo_free_block;
-unsigned int bamboo_reserved_smem; // reserved blocks on the top of the shared
- // heap e.g. 20% of the heap and should not
- // be allocated otherwise gc is invoked
+//keeps track of the top address that has been zeroed by the allocator
volatile unsigned int bamboo_smem_zero_top;
//BAMBOO_SMEM_ZERO_UNIT_SIZE must evenly divide the page size and be a
-//power of two(we rely on both in the allocation function)
+//power of two (we rely on both in the allocation function)
#define BAMBOO_SMEM_ZERO_UNIT_SIZE 4096
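+// the power-of-two requirement allows cheap alignment arithmetic, e.g.
+// rounding an address up to the next zeroed unit:
+//   (addr + BAMBOO_SMEM_ZERO_UNIT_SIZE - 1) & ~(BAMBOO_SMEM_ZERO_UNIT_SIZE - 1)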
#else
-//volatile mspace bamboo_free_msp;
+//free-memory pointer and size used when running without garbage collection
unsigned int bamboo_free_smemp;
int bamboo_free_smem_size;
#endif // MULTICORE_GC
+//This flag indicates that a memory request has been serviced
volatile bool smemflag;
+//Pointer to the new block of memory returned for the request
volatile unsigned int * bamboo_cur_msp;
+//Number of bytes in the new block of memory
volatile int bamboo_smem_size;
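+// a requesting core would typically spin on smemflag and then read the
+// address/size pair, roughly:
+//   while(!smemflag) ;
+//   void * base = (void *)bamboo_cur_msp;
+//   int size = bamboo_smem_size;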
#endif // BAMBOO_MULTICORE_MEM_H
};
#endif // GC_1
#elif defined SMEMM
-unsigned int gcmem_mixed_threshold = 0;
-unsigned int gcmem_mixed_usedmem = 0;
#define NUM_CORES2TEST 13
#ifdef GC_1
int core2test[1][NUM_CORES2TEST] = {
void * p = NULL;
int isize = size;
BAMBOO_ENTER_RUNTIME_MODE_FROM_CLIENT();
-#ifdef MULTICORE_GC
- extern bool gc_localheap_s;
-inermycalloc_i:
- p = gc_localheap_s ? BAMBOO_LOCAL_MEM_CALLOC_S(isize) :
- BAMBOO_LOCAL_MEM_CALLOC(isize);
-#else
- p = BAMBOO_LOCAL_MEM_CALLOC(isize); // calloc(m, isize);
-#endif
+ p = BAMBOO_LOCAL_MEM_CALLOC(isize);
+
if(p == NULL) {
-#ifdef MULTICORE_GC
- if(!gc_localheap_s) {
- gc_localheap_s = true;
- goto inermycalloc_i;
- }
-#endif
printf("mycalloc %s %d \n", file, line);
BAMBOO_EXIT();
}
int line) {
void * p = NULL;
int isize = size;
-#ifdef MULTICORE_GC
- extern bool gc_localheap_s;
-inermycalloc_i:
- p = gc_localheap_s ? BAMBOO_LOCAL_MEM_CALLOC_S(isize) :
- BAMBOO_LOCAL_MEM_CALLOC(isize);
-#else
- p = BAMBOO_LOCAL_MEM_CALLOC(isize); // calloc(m, isize);
-#endif
+ p = BAMBOO_LOCAL_MEM_CALLOC(isize);
if(p == NULL) {
-#ifdef MULTICORE_GC
- if(!gc_localheap_s) {
- gc_localheap_s = true;
- goto inermycalloc_i;
- }
-#endif
tprintf("mycalloc_i %s %d \n", file, line);
BAMBOO_EXIT();
}
void myfree(void * ptr) {
BAMBOO_ENTER_RUNTIME_MODE_FROM_CLIENT();
-#ifdef MULTICORE_GC
- if(ptr >= BAMBOO_LOCAL_HEAP_START_VA ) {
-#endif
BAMBOO_LOCAL_MEM_FREE(ptr);
-#ifdef MULTICORE_GC
-} else if(ptr >= BAMBOO_LOCAL_HEAP_START_VA_S) {
- BAMBOO_LOCAL_MEM_FREE_S(ptr);
-}
-#endif
BAMBOO_ENTER_CLIENT_MODE_FROM_RUNTIME();
return;
}
void myfree_i(void * ptr) {
-#ifdef MULTICORE_GC
- if(ptr >= BAMBOO_LOCAL_HEAP_START_VA ) {
-#endif
BAMBOO_LOCAL_MEM_FREE(ptr);
-#ifdef MULTICORE_GC
-} else if(ptr >= BAMBOO_LOCAL_HEAP_START_VA_S) {
- BAMBOO_LOCAL_MEM_FREE_S(ptr);
-}
-#endif
return;
}
void * mycalloc_share(int size);
#define FREEMALLOC(x) mycalloc_share(x)
#endif // #ifdef MULTICORE_GC
-//#define PTR(x) (32+(x-1)&~31)
#endif // #ifdef MULTICORE
#endif // #ifdef PRECISE_GC
#endif // #ifdef BOEHM_GC