for(; page_index < page_index_end; page_index++) {
bamboo_cache_policy_t policy = {0};
unsigned int block = 0;
- BLOCKINDEX(page_sva, block);
+ BLOCKINDEX((void *) page_sva, block);
unsigned int coren = gc_block2core[block%(NUMCORES4GC*2)];
CACHEADAPT_POLICY_SET_HOST_CORE(policy, coren);
CACHEADAPT_CHANGE_POLICY_4_PAGE(tmp_p,page_index,policy);
for(page_index = 0; page_index < page_num; page_index++) {
page_sva = gcbaseva + (BAMBOO_PAGE_SIZE) * page_index;
unsigned int block = 0;
- BLOCKINDEX(page_sva, block);
+ BLOCKINDEX((void *) page_sva, block);
unsigned int coren = gc_block2core[block%(NUMCORES4GC*2)];
printf("%x, %d, %d, ",(int)page_sva,page_index,coren);
for(int i = 0; i < NUMCORESACTIVE; i++) {
for(page_index = 0; page_index < page_num; page_index++) {
page_sva = gcbaseva + (BAMBOO_PAGE_SIZE) * page_index;
unsigned int block = 0;
- BLOCKINDEX(page_sva, block);
+ BLOCKINDEX((void *)page_sva, block);
unsigned int coren = gc_block2core[block%(NUMCORES4GC*2)];
printf(" %x, %d, %d, ",(int)page_sva,page_index,coren);
int accesscore = 0; // TODO
*((int *)(i + 4*12)), *((int *)(i + 4*13)),
*((int *)(i + 4*14)), *((int *)(i + 4*15)));
}
- sblock = gcreservedsb;
+ sblock = 0;
bool advanceblock = false;
// remaining memory
for(i=gcbaseva; (unsigned int)i<(unsigned int)(gcbaseva+BAMBOO_SHARED_MEM_SIZE); i+=4*16) {
// num of blocks per core
unsigned int numbpc = (unsigned int)b/(unsigned int)(NUMCORES4GC);
gctopblock = b;
- RESIDECORE(heaptop, gctopcore);
+ RESIDECORE(*heaptop, gctopcore);
return numbpc;
}
}
// update the bamboo_smemtbl to record current shared mem usage
-void updateSmemTbl(unsigned int coren, unsigned int localtop) {
+void updateSmemTbl(unsigned int coren, void * localtop) {
unsigned int ltopcore = 0;
unsigned int bound = BAMBOO_SMEM_SIZE_L;
BLOCKINDEX(localtop, ltopcore);
if((unsigned int)localtop>=(unsigned int)(gcbaseva+BAMBOO_LARGE_SMEM_BOUND)){
bound = BAMBOO_SMEM_SIZE;
}
- unsigned int load = (unsigned int)(localtop-gcbaseva)%(unsigned int)bound;
+ unsigned int load = (unsigned INTPTR)(localtop-gcbaseva)%(unsigned int)bound;
unsigned int toset = 0;
for(int j=0; 1; j++) {
for(int i=0; i<2; i++) {
return gcbaseva;
}
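The load computation above depends on the heap's two block granularities: blocks below BAMBOO_LARGE_SMEM_BOUND have size BAMBOO_SMEM_SIZE_L, blocks above it BAMBOO_SMEM_SIZE. A standalone sketch of that arithmetic, with placeholder constants standing in for the real Bamboo configuration:

#include <stdio.h>
#include <stdint.h>

/* Placeholder values; the real constants come from the Bamboo build. */
#define BAMBOO_SMEM_SIZE_L      (512 * 1024)  /* block size in the low heap */
#define BAMBOO_SMEM_SIZE        (256 * 1024)  /* block size above the bound */
#define BAMBOO_LARGE_SMEM_BOUND (4 * BAMBOO_SMEM_SIZE_L)

/* Bytes already used in the block containing localtop, mirroring the
   bound/load logic in updateSmemTbl(). */
static unsigned int blockload(uintptr_t gcbaseva, void *localtop) {
  unsigned int bound = BAMBOO_SMEM_SIZE_L;
  uintptr_t off = (uintptr_t)localtop - gcbaseva;
  if (off >= BAMBOO_LARGE_SMEM_BOUND)
    bound = BAMBOO_SMEM_SIZE;
  return (unsigned int)(off % bound);
}

int main(void) {
  uintptr_t base = 0x10000000;  /* hypothetical gcbaseva */
  void *top = (void *)(base + BAMBOO_LARGE_SMEM_BOUND + 1000);
  printf("load in current block: %u bytes\n", blockload(base, top));
  return 0;
}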
-INLINE void movelobj(unsigned int tmpheaptop,unsigned int ptr,int size,int isize) {
+INLINE void movelobj(void * tmpheaptop, void * ptr,int size,int isize) {
// move the large obj
if((unsigned int)gcheaptop < (unsigned int)(tmpheaptop+size)) {
memmove(tmpheaptop, gcheaptop, size);
BAMBOO_MEMSET_WH(tmpheaptop+size, -2, isize-size);
gcheaptop += size;
// cache the mapping info
- gcmappingtbl[OBJMAPPINGINDEX((unsigned int)ptr)]=(unsigned int)tmpheaptop;
+ gcmappingtbl[OBJMAPPINGINDEX(ptr)]=tmpheaptop;
tmpheaptop += isize;
}
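movelobj copies the object with memmove, since the source and destination ranges can overlap during compaction, then stamps the alignment padding with a -2 filler. A reduced sketch of that move-and-pad step (BAMBOO_MEMSET_WH is approximated by plain memset here):

#include <stdio.h>
#include <string.h>

/* Move 'size' live bytes from src to dst (possibly overlapping), then
   fill the padding up to the aligned size 'isize' with the -2 marker. */
static void move_and_pad(char *dst, char *src, int size, int isize) {
  memmove(dst, src, size);           /* memmove tolerates overlap */
  memset(dst + size, -2, isize - size);
}

int main(void) {
  char heap[32] = "..abcdefgh";        /* object currently at offset 2 */
  move_and_pad(heap, heap + 2, 8, 12); /* slide it down to offset 0 */
  printf("%.8s\n", heap);              /* prints abcdefgh */
  return 0;
}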
gcmem_mixed_usedmem += tomove;
#endif
// flush the sbstarttbl
- BAMBOO_MEMSET_WH(&(gcsbstarttbl[gcreservedsb]),'\0',(BAMBOO_SHARED_MEM_SIZE/BAMBOO_SMEM_SIZE-(unsigned int)gcreservedsb)*sizeof(unsigned int));
+ BAMBOO_MEMSET_WH(gcsbstarttbl,'\0',(BAMBOO_SHARED_MEM_SIZE/BAMBOO_SMEM_SIZE)*sizeof(unsigned int));
if(tomove == 0) {
gcheaptop = tmpheaptop;
} else {
// check how many blocks it crosses
unsigned INTPTR remain = (unsigned INTPTR) (tmpheaptop-gcbaseva);
// index of the sblock
- unsigned int sb = remain/BAMBOO_SMEM_SIZE+(unsigned int)gcreservedsb;
+ unsigned int sb = remain/BAMBOO_SMEM_SIZE;
unsigned int b = 0; // index of the block
BLOCKINDEX(tmpheaptop, b);
// check the remaining space in this block
remain -= BAMBOO_CACHE_LINE_SIZE;
tmpheaptop += BAMBOO_CACHE_LINE_SIZE;
BLOCKINDEX(tmpheaptop, b);
- sb = (unsigned int)(tmpheaptop-gcbaseva)/(BAMBOO_SMEM_SIZE)+gcreservedsb;
+ sb = (unsigned int)(tmpheaptop-gcbaseva)/(BAMBOO_SMEM_SIZE);
}
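With gcreservedsb removed, an address's sblock index is simply its offset from gcbaseva divided by BAMBOO_SMEM_SIZE. A small sketch with placeholder constants:

#include <stdio.h>
#include <stdint.h>

#define BAMBOO_SMEM_SIZE       (256 * 1024)  /* placeholder block size */
#define BAMBOO_CACHE_LINE_SIZE 64            /* placeholder line size */

int main(void) {
  uintptr_t gcbaseva = 0x10000000;  /* hypothetical heap base */
  char *tmpheaptop = (char *)(gcbaseva + 3 * BAMBOO_SMEM_SIZE);
  /* sblock index is now the plain offset / block size, with no
     gcreservedsb bias added on top */
  unsigned int sb = (unsigned int)(((uintptr_t)tmpheaptop - gcbaseva)
                                   / BAMBOO_SMEM_SIZE);
  printf("sblock before header skip: %u\n", sb);  /* 3 */
  /* skipping the per-block cache-line header stays in the same sblock */
  tmpheaptop += BAMBOO_CACHE_LINE_SIZE;
  sb = (unsigned int)(((uintptr_t)tmpheaptop - gcbaseva) / BAMBOO_SMEM_SIZE);
  printf("sblock after header skip:  %u\n", sb);   /* still 3 */
  return 0;
}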
// move the obj
movelobj(tmpheaptop,ptr,size,isize);
// for load balancing
-unsigned int gcheaptop;
+void * gcheaptop;
-unsigned int gcloads[NUMCORES4GC];
+unsigned INTPTR gcloads[NUMCORES4GC];
unsigned int gctopcore; // the core hosting the top of the heap
unsigned int gctopblock; // the index of the current top block
// The bottom of the shared memory = sbstart tbl + smemtbl + bamboo_rmsp
// These three tables always reside at the bottom of the shared
// memory and will never be moved or garbage collected
-unsigned int * gcmappingtbl;
+void ** gcmappingtbl;
unsigned int bamboo_rmsp_size;
unsigned int * gcmarktbl;
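A minimal sketch of the layout the comment above describes: the three tables carved back-to-back out of the bottom of shared memory, with the collectable heap starting right after them. The table sizes here are invented; the real ones follow from the heap geometry:

#include <stdio.h>
#include <stdint.h>

/* Hypothetical table sizes; the real values follow from the heap geometry. */
enum { SBSTARTTBL_BYTES = 4096, SMEMTBL_BYTES = 2048, RMSP_BYTES = 8192 };

struct gctables {
  int      *sbstarttbl;  /* sblock start offsets */
  unsigned *smemtbl;     /* per-block usage */
  void     *rmsp;        /* the bamboo_rmsp region */
  void     *heapstart;   /* first collectable address (gcbaseva) */
};

/* Carve the three tables out of the bottom of shared memory, back to back;
   everything above heapstart is movable and collectable. */
static struct gctables layout(char *sharedbottom) {
  struct gctables t;
  t.sbstarttbl = (int *)sharedbottom;
  t.smemtbl    = (unsigned *)(sharedbottom + SBSTARTTBL_BYTES);
  t.rmsp       = sharedbottom + SBSTARTTBL_BYTES + SMEMTBL_BYTES;
  t.heapstart  = sharedbottom + SBSTARTTBL_BYTES + SMEMTBL_BYTES + RMSP_BYTES;
  return t;
}

int main(void) {
  static char shared[64 * 1024];
  struct gctables t = layout(shared);
  printf("heap starts %td bytes above the tables\n",
         (char *)t.heapstart - shared);
  return 0;
}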
#define WAITFORGCPHASE(phase) while(gc_status_info.gcphase != phase) ;
-#define OBJMAPPINGINDEX(p) (((unsigned int)p-gcbaseva)/bamboo_baseobjsize)
+
#define ISSHAREDOBJ(p) \
((((unsigned int)p)>=gcbaseva)&&(((unsigned int)p)<(gcbaseva+(BAMBOO_SHARED_MEM_SIZE))))
/* Number of bits used for each alignment unit */
#define BITSPERALIGNMENT 2
#define ALIGNOBJSIZE(x) (x>>ALIGNMENTSHIFT)
+#define OBJMAPPINGINDEX(p) ALIGNOBJSIZE((unsigned INTPTR)(p-gcbaseva))
//There are two bits per object
//00 means not marked
} \
}
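A self-contained sketch of one plausible two-bit mark encoding, assuming sixteen 2-bit entries are packed per 32-bit word of the mark table (the packing and helper names are assumptions, not the runtime's actual macros):

#include <stdio.h>

/* Assumed packing: 2 bits per slot => 16 slots per 32-bit word. */
static unsigned int marktbl[1024];

static unsigned int getmark(unsigned int idx) {
  return (marktbl[idx >> 4] >> ((idx & 15) << 1)) & 3u;
}

static void setmark(unsigned int idx, unsigned int mark) {
  unsigned int shift = (idx & 15) << 1;
  marktbl[idx >> 4] =
      (marktbl[idx >> 4] & ~(3u << shift)) | ((mark & 3u) << shift);
}

int main(void) {
  setmark(5, 2);                         /* some nonzero mark state */
  printf("mark(5) = %u\n", getmark(5));  /* 2 */
  printf("mark(6) = %u\n", getmark(6));  /* 0: not marked */
  return 0;
}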
// mapping of pointer to core #
-#define RESIDECORE(p, c) \
- { \
+#define RESIDECORE(p, c) { \
if(1 == (NUMCORES4GC)) { \
- (*((unsigned int*)c)) = 0; \
+ c = 0; \
} else { \
unsigned INTPTR b; \
BLOCKINDEX(p, b); \
}
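Since RESIDECORE now assigns straight to c, callers pass a plain lvalue instead of a cast pointer. A sketch of the intended use with a stub BLOCKINDEX and block-to-core table (both placeholders):

#include <stdio.h>

#define NUMCORES4GC 4
/* Placeholder: the real BLOCKINDEX derives the block from the heap geometry. */
#define BLOCKINDEX(p, b) { (b) = ((unsigned long)(p)) >> 18; }
static unsigned int gc_block2core[NUMCORES4GC * 2] = {0, 1, 2, 3, 3, 2, 1, 0};

#define RESIDECORE(p, c) { \
  if (1 == (NUMCORES4GC)) { \
    c = 0; \
  } else { \
    unsigned long b; \
    BLOCKINDEX(p, b); \
    c = gc_block2core[b % (NUMCORES4GC * 2)]; \
  } \
}

int main(void) {
  unsigned int coren;
  void *heaptop = (void *)0x100000;
  RESIDECORE(heaptop, coren);  /* coren is a plain lvalue, no cast needed */
  printf("hosted on core %u\n", coren);
  return 0;
}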
// should be invoked with interrupts disabled
-INLINE int assignSpareMem_I(unsigned int sourcecore,unsigned int * requiredmem,unsigned int * tomove,unsigned int * startaddr) {
+INLINE int assignSpareMem_I(unsigned int sourcecore,unsigned int * requiredmem,unsigned int * tomove, void ** startaddr) {
unsigned int b = 0;
BLOCKINDEX(gcloads[sourcecore], b);
unsigned int boundptr = BOUNDPTR(b);
}
}
-INLINE int assignSpareMem(unsigned int sourcecore,unsigned int * requiredmem,unsigned int * tomove,unsigned int * startaddr) {
+INLINE int assignSpareMem(unsigned int sourcecore,unsigned int * requiredmem,unsigned int * tomove, void ** startaddr) {
BAMBOO_ENTER_RUNTIME_MODE_FROM_CLIENT();
int retval=assignSpareMem_I(sourcecore, requiredmem, tomove, startaddr);
BAMBOO_ENTER_CLIENT_MODE_FROM_RUNTIME();
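The _I suffix marks the variant that assumes interrupts are already disabled; the plain wrapper brackets it with the runtime/client mode switches, exactly as assignSpareMem does above. A generic sketch of the pattern with stubbed mode-switch macros:

#include <stdio.h>

/* Stand-ins for the Bamboo mode-switch primitives (placeholders). */
#define BAMBOO_ENTER_RUNTIME_MODE_FROM_CLIENT() /* interrupts off */
#define BAMBOO_ENTER_CLIENT_MODE_FROM_RUNTIME() /* interrupts back on */

/* The _I version may only be called with interrupts already disabled. */
static int do_work_I(int arg) { return arg * 2; }

/* The public version enters and leaves runtime mode around the _I call. */
static int do_work(int arg) {
  BAMBOO_ENTER_RUNTIME_MODE_FROM_CLIENT();
  int retval = do_work_I(arg);
  BAMBOO_ENTER_CLIENT_MODE_FROM_RUNTIME();
  return retval;
}

int main(void) {
  printf("%d\n", do_work(21)); /* prints 42 */
  return 0;
}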
INLINE void compact2Heaptop() {
// no core has spare mem and some cores are blocked with pending moves;
// find the current heap top and direct those moves to the heap top
- unsigned int p;
+ unsigned INTPTR p = gcloads[gctopcore];
unsigned int numblocks = gcfilledblocks[gctopcore];
- p = gcloads[gctopcore];
unsigned int b;
BLOCKINDEX(p, b);
unsigned int remain=GC_BLOCK_REMAIN_SIZE(b, p);
}
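GC_BLOCK_REMAIN_SIZE measures how much of block b lies above p. A sketch under the usual Bamboo layout assumption that the first NUMCORES4GC blocks use the large granularity (constants are placeholders):

#include <stdio.h>
#include <stdint.h>

#define NUMCORES4GC 4
#define BAMBOO_SMEM_SIZE_L (512 * 1024)  /* placeholder */
#define BAMBOO_SMEM_SIZE   (256 * 1024)  /* placeholder */
static uintptr_t gcbaseva = 0x10000000;  /* hypothetical heap base */

/* Upper bound of block b, assuming the first NUMCORES4GC blocks use the
   large granularity and the rest the normal one. */
static uintptr_t boundptr(unsigned int b) {
  if (b < NUMCORES4GC)
    return gcbaseva + (uintptr_t)(b + 1) * BAMBOO_SMEM_SIZE_L;
  return gcbaseva + (uintptr_t)NUMCORES4GC * BAMBOO_SMEM_SIZE_L
       + (uintptr_t)(b + 1 - NUMCORES4GC) * BAMBOO_SMEM_SIZE;
}

int main(void) {
  unsigned int b = 0;
  uintptr_t p = gcbaseva + 1000;  /* somewhere inside block 0 */
  printf("remaining in block %u: %lu bytes\n",
         b, (unsigned long)(boundptr(b) - p));
  return 0;
}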
// check the bamboo_smemtbl to decide the real bound
orig->bound = orig->base + bamboo_smemtbl[blocknum];
- } else if(0 == (orig->blockbase%BAMBOO_SMEM_SIZE)) {
+ } else if(0 == (((unsigned INTPTR)orig->blockbase)%BAMBOO_SMEM_SIZE)) {
orig->sblockindex += 1;
sbchanged = true;
}
to->bound = BAMBOO_SMEM_SIZE_L;
BASEPTR(BAMBOO_NUM_OF_CORE, to->numblocks, &(to->base));
- unsigned int tobase = to->base;
+ void * tobase = to->base;
to->ptr = tobase + to->offset;
// init the orig ptr
orig->base = tobase;
unsigned int blocknum = 0;
BLOCKINDEX(orig->base, blocknum);
- unsigned int origbase = orig->base;
+ void * origbase = orig->base;
// check the bamboo_smemtbl to decide the real bound
- orig->bound = origbase + (unsigned int)bamboo_smemtbl[blocknum];
+ orig->bound = origbase + (unsigned INTPTR)bamboo_smemtbl[blocknum];
orig->blockbase = origbase;
- orig->sblockindex = (unsigned int)(origbase - gcbaseva) / BAMBOO_SMEM_SIZE;
+ orig->sblockindex = (unsigned INTPTR)(origbase - gcbaseva) / BAMBOO_SMEM_SIZE;
int sbstart = gcsbstarttbl[orig->sblockindex];
if(sbstart == -1) {
#define FLUSHOBJNONNULL(obj, tt) {void *flushtmpptr=obj; obj=flushObj(flushtmpptr);}
INLINE void * flushObj(void * objptr) {
- return gcmappingtbl[OBJMAPPINGINDEX((unsigned int)objptr)];
+ return gcmappingtbl[OBJMAPPINGINDEX(objptr)];
}
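flushObj is just a forwarding lookup: OBJMAPPINGINDEX turns an old address into a slot of gcmappingtbl that holds the object's new address. A self-contained sketch of the scheme, adapted to pass the heap base explicitly and assuming 8-byte alignment units:

#include <stdio.h>
#include <stdint.h>

#define ALIGNMENTSHIFT 3  /* assumed: 8-byte alignment units */
#define ALIGNOBJSIZE(x) ((x) >> ALIGNMENTSHIFT)
/* Adapted from the header's macro to take the heap base as a parameter. */
#define OBJMAPPINGINDEX(p, base) ALIGNOBJSIZE((uintptr_t)(p) - (base))

static void *mappingtbl[1024];  /* one forwarding slot per alignment unit */

int main(void) {
  uintptr_t gcbaseva = 0x10000000;  /* hypothetical heap base */
  void *oldptr = (void *)(gcbaseva + 64);
  void *newptr = (void *)(gcbaseva + 16);
  /* compaction records where the object moved... */
  mappingtbl[OBJMAPPINGINDEX(oldptr, gcbaseva)] = newptr;
  /* ...and flushObj-style lookups forward stale references to it */
  void *flushed = mappingtbl[OBJMAPPINGINDEX(oldptr, gcbaseva)];
  printf("forwarded %p -> %p\n", oldptr, flushed);
  return 0;
}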
INLINE void updategarbagelist(struct garbagelist *listptr) {