void initGC() {
if(STARTUPCORE == BAMBOO_NUM_OF_CORE) {
- for(int i=0; i<GCNUMBLOCK;i++) {
- allocationinfo.blocktable[i].status=BS_INIT;
- }
- allocationinfo.lowestfreeblock=NOFREEBLOCK;
for(int i = 0; i < NUMCORES4GC; i++) {
gccorestatus[i] = 1;
gcnumsendobjs[0][i] = gcnumsendobjs[1][i] = 0;
gcnumreceiveobjs[0][i] = gcnumreceiveobjs[1][i] = 0;
gcloads[i] = 0;
gcrequiredmems[i] = 0;
- gcfilledblocks[i] = 0;
- gcstopblock[i] = 0;
}
for(int i = NUMCORES4GC; i < NUMCORESACTIVE; i++) {
gccorestatus[i] = 1;
}
gcself_numsendobjs = 0;
gcself_numreceiveobjs = 0;
- gcmarkedptrbound = 0;
gcmovestartaddr = 0;
gctomove = false;
gcblock2fill = 0;
//spin until we have all responses
while(numconfirm!=0) ;
- // check the heaptop
- if(gcheaptop < gcmarkedptrbound) {
- gcheaptop = gcmarkedptrbound;
- }
GCPROFILE_ITEM();
GC_PRINTF("prepare to cache large objs \n");
// for load balancing
unsigned int gcheaptop;
unsigned INTPTR gcloads[NUMCORES4GC];
+unsigned INTPTR numblockspercore;
//Top of each core's heap
void * topptrs[NUMCORES4GC];
// compact instruction
-unsigned int gcmarkedptrbound;
+//keep track of what block we can fill to
unsigned int gcblock2fill;
-unsigned int gcstopblock[NUMCORES4GC]; // indicate when to stop compact phase
-unsigned int gcfilledblocks[NUMCORES4GC]; //indicate how many blocks have been fulfilled
// move instruction;
//this points to memory handed to core from master
/* Total number of blocks in heap */
#define GCNUMBLOCK (NUMCORES4GC+(BAMBOO_SHARED_MEM_SIZE-BAMBOO_LARGE_SMEM_BOUND)/BAMBOO_SMEM_SIZE)
+#define GCNUMLOCALBLOCK (GCNUMBLOCK/NUMCORES4GC)
/* This macro waits for the given gc phase */
#define WAITFORGCPHASE(phase) while(gc_status_info.gcphase != phase) ;
//Rounds object size up to next alignment unit size
#define ALIGNSIZE(s) ((((unsigned int)(s-1))&~(ALIGNMENTBYTES-1))+ALIGNMENTBYTES)
+#define GLOBALBLOCK2LOCAL(s) (s/NUMCORES4GC)
+
// mapping of pointer to block # (start from 0), here the block # is
// the global index
#define BLOCKINDEX(b, p) \
void * gcfindSpareMem_I(unsigned int requiredmem,unsigned int requiredcore) {
void * startaddr;
for(int k = 0; k < NUMCORES4GC; k++) {
- if((gccorestatus[k] == 0) && (gcfilledblocks[k] < gcstopblock[k])) {
- // check if this stopped core has enough mem
- startaddr=assignSpareMem_I(k, requiredmem);
- return startaddr;
- }
+
}
// If we cannot find spare mem right now, hold the request
gcrequiredmems[requiredcore] = requiredmem;
void master_compact() {
// predict number of blocks to fill for each core
void * tmpheaptop = 0;
- int numblockspercore = loadbalance(&tmpheaptop);
+ numblockspercore = loadbalance(&tmpheaptop);
GC_PRINTF("mark phase finished \n");
gc_resetCoreStatus();
- tmpheaptop = gcbaseva + BAMBOO_SHARED_MEM_SIZE;
+ //initialize local data structures first....we don't want remote requests messing data up
+ unsigned int initblocks=numblockspercore*NUMCORES4GC;
+ allocationinfo.lowestfreeblock=NOFREEBLOCKS;
+
+ //assigned blocks
+ for(int i=0;i<initblocks;i++) {
+ allocationinfo.blocktable[i].status=BS_INIT;
+ }
+
+ //free blocks
+ for(int i=initblocks;i<GCNUMBLOCK;i++) {
+ allocationinfo.blocktable[i].status=BS_FREE;
+ allocationinfo.blocktable[i].usedspace=0;
+ }
+
+ //start all of the cores
for(int i = 0; i < NUMCORES4GC; i++) {
// init some data strutures for compact phase
- gcfilledblocks[i] = 0;
gcrequiredmems[i] = 0;
gccorestatus[i] = 1;
//send start compact messages to all cores
- gcstopblock[i] = numblockspercore;
if(i != STARTUPCORE) {
send_msg_2(i, GCSTARTCOMPACT, numblockspercore);
} else {
tomark(stackptr);
gccurr_heaptop = 0; // record the size of all active objs in this core
// aligned but does not consider block boundaries
- gcmarkedptrbound = 0;
}
unsigned int isize = 0;
bool sendStall = false;
// ptr is an unmarked active object on this core
unsigned int isize=iunits<<ALIGNMENTSHIFT;
gccurr_heaptop += isize;
- void *top=ptr+isize;
- if (top>gcmarkedptrbound)
- gcmarkedptrbound=top;
}
// scan the pointers in object
unsigned int blockindex;
BLOCKINDEX(blockindex, heaptop);
struct blockrecord * blockrecord=&allocationinfo.blocktable[blockindex];
- if (cnum==blockrecord) {
- //this is our own memory...need to clear our lower blocks
+ blockrecord->status=BS_FREE;
+ blockrecord->usedspace=(unsigned INTPTR)(heaptop-OFFSET2BASEVA(blockindex));
+ /* Update the lowest free block */
+ if (blockindex < allocationinfo.lowestfreeblock) {
+ allocationinfo.lowestfreeblock=blockindex;
+ }
+
+ /* This is our own block...means we should mark other blocks above us as free*/
+ if (cnum==blockrecord->corenum) {
+ unsigned INTPTR nextlocalblocknum=GLOBALBLOCK2LOCAL(blockindex)+1;
+ for(;nextlocalblocknum<numblockspercore;nextlocalblocknum++) {
+ unsigned INTPTR blocknum=BLOCKINDEX2(cnum, nextlocalblocknum);
+ struct blockrecord * nextblockrecord=&allocationinfo.blocktable[blocknum];
+ nextblockrecord->status=BS_FREE;
+ nextblockrecord->usedspace=0;
+ }
}
}