int gcheadindex=0;
struct pointerblock *gctail=NULL;
int gctailindex=0;
-struct pointerblock *gctail2=NULL;
-int gctailindex2=0;
struct pointerblock *gcspare=NULL;
struct lobjpointerblock *gclobjhead=NULL;
int gclobjheadindex=0;
struct lobjpointerblock *gclobjtail=NULL;
int gclobjtailindex=0;
-struct lobjpointerblock *gclobjtail2=NULL;
-int gclobjtailindex2=0;
struct lobjpointerblock *gclobjspare=NULL;
extern int gcheadindex;
extern struct pointerblock *gctail;
extern int gctailindex;
-extern struct pointerblock *gctail2;
-extern int gctailindex2;
extern struct pointerblock *gcspare;
extern struct lobjpointerblock *gclobjhead;
extern int gclobjheadindex;
extern struct lobjpointerblock *gclobjtail;
extern int gclobjtailindex;
-extern struct lobjpointerblock *gclobjtail2;
-extern int gclobjtailindex2;
extern struct lobjpointerblock *gclobjspare;
static void gc_queueinit() {
// initialize queue
if (gchead==NULL) {
- gcheadindex=gctailindex=gctailindex2 = 0;
+ gcheadindex=gctailindex=0;
- gchead=gctail=gctail2=RUNMALLOC(sizeof(struct pointerblock));
+ gchead=gctail=RUNMALLOC(sizeof(struct pointerblock));
} else {
- gctailindex=gctailindex2=gcheadindex=0;
- gctail=gctail2=gchead;
+ gctailindex=gcheadindex=0;
+ gctail=gchead;
}
gchead->next=NULL;
// initialize the large obj queues
if (gclobjhead==NULL) {
gclobjheadindex=0;
gclobjtailindex=0;
- gclobjtailindex2=0;
- gclobjhead=gclobjtail=gclobjtail2=RUNMALLOC(sizeof(struct lobjpointerblock));
+ gclobjhead=gclobjtail=RUNMALLOC(sizeof(struct lobjpointerblock));
} else {
- gclobjtailindex=gclobjtailindex2=gclobjheadindex=0;
- gclobjtail=gclobjtail2=gclobjhead;
+ gclobjtailindex=gclobjheadindex=0;
+ gclobjtail=gclobjhead;
}
gclobjhead->next=gclobjhead->prev=NULL;
}
return gctail->ptrs[gctailindex++];
}
-// dequeue and do not destroy the queue
-static void * gc_dequeue2_I() {
- if (gctailindex2==NUMPTRS) {
- gctail2=gctail2->next;
- gctailindex2=0;
- }
- return gctail2->ptrs[gctailindex2++];
-}
-
static int gc_moreItems_I() {
return !((gchead==gctail)&&(gctailindex==gcheadindex));
}
-static int gc_moreItems2_I() {
- return !((gchead==gctail2)&&(gctailindex2==gcheadindex));
-}
-
// should be invoked with interruption closed
// enqueue a large obj: start addr & length
static void gc_lobjenqueue_I(void * ptr,
return !((gclobjhead==gclobjtail)&&(gclobjtailindex==gclobjheadindex));
}
-// dequeue and don't destroy the queue
-static void gc_lobjdequeue2_I() {
- if (gclobjtailindex2==NUMLOBJPTRS) {
- gclobjtail2=gclobjtail2->next;
- gclobjtailindex2=1;
- } else {
- gclobjtailindex2++;
- }
-}
-
-static int gc_lobjmoreItems2_I() {
- return !((gclobjhead==gclobjtail2)&&(gclobjtailindex2==gclobjheadindex));
-}
-
-// 'reversly' dequeue and don't destroy the queue
-static void gc_lobjdequeue3_I() {
- if (gclobjtailindex2==0) {
- gclobjtail2=gclobjtail2->prev;
- gclobjtailindex2=NUMLOBJPTRS-1;
- } else {
- gclobjtailindex2--;
- }
-}
-
-static int gc_lobjmoreItems3_I() {
- return !((gclobjtail==gclobjtail2)&&(gclobjtailindex2==gclobjtailindex));
-}
-
-static void gc_lobjqueueinit4_I() {
- gclobjtail2 = gclobjtail;
- gclobjtailindex2 = gclobjtailindex;
-}
-
-static void * gc_lobjdequeue4_I(unsigned int * length,
- unsigned int * host) {
- if (gclobjtailindex2==NUMLOBJPTRS) {
- gclobjtail2=gclobjtail2->next;
- gclobjtailindex2=0;
- }
- if(length != NULL) {
- *length = gclobjtail2->lengths[gclobjtailindex2];
- }
- if(host != NULL) {
- *host = (unsigned int)(gclobjtail2->hosts[gclobjtailindex2]);
- }
- return gclobjtail2->lobjs[gclobjtailindex2++];
-}
-
-static int gc_lobjmoreItems4_I() {
- return !((gclobjhead==gclobjtail2)&&(gclobjtailindex2==gclobjheadindex));
-}
-
////////////////////////////////////////////////////////////////////
// functions that can be invoked in normal places
////////////////////////////////////////////////////////////////////
return r;
}
-// dequeue and do not destroy the queue
-static void * gc_dequeue2() {
- BAMBOO_ENTER_RUNTIME_MODE_FROM_CLIENT();
- if (gctailindex2==NUMPTRS) {
- gctail2=gctail2->next;
- gctailindex2=0;
- }
- void * r = gctail2->ptrs[gctailindex2++];
- BAMBOO_ENTER_CLIENT_MODE_FROM_RUNTIME();
- return r;
-}
-
-static int gc_moreItems2() {
- BAMBOO_ENTER_RUNTIME_MODE_FROM_CLIENT();
- int r = !((gchead==gctail2)&&(gctailindex2==gcheadindex));
- BAMBOO_ENTER_CLIENT_MODE_FROM_RUNTIME();
- return r;
-}
-
static void gc_lobjenqueue(void * ptr,
unsigned int length,
unsigned int host) {
#define OBJMASK 0x40000000 //set to whatever the smallest object mark is
#define MARKMASK 0xc0000000 //set to whatever the smallest object mark is
-
+/*
+ The bitmap mark array uses 2 mark bits per alignment unit.
+
+ The clever trick is that we encode the length of the object (in
+ units of alignment units) using just these two bits. The basic
+ idea is to generate a variable length encoding of the length in
+ which the length of the encoding is shorter than the number of mark
+ bits taken up by the object.
+
+ To make this efficient, it is table driven for objects that are
+ less than 16 alignment units in length. For larger objects, we
+ just use addition.
+*/
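For illustration, here is a minimal sketch (not part of the patch) of how a 2-bits-per-alignment-unit bitmap can be probed and updated. It assumes the same layout the old GETMARKED/SETMARKED macros used: gcmarktbl packs sixteen 2-bit fields into each 32-bit entry, indexed by ALIGNOBJSIZE(ptr - gcbaseva). The patch's actual checkMark/setMark/setLengthMarked helpers are defined elsewhere and may differ.

static inline unsigned int sketch_getmarkbits(void * ptr) {
  // alignment-unit index of ptr relative to the shared heap base
  unsigned INTPTR offset = ALIGNOBJSIZE((unsigned INTPTR)(ptr - gcbaseva));
  // table entry = offset/16, bit position = (offset mod 16) * 2
  return (gcmarktbl[offset>>4] >> ((offset&15)<<1)) & 3;
}

static inline void sketch_setmarkbits(void * ptr, unsigned int bits) {
  unsigned INTPTR offset = ALIGNOBJSIZE((unsigned INTPTR)(ptr - gcbaseva));
  // assumes the 2-bit field for this unit is currently zero
  gcmarktbl[offset>>4] |= (bits & 3) << ((offset&15)<<1);
}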
/* Return length in units of ALIGNSIZE */
BAMBOO_ASSERTMSG(cacheLObjs(), "Not enough space to cache large objects\n");
}
-void master_compact() {
- // predict number of blocks to fill for each core
- void * tmpheaptop = 0;
- int numpbc = loadbalance(&tmpheaptop);
- //tprintf("numpbc: %d \n", numpbc);
-
- numpbc = BAMBOO_SHARED_MEM_SIZE/BAMBOO_SMEM_SIZE;
- GC_PRINTF("mark phase finished \n");
-
- tmpheaptop = gcbaseva + BAMBOO_SHARED_MEM_SIZE;
- for(int i = 0; i < NUMCORES4GC; i++) {
- unsigned int tmpcoreptr = 0;
- BASEPTR(i, numpbc, &tmpcoreptr);
- // init some data strutures for compact phase
- gcloads[i] = NULL;
- gcfilledblocks[i] = 0;
- gcrequiredmems[i] = 0;
- gccorestatus[i] = 1;
- //send start compact messages to all cores
- //TODO bug here, do not know if the direction is positive or negtive?
- //if (tmpcoreptr < tmpheaptop) {
- gcstopblock[i] = numpbc+1;
- if(i != STARTUPCORE) {
- send_msg_2(i, GCSTARTCOMPACT, numpbc+1);
- } else {
- gcblock2fill = numpbc+1;
- }
- /*} else {
- gcstopblock[i] = numpbc;
- if(i != STARTUPCORE) {
- send_msg_2(i, GCSTARTCOMPACT, numpbc);
- } else {
- gcblock2fill = numpbc;
- }
- }*/
- }
- BAMBOO_CACHE_MF();
- GCPROFILE_ITEM();
- // compact phase
- struct moveHelper * orig = (struct moveHelper *)RUNMALLOC(sizeof(struct moveHelper));
- struct moveHelper * to = (struct moveHelper *)RUNMALLOC(sizeof(struct moveHelper));
- compact_master(orig, to);
- GCPROFILE_ITEM();
- GC_PRINTF("prepare to move large objs \n");
- // move largeObjs
- moveLObjs();
- GC_PRINTF("compact phase finished \n");
- RUNFREE(orig);
- RUNFREE(to);
-}
void master_updaterefs(struct garbagelist * stackptr) {
gc_status_info.gcphase = FLUSHPHASE;
extern gc_status_t gc_status_info;
volatile bool gcprecheck; // indicates if there are updated pregc information
-unsigned int gccurr_heaptop;
+unsigned INTPTR gccurr_heaptop;
struct MGCHash * gcforwardobjtbl; // cache forwarded objs in mark phase
// for mark phase termination
volatile unsigned int gccorestatus[NUMCORESACTIVE];//records status of each core
#define ALIGNMENTSHIFT 5
-/* Number of bits used for each alignment unit */
-#define BITSPERALIGNMENT 2
+
#define ALIGNOBJSIZE(x) (x>>ALIGNMENTSHIFT)
#define OBJMAPPINGINDEX(p) ALIGNOBJSIZE((unsigned INTPTR)(p-gcbaseva))
+#define ALIGNUNITS(s) (((s-1)>>ALIGNMENTSHIFT)+1)
-//There are two bits per object
-//00 means not marked
-//11 means first block of object
-//10 means marked block
-
-#define UNMARKED 0
-#define MARKEDFIRST 3
-#define MARKEDLATER 2
-
-//sets y to the marked status of x
-#define GETMARKED(y,x) { unsigned INTPTR offset=ALIGNOBJSIZE((unsigned INTPTR)(x-gcbaseva)); \
- y=(gcmarktbl[offset>>4]>>((offset&15)<<1))&3; }
-
-//sets the marked status of x to y (assumes zero'd)
-#define SETMARKED(y,x) { unsigned INTPTR offset=ALIGNOBJSIZE((unsigned INTPTR)(x-gcbaseva)); \
- gcmarktbl[offset>>4]|=y<<((offset&15)<<1); }
-
-//sets the marked status of x to y (assumes zero'd)
-#define RESETMARKED(x) { unsigned INTPTR offset=ALIGNOBJSIZE((unsigned INTPTR)(x-gcbaseva)); \
- gcmarktbl[offset>>4]&=~(3<<((offset&15)<<1)); }
-#define ALIGNSIZE(s, as) (*((unsigned int*)as))=((((unsigned int)(s-1))&(~(BAMBOO_CACHE_LINE_MASK)))+(BAMBOO_CACHE_LINE_SIZE))
+#define ALIGNSIZE(s) ((((unsigned int)(s-1))&~(ALIGNMENTBYTES-1))+ALIGNMENTBYTES)
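A quick arithmetic check of the new macros, assuming ALIGNMENTBYTES is 1<<ALIGNMENTSHIFT (i.e. 32), which this hunk does not show: for a 100-byte object,

  ALIGNUNITS(100) = ((100-1)>>5)+1 = 3+1 = 4 alignment units
  ALIGNSIZE(100)  = ((99 & ~31)+32) = 96+32 = 128 bytes = 4*32

so both round a byte size up to whole 32-byte units, and iunits<<ALIGNMENTSHIFT in the mark phase yields the same value as ALIGNSIZE(size).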
// mapping of pointer to block # (start from 0), here the block # is
CACHEADAPT_SAMPLING_DATA_REVISE_INIT(orig, to);
} else
return true;
-}
+ }
}
void compact() {
BAMBOO_ASSERT(COMPACTPHASE == gc_status_info.gcphase);
BAMBOO_CACHE_MF();
- // initialize pointers for comapcting
- struct moveHelper * orig = (struct moveHelper *)RUNMALLOC(sizeof(struct moveHelper));
- struct moveHelper * to = (struct moveHelper *)RUNMALLOC(sizeof(struct moveHelper));
- if(!initOrig_Dst(orig, to)) {
+ // initialize structs for compacting
+ struct moveHelper orig={0,NULL,NULL,0,NULL,0,0,0,0};
+ struct moveHelper to={0,NULL,NULL,0,NULL,0,0,0,0};
+ if(!initOrig_Dst(&orig, &to)) {
// no available data to compact
// send compact finish msg to STARTUP core
- send_msg_6(STARTUPCORE,GCFINISHCOMPACT,BAMBOO_NUM_OF_CORE,false,0,to->base,0);
- RUNFREE(orig);
- RUNFREE(to);
+ send_msg_6(STARTUPCORE,GCFINISHCOMPACT,BAMBOO_NUM_OF_CORE,false,0,to.base,0);
} else {
- CACHEADAPT_SAMPLING_DATA_REVISE_INIT(orig, to);
+ CACHEADAPT_SAMPLING_DATA_REVISE_INIT(&orig, &to);
unsigned int filledblocks = 0;
void * heaptopptr = NULL;
bool localcompact = true;
- compacthelper(orig, to, &filledblocks, &heaptopptr, &localcompact, false);
- RUNFREE(orig);
- RUNFREE(to);
+ compacthelper(&orig, &to, &filledblocks, &heaptopptr, &localcompact, false);
}
}
-void compact_master(struct moveHelper * orig, struct moveHelper * to) {
- // initialize pointers for comapcting
- initOrig_Dst(orig, to);
- CACHEADAPT_SAMPLING_DATA_REVISE_INIT(orig, to);
+void master_compact() {
+ // predict number of blocks to fill for each core
+ void * tmpheaptop = 0;
+ int numpbc = loadbalance(&tmpheaptop);
+
+ numpbc = BAMBOO_SHARED_MEM_SIZE/BAMBOO_SMEM_SIZE;
+ GC_PRINTF("mark phase finished \n");
+
+ tmpheaptop = gcbaseva + BAMBOO_SHARED_MEM_SIZE;
+ for(int i = 0; i < NUMCORES4GC; i++) {
+ unsigned int tmpcoreptr = 0;
+ BASEPTR(i, numpbc, &tmpcoreptr);
+ // init some data structures for compact phase
+ gcloads[i] = NULL;
+ gcfilledblocks[i] = 0;
+ gcrequiredmems[i] = 0;
+ gccorestatus[i] = 1;
+ //send start compact messages to all cores
+ gcstopblock[i] = numpbc+1;
+ if(i != STARTUPCORE) {
+ send_msg_2(i, GCSTARTCOMPACT, numpbc+1);
+ } else {
+ gcblock2fill = numpbc+1;
+ }
+ }
+ BAMBOO_CACHE_MF();
+ GCPROFILE_ITEM();
+ // compact phase
+ compact_master();
+ GCPROFILE_ITEM();
+ GC_PRINTF("prepare to move large objs \n");
+ // move largeObjs
+ moveLObjs();
+ GC_PRINTF("compact phase finished \n");
+}
+
+
+void compact_master() {
+ // initialize structs for compacting
+ struct moveHelper orig={0,NULL,NULL,0,NULL,0,0,0,0};
+ struct moveHelper to={0,NULL,NULL,0,NULL,0,0,0,0};
+
+ initOrig_Dst(&orig, &to);
+ CACHEADAPT_SAMPLING_DATA_REVISE_INIT(&orig, &to);
int filledblocks = 0;
void * heaptopptr = NULL;
bool finishcompact = false;
bool lbmove = false;
while((COMPACTPHASE == gc_status_info.gcphase) || (SUBTLECOMPACTPHASE == gc_status_info.gcphase)) {
if((!finishcompact) && iscontinue) {
- finishcompact = compacthelper(orig,to,&filledblocks,&heaptopptr,&localcompact, lbmove);
+ finishcompact = compacthelper(&orig,&to,&filledblocks,&heaptopptr,&localcompact, lbmove);
}
if(gc_checkCoreStatus()) {
if(gctomove) {
BAMBOO_CACHE_MF();
- to->ptr = gcmovestartaddr;
- to->numblocks = gcblock2fill - 1;
- to->bound = BLOCKBOUND(to->numblocks);
- BASEPTR(gcdstcore, to->numblocks, &(to->base));
- to->offset = to->ptr - to->base;
- to->top = (to->numblocks==0)?(to->offset):(to->bound-BAMBOO_SMEM_SIZE+to->offset);
- to->base = to->ptr;
- to->offset = BAMBOO_CACHE_LINE_SIZE;
- to->ptr += to->offset; // for header
- to->top += to->offset;
+ to.ptr = gcmovestartaddr;
+ to.numblocks = gcblock2fill - 1;
+ to.bound = BLOCKBOUND(to.numblocks);
+ BASEPTR(gcdstcore, to.numblocks, &(to.base));
+ to.offset = to.ptr - to.base;
+ to.top = (to.numblocks==0)?(to.offset):(to.bound-BAMBOO_SMEM_SIZE+to.offset);
+ to.base = to.ptr;
+ to.offset = BAMBOO_CACHE_LINE_SIZE;
+ to.ptr += to.offset; // for header
+ to.top += to.offset;
localcompact = (gcdstcore == BAMBOO_NUM_OF_CORE);
gctomove = false;
iscontinue = true;
unsigned INTPTR blocksize = (((unsigned INTPTR)(ptr-gcbaseva)) < BAMBOO_LARGE_SMEM_BOUND)? BAMBOO_SMEM_SIZE_L:BAMBOO_SMEM_SIZE;
// ptr is a start of a block OR it crosses the boundary of current block
- return *tsize > blocksize;
+ return (*tsize) > blocksize;
}
INLINE unsigned int hostcore(void * ptr) {
#define MARKOBJNONNULL(objptr, ii) {markObj(objptr);}
// NOTE: the objptr should not be NULL and should be a shared obj
-INLINE void markObj(void * objptr) {
+void markObj(void * objptr) {
unsigned int host = hostcore(objptr);
if(BAMBOO_NUM_OF_CORE == host) {
- int markedbit;
- GETMARKED(markedbit, objptr);
// on this core
- if(markedbit == UNMARKED) {
+ if(!checkMark(objptr)) {
// this is the first time that this object is discovered,
// set the flag as DISCOVERED
- SETMARKED(MARKEDFIRST, objptr);
+ setMark(objptr);
gc_enqueue(objptr);
}
} else {
gcself_numsendobjs++;
}
}
-}
+}
INLINE void markgarbagelist(struct garbagelist * listptr) {
for(;listptr!=NULL;listptr=listptr->next) {
}
// enqueue the bamboo_current_thread
- MARKOBJ((void *)bamboo_current_thread, 0);
+ MARKOBJ(bamboo_current_thread, 0);
#endif
}
}
}
-INLINE void mark(bool isfirst, struct garbagelist * stackptr) {
+void mark(bool isfirst, struct garbagelist * stackptr) {
if(isfirst) {
// enqueue root objs
tomark(stackptr);
// mark phase
while(MARKPHASE == gc_status_info.gcphase) {
int counter = 0;
+
while(gc_moreItems2()) {
sendStall = false;
gc_status_info.gcbusystatus = true;
- unsigned int ptr = gc_dequeue2();
+ void * ptr = gc_dequeue2();
unsigned int size = 0;
unsigned int type = 0;
+ bool islarge=isLarge(ptr, &type, &size);
+ unsigned int iunits = ALIGNUNITS(size);
- if(isLarge(ptr, &type, &size)) {
+ setLengthMarked(ptr,iunits);
+
+ if(islarge) {
// ptr is a large object and not marked or enqueued
gc_lobjenqueue(ptr, size, BAMBOO_NUM_OF_CORE);
gcnumlobjs++;
} else {
// ptr is an unmarked active object on this core
- unsigned int isize = 0;
- ALIGNSIZE(size, &isize);
+ unsigned int isize=iunits<<ALIGNMENTSHIFT;
gccurr_heaptop += isize;
-
- if((unsigned int)(ptr + size) > (unsigned int)gcmarkedptrbound) {
- gcmarkedptrbound = (unsigned int)(ptr + size);
- }
+ void *top=ptr+isize;
+ if (top>gcmarkedptrbound)
+ gcmarkedptrbound=top;
}
-
+
// scan the pointers in object
scanPtrsInObj(ptr, type);
}
#ifdef MULTICORE_GC
#include "multicore.h"
+INLINE void gettype_size(void * ptr, int * ttype, unsigned int * tsize);
+INLINE bool isLarge(void * ptr, int * ttype, unsigned int * tsize);
+INLINE unsigned int hostcore(void * ptr);
+INLINE void markgarbagelist(struct garbagelist * listptr);
+INLINE void tomark(struct garbagelist * stackptr);
+INLINE void scanPtrsInObj(void * ptr, int type);
+void markObj(void * objptr);
+void mark(bool isfirst, struct garbagelist * stackptr);
+
#endif // MULTICORE_GC
#endif // BAMBOO_MULTICORE_GC_MARK_H
// data structures for threads
unsigned int * bamboo_thread_queue;
unsigned int bamboo_max_thread_num_mask;
-unsigned int bamboo_current_thread;
+void * bamboo_current_thread;
#endif // MGC
#endif // BAMBOO_MULTICORE_MGC_H
void * data1 = (void *) msgdata[msgdataindex];
MSG_INDEXINC_I();
BAMBOO_ASSERT(ISSHAREDOBJ(data1));
-
+
// received a markedObj msg
- int markedbit;
- GETMARKED(markedbit, data1);
-
- if(markedbit==UNMARKED) {
+ if(!checkMark(data1)) {
// this is the first time that this object is discovered,
// set the flag as DISCOVERED
- SETMARKED(MARKEDFIRST, data1);
+ setMark(data1);
gc_enqueue_I(data1);
}
gcself_numreceiveobjs++;
#ifdef MGC
initializethreads();
- bamboo_current_thread = 0;
+ bamboo_current_thread = NULL;
#endif // MGC
INITTASKDATA();