2 #include "multicoregcmark.h"
4 #include "multicoreruntime.h"
5 #include "GenericHashtable.h"
9 extern struct parameterwrapper ** objectqueues[][NUMCLASSES];
10 extern int numqueues[][NUMCLASSES];
11 extern struct genhashtable * activetasks;
12 extern struct parameterwrapper ** objectqueues[][NUMCLASSES];
13 extern struct taskparamdescriptor *currtpd;
14 extern struct LockValue runtime_locks[MAXTASKPARAMS];
15 extern int runtime_locklen;
18 extern struct global_defs_t * global_defs_p;
21 extern unsigned int gcmem_mixed_threshold;
22 extern unsigned int gcmem_mixed_usedmem;
26 extern struct lockvector bamboo_threadlocks;
// Read the type id and compute the total byte size of the object at ptr.
// ptr   : pointer to an object whose first header word is its type id.
// ttype : out — receives the type id (NOTE(review): the store to *ttype
//         appears to be on lines elided from this view — confirm).
// tsize : out — receives the object's size: classsize[type] for a plain
//         object, or ArrayObject header + length*elementsize for arrays.
// NOTE(review): lines are elided here; the array computation below
// presumably sits in an else arm taken when type >= NUMCLASSES — confirm.
29 INLINE void gettype_size(void * ptr, int * ttype, unsigned int * tsize) {
30 int type = ((int *)ptr)[0];
32 if(type < NUMCLASSES) {
34 *tsize = classsize[type];
38 struct ArrayObject *ao=(struct ArrayObject *)ptr;
39 unsigned int elementsize=classsize[type];
40 unsigned int length=ao->___length___;
41 *tsize = sizeof(struct ArrayObject)+length*elementsize;
// Decide whether ptr refers to a "large" object for GC block management.
// Side effect: fills in *ttype and *tsize via gettype_size.
// An object counts as large when it starts exactly on a block boundary,
// or when it does not fit in the remainder of its current block (i.e. it
// crosses into the next block).
46 INLINE bool isLarge(void * ptr, int * ttype, unsigned int * tsize) {
47 // check if a pointer is referring to a large object
48 gettype_size(ptr, ttype, tsize);
// Offsets below BAMBOO_LARGE_SMEM_BOUND fall in the larger per-core
// blocks (BAMBOO_SMEM_SIZE_L); everything above uses BAMBOO_SMEM_SIZE.
49 unsigned int bound = (BAMBOO_SMEM_SIZE);
50 if(((unsigned int)ptr-gcbaseva) < (BAMBOO_LARGE_SMEM_BOUND)) {
51 bound = (BAMBOO_SMEM_SIZE_L);
53 // ptr is a start of a block OR it crosses the boundary of current block
54 return (((((unsigned int)ptr-gcbaseva)%(bound))==0)||
55 ((bound-(((unsigned int)ptr-gcbaseva)%bound)) < (*tsize)));
// Map a shared-heap pointer to the core that hosts its block.
// On a single-core GC configuration the host is core 0 (NOTE(review):
// the early-out for that case sits on lines elided from this view).
// Otherwise: compute the block index b from the byte offset into the
// shared heap — small-numbered large blocks of BAMBOO_SMEM_SIZE_L below
// BAMBOO_LARGE_SMEM_BOUND, then BAMBOO_SMEM_SIZE blocks — and look up
// the owning core in the gc_block2core table (blocks cycle with period
// NUMCORES4GC*2). NOTE(review): b's declaration is on an elided line.
58 INLINE unsigned int hostcore(void * ptr) {
59 // check the host core of ptr
60 unsigned int host = 0;
61 if(1 == (NUMCORES4GC)) {
65 unsigned int t = (unsigned int)ptr - (unsigned int)gcbaseva;
66 if(t < (BAMBOO_LARGE_SMEM_BOUND)) {
67 b = t / (BAMBOO_SMEM_SIZE_L);
69 b = NUMCORES4GC+((t-(BAMBOO_LARGE_SMEM_BOUND))/(BAMBOO_SMEM_SIZE));
71 host = gc_block2core[(b%(NUMCORES4GC*2))];
75 //push the null check into the mark macro
76 //#define MARKOBJ(objptr, ii) {void * marktmpptr=objptr; if (marktmpptr!=NULL) markObj(marktmpptr, __LINE__, ii);}
78 //#define MARKOBJNONNULL(objptr, ii) {markObj(objptr, __LINE__, ii);}
// MARKOBJ: evaluate objptr once and mark it only if non-NULL.  The ii
// argument is unused in this build; it is kept so call sites stay
// compatible with the older debug variants commented out above.
80 #define MARKOBJ(objptr, ii) {void * marktmpptr=objptr; if (marktmpptr!=NULL) markObj(marktmpptr);}
// MARKOBJNONNULL: mark objptr with no NULL check — the caller guarantees
// the pointer is non-NULL.  ii is likewise unused here.
82 #define MARKOBJNONNULL(objptr, ii) {markObj(objptr);}
84 // NOTE: the objptr should not be NULL and should be a shared obj
// Mark one shared object during the parallel mark phase.
// - Local object (this core is its host): on first discovery
//   (marked == INIT) transition the header flag to DISCOVERED and flush
//   the cache line so other cores observe the change.  NOTE(review):
//   enqueueing the object onto the local mark queue presumably happens
//   on lines elided from this view — confirm against the full file.
// - Remote object: forward the pointer to its host core exactly once
//   via a GCMARKEDOBJ message; gcforwardobjtbl records already-forwarded
//   pointers so rediscoveries do not resend, and gcself_numsendobjs
//   counts the sends for the mark-termination handshake.
85 INLINE void markObj(void * objptr) {
86 unsigned int host = hostcore(objptr);
87 if(BAMBOO_NUM_OF_CORE == host) {
89 if(((struct ___Object___ *)objptr)->marked == INIT) {
90 // this is the first time that this object is discovered,
91 // set the flag as DISCOVERED
92 ((struct ___Object___ *)objptr)->marked = DISCOVERED;
93 BAMBOO_CACHE_FLUSH_LINE(objptr);
97 // check if this obj has been forwarded
98 if(!MGCHashcontains(gcforwardobjtbl, (int)objptr)) {
99 // send a msg to host informing that objptr is active
100 send_msg_2(host,GCMARKEDOBJ,objptr);
101 GCPROFILE_RECORD_FORWARD_OBJ();
102 gcself_numsendobjs++;
103 MGCHashadd(gcforwardobjtbl, (int)objptr);
// Walk a chain of garbagelist frames (the shadow stack of GC roots) and
// mark every recorded pointer.  NULL slots are tolerated: MARKOBJ skips
// them, so sparse frames are safe.
108 INLINE void markgarbagelist(struct garbagelist * listptr) {
109 for(;listptr!=NULL;listptr=listptr->next) {
110 int size=listptr->size;
111 for(int i=0; i<size; i++) {
112 MARKOBJ(listptr->array[i], i);
// Seed the mark phase with every root reachable from this core:
// the current shadow stack, static globals (startup core only), the
// per-core task object queues, the current and pending task descriptors,
// objects cached in the transfer queues, runtime lock objects, the
// global thread queue (startup core only), per-thread locks, and the
// current thread object.  Must only run while the GC is in MARKPHASE.
118 INLINE void tomark(struct garbagelist * stackptr) {
119 BAMBOO_ASSERT(MARKPHASE == gc_status_info.gcphase);
121 gc_status_info.gcbusystatus = true;
124 // enqueue current stack
125 markgarbagelist(stackptr);
127 // enqueue static pointers global_defs_p
128 if(STARTUPCORE == BAMBOO_NUM_OF_CORE) {
129 markgarbagelist((struct garbagelist *)global_defs_p);
133 // enqueue objectsets
134 if(BAMBOO_NUM_OF_CORE < NUMCORESACTIVE) {
135 for(int i=0; i<NUMCLASSES; i++) {
136 struct parameterwrapper ** queues = objectqueues[BAMBOO_NUM_OF_CORE][i];
137 int length = numqueues[BAMBOO_NUM_OF_CORE][i];
138 for(int j = 0; j < length; ++j) {
139 struct parameterwrapper * parameter = queues[j];
140 struct ObjectHash * set=parameter->objectset;
141 struct ObjectNode * ptr=set->listhead;
142 for(;ptr!=NULL;ptr=ptr->lnext) {
143 MARKOBJNONNULL((void *)ptr->key, 0);
149 // enqueue current task descriptor
150 if(currtpd != NULL) {
151 for(int i=0; i<currtpd->numParameters; i++) {
152 // currtpd->parameterArray[i] can not be NULL
153 MARKOBJNONNULL(currtpd->parameterArray[i], i);
157 // enqueue active tasks
158 if(activetasks != NULL) {
159 struct genpointerlist * ptr=activetasks->list;
160 for(;ptr!=NULL;ptr=ptr->inext) {
161 struct taskparamdescriptor *tpd=ptr->src;
162 for(int i=0; i<tpd->numParameters; i++) {
163 // the tpd->parameterArray[i] can not be NULL
164 MARKOBJNONNULL(tpd->parameterArray[i], i);
169 // enqueue cached transferred obj
170 struct QueueItem * tmpobjptr = getHead(&objqueue);
171 for(;tmpobjptr != NULL;tmpobjptr=getNextQueueItem(tmpobjptr)) {
172 struct transObjInfo * objInfo=(struct transObjInfo *)(tmpobjptr->objectptr);
173 // the objptr can not be NULL
174 MARKOBJNONNULL(objInfo->objptr, 0);
177 // enqueue cached objs to be transferred
178 struct QueueItem * item = getHead(totransobjqueue);
179 for(;item != NULL;item=getNextQueueItem(item)) {
180 struct transObjInfo * totransobj=(struct transObjInfo *)(item->objectptr);
181 // the objptr can not be NULL
182 MARKOBJNONNULL(totransobj->objptr, 0);
185 // enqueue lock related info
// redirectlock/value hold object addresses when in use; MARKOBJ's NULL
// check guards the unused slots.
186 for(int i = 0; i < runtime_locklen; i++) {
187 MARKOBJ((void *)(runtime_locks[i].redirectlock), 0);
188 MARKOBJ((void *)(runtime_locks[i].value), i);
193 // enqueue global thread queue
194 if(STARTUPCORE == BAMBOO_NUM_OF_CORE) {
// bamboo_thread_queue layout (per the reads below): word 1 = live count,
// word 2 = start index, entries begin at word 4; indices wrap via
// bamboo_max_thread_num_mask (ring buffer).
196 unsigned int thread_counter = *((unsigned int*)(bamboo_thread_queue+1));
197 if(thread_counter > 0) {
198 unsigned int start = *((unsigned int*)(bamboo_thread_queue+2));
199 for(int i = thread_counter; i > 0; i--) {
200 // the thread obj can not be NULL
201 MARKOBJNONNULL((void *)bamboo_thread_queue[4+start], 0);
202 start = (start+1)&bamboo_max_thread_num_mask;
207 // enqueue the bamboo_threadlocks
208 for(int i = 0; i < bamboo_threadlocks.index; i++) {
209 // the locks can not be NULL
210 MARKOBJNONNULL((void *)(bamboo_threadlocks.locks[i].object), i);
213 // enqueue the bamboo_current_thread
214 MARKOBJ((void *)bamboo_current_thread, 0);
// Mark every pointer field inside the object at ptr, dispatching on the
// per-type pointerarray descriptor:
//   pointerarray[type] == 0 : array of primitives — no element pointers
//     (object-header pointers still scanned under OBJECTHASPOINTERS);
//   pointerarray[type] == 1 : array of pointers — mark each element;
//   otherwise : descriptor table where pointer[0] is presumably the field
//     count and pointer[1..size] are byte offsets of pointer fields —
//     NOTE(review): the `size` declaration is on lines elided from this
//     view; confirm against the full file.
218 INLINE void scanPtrsInObj(void * ptr, int type) {
219 // scan all pointers in ptr
220 unsigned int * pointer = pointerarray[type];
222 /* Array of primitives */
223 #ifdef OBJECTHASPOINTERS
224 pointer=pointerarray[OBJECTTYPE];
225 //handle object class
227 for(int i=1; i<=size; i++) {
228 unsigned int offset=pointer[i];
229 void * objptr=*((void **)(((char *)ptr)+offset));
233 } else if (((unsigned int)pointer)==1) {
234 /* Array of pointers */
235 struct ArrayObject *ao=(struct ArrayObject *) ptr;
236 int length=ao->___length___;
// Element storage starts one int past the ___length___ field.
237 for(int i=0; i<length; i++) {
238 void *objptr=((void **)(((char *)&ao->___length___)+sizeof(int)))[i];
241 #ifdef OBJECTHASPOINTERS
242 pointer=pointerarray[OBJECTTYPE];
243 //handle object class
245 for(int i=1; i<=size; i++) {
246 unsigned int offset=pointer[i];
247 void * objptr=*((void **)(((char *)ptr)+offset));
// Plain object: walk the offset table and mark each pointer field.
254 for(int i=1; i<=size; i++) {
255 unsigned int offset=pointer[i];
256 void * objptr=*((void **)(((char *)ptr)+offset));
// Core worker loop of the mark phase.  Repeatedly drains this core's
// mark queue: each dequeued object that is still DISCOVERED is either
// handed to the large-object queue (isLarge) or accounted into
// gccurr_heaptop/gcmarkedptrbound, flagged MARKED, cache-flushed, and
// scanned for child pointers.  When the queue runs dry the core reports
// its send/receive counters — the startup core records them directly,
// other cores send GCFINISHMARK — so the coordinator can detect global
// mark termination.  NOTE(review): many control-flow lines (else arms,
// stall messaging using sendStall, the phase-exit path) are elided from
// this view; comments below describe only what is visible.
262 INLINE void mark(bool isfirst, struct garbagelist * stackptr) {
266 gccurr_heaptop = 0; // record the size of all active objs in this core
267 // aligned but does not consider block boundaries
268 gcmarkedptrbound = 0;
270 unsigned int isize = 0;
271 bool sendStall = false;
273 while(MARKPHASE == gc_status_info.gcphase) {
275 while(gc_moreItems2()) {
277 gc_status_info.gcbusystatus = true;
278 unsigned int ptr = gc_dequeue2();
280 unsigned int size = 0;
281 unsigned int isize = 0; // shadows the outer isize for this item
282 unsigned int type = 0;
283 // check if it is a local obj on this core
284 if(((struct ___Object___ *)ptr)->marked!=DISCOVERED) {
285 // ptr has been marked already (or is not pending) — skip it
287 } else if(isLarge(ptr, &type, &size)) {
288 // ptr is a large object and not marked or enqueued
289 gc_lobjenqueue(ptr, size, BAMBOO_NUM_OF_CORE);
292 //((struct ___Object___ *)ptr)->marked = COMPACTED;
294 // ptr is an unmarked active object on this core
// Account the aligned size toward this core's live-heap total and
// advance the upper bound of marked memory.
295 ALIGNSIZE(size, &isize);
296 gccurr_heaptop += isize;
298 if((unsigned int)(ptr + size) > (unsigned int)gcmarkedptrbound) {
299 gcmarkedptrbound = (unsigned int)(ptr + size);
302 //((struct ___Object___ *)ptr)->marked = MARKED;
305 ((struct ___Object___ *)ptr)->marked = MARKED;
306 BAMBOO_CACHE_FLUSH_LINE(ptr);
307 // scan the pointers in object
308 scanPtrsInObj(ptr, type);
310 gc_status_info.gcbusystatus = false;
311 // send mark finish msg to core coordinator
312 if(STARTUPCORE == BAMBOO_NUM_OF_CORE) {
// waitconfirm selects which of the two sample slots to write, so a
// confirmation round and an ongoing round do not clobber each other.
313 int entry_index = waitconfirm ? (gcnumsrobjs_index==0) : gcnumsrobjs_index;
314 gccorestatus[BAMBOO_NUM_OF_CORE] = 0;
315 gcnumsendobjs[entry_index][BAMBOO_NUM_OF_CORE]=gcself_numsendobjs;
316 gcnumreceiveobjs[entry_index][BAMBOO_NUM_OF_CORE]=gcself_numreceiveobjs;
317 gcloads[BAMBOO_NUM_OF_CORE] = gccurr_heaptop;
320 send_msg_4(STARTUPCORE,GCFINISHMARK,BAMBOO_NUM_OF_CORE,gcself_numsendobjs,gcself_numreceiveobjs);
325 if(BAMBOO_NUM_OF_CORE == STARTUPCORE)
332 #endif // MULTICORE_GC