2 #include "multicoregcmark.h"
4 #include "multicoreruntime.h"
5 #include "GenericHashtable.h"
8 #include "runtime_arch.h"
15 extern struct parameterwrapper ** objectqueues[][NUMCLASSES];
16 extern int numqueues[][NUMCLASSES];
17 extern struct genhashtable * activetasks;
18 extern struct parameterwrapper ** objectqueues[][NUMCLASSES];
19 extern struct taskparamdescriptor *currtpd;
20 extern struct LockValue runtime_locks[MAXTASKPARAMS];
21 extern int runtime_locklen;
24 extern struct global_defs_t * global_defs_p;
27 extern unsigned int gcmem_mixed_threshold;
28 extern unsigned int gcmem_mixed_usedmem;
32 extern struct lockvector bamboo_threadlocks;
// Determine the runtime type id and total byte size of the object at ptr.
// The first word of every object is its type id; scalar objects take their
// size straight from classsize[], array objects add length*elementsize to
// the ArrayObject header size.
// NOTE(review): this listing elides some original lines (gaps in the
// left-hand numbering), e.g. the *ttype store and the scalar/array else.
35 INLINE void gettype_size(void * ptr, int * ttype, unsigned int * tsize) {
36 int type = ((int *)ptr)[0];
// Scalar (non-array) class: size comes directly from the class-size table.
38 if(type < NUMCLASSES) {
40 *tsize = classsize[type];
// Array object: total size = array header + element count * element size.
44 struct ArrayObject *ao=(struct ArrayObject *)ptr;
45 unsigned int elementsize=classsize[type];
46 unsigned int length=ao->___length___;
47 *tsize = sizeof(struct ArrayObject)+length*elementsize;
// Return true when the object at ptr must be treated as a "large" object:
// it either starts exactly on a shared-memory block boundary, or its size
// would spill past the end of the block it starts in.  Also reports the
// object's type and size to the caller via gettype_size.
52 INLINE bool isLarge(void * ptr, int * ttype, unsigned int * tsize) {
53 // check if a pointer is referring to a large object
54 gettype_size(ptr, ttype, tsize);
// Addresses below BAMBOO_LARGE_SMEM_BOUND live in the larger block size
// region (BAMBOO_SMEM_SIZE_L); everything above uses BAMBOO_SMEM_SIZE.
55 unsigned int bound = (BAMBOO_SMEM_SIZE);
56 if(((unsigned int)ptr-gcbaseva) < (BAMBOO_LARGE_SMEM_BOUND)) {
57 bound = (BAMBOO_SMEM_SIZE_L);
59 // ptr is a start of a block OR it crosses the boundary of the current block
60 return (((((unsigned int)ptr-gcbaseva)%(bound))==0)||
61 ((bound-(((unsigned int)ptr-gcbaseva)%bound)) < (*tsize)));
// Compute which core hosts the shared object at ptr, using the RESIDECORE
// address-to-core mapping macro.
// NOTE(review): the `return host;` line is elided from this listing
// (gap in the left-hand numbering after line 67).
64 INLINE unsigned int hostcore(void * ptr) {
65 // check the host core of ptr
66 unsigned int host = 0;
67 RESIDECORE(ptr, &host);
71 //push the null check into the mark macro
72 //#define MARKOBJ(objptr, ii) {void * marktmpptr=objptr; if (marktmpptr!=NULL) markObj(marktmpptr, __LINE__, ii);}
74 //#define MARKOBJNONNULL(objptr, ii) {markObj(objptr, __LINE__, ii);}
// MARKOBJ tolerates a NULL pointer; MARKOBJNONNULL assumes the caller
// guarantees a non-NULL shared-object pointer.  The ii argument is unused
// by the active definitions — it is kept so call sites stay compatible
// with the debug variants commented out above.
76 #define MARKOBJ(objptr, ii) {void * marktmpptr=objptr; if (marktmpptr!=NULL) markObj(marktmpptr);}
78 #define MARKOBJNONNULL(objptr, ii) {markObj(objptr);}
80 // NOTE: the objptr should not be NULL and should be a shared obj
// Mark one shared object.  Local path: if the object lives on this core
// and is still in the INIT state, flip its mark flag to DISCOVERED and
// flush the cache line so other cores observe the update.  Remote path:
// forward the pointer once to its host core via a GCMARKEDOBJ message,
// recording it in gcforwardobjtbl so it is never forwarded twice.
// NOTE(review): several original lines are elided here (gaps in the
// left-hand numbering), including the else introducing the remote branch
// and enqueue of the locally discovered object.
81 INLINE void markObj(void * objptr) {
82 unsigned int host = hostcore(objptr);
83 if(BAMBOO_NUM_OF_CORE == host) {
85 if(((struct ___Object___ *)objptr)->marked == INIT) {
86 // this is the first time that this object is discovered,
87 // set the flag as DISCOVERED
88 ((struct ___Object___ *)objptr)->marked = DISCOVERED;
89 BAMBOO_CACHE_FLUSH_LINE(objptr);
93 // check if this obj has been forwarded
94 if(!MGCHashcontains(gcforwardobjtbl, (int)objptr)) {
95 // send a msg to host informing that objptr is active
96 send_msg_2(host,GCMARKEDOBJ,objptr);
97 GCPROFILE_RECORD_FORWARD_OBJ();
99 MGCHashadd(gcforwardobjtbl, (int)objptr);
// Mark every object slot in a chain of garbagelist nodes (e.g. the shadow
// stack of live local references).  Slots may be NULL, hence MARKOBJ.
104 INLINE void markgarbagelist(struct garbagelist * listptr) {
105 for(;listptr!=NULL;listptr=listptr->next) {
106 int size=listptr->size;
107 for(int i=0; i<size; i++) {
108 MARKOBJ(listptr->array[i], i);
// Seed the mark phase with every GC root visible to this core: the current
// shadow stack, static globals (startup core only), the task object queues,
// the current and active task descriptors, cached in/out transfer objects,
// runtime lock values, the global thread queue (startup core only), held
// thread locks, and the current thread object.
// NOTE(review): this listing elides original lines throughout (gaps in
// the left-hand numbering), including most closing braces.
114 INLINE void tomark(struct garbagelist * stackptr) {
115 BAMBOO_ASSERT(MARKPHASE == gcphase);
120 // enqueue current stack
121 markgarbagelist(stackptr);
123 // enqueue static pointers global_defs_p
// Only the startup core owns the global definitions table.
124 if(STARTUPCORE == BAMBOO_NUM_OF_CORE) {
125 markgarbagelist((struct garbagelist *)global_defs_p);
129 // enqueue objectsets
// Walk every parameter-wrapper object set queued on this core; the hash
// set keys are the object pointers themselves.
130 if(BAMBOO_NUM_OF_CORE < NUMCORESACTIVE) {
131 for(int i=0; i<NUMCLASSES; i++) {
132 struct parameterwrapper ** queues = objectqueues[BAMBOO_NUM_OF_CORE][i];
133 int length = numqueues[BAMBOO_NUM_OF_CORE][i];
134 for(int j = 0; j < length; ++j) {
135 struct parameterwrapper * parameter = queues[j];
136 struct ObjectHash * set=parameter->objectset;
137 struct ObjectNode * ptr=set->listhead;
138 for(;ptr!=NULL;ptr=ptr->lnext) {
139 MARKOBJNONNULL((void *)ptr->key, 0);
145 // enqueue current task descriptor
146 if(currtpd != NULL) {
147 for(int i=0; i<currtpd->numParameters; i++) {
148 // currtpd->parameterArray[i] can not be NULL
149 MARKOBJNONNULL(currtpd->parameterArray[i], i);
153 // enqueue active tasks
154 if(activetasks != NULL) {
155 struct genpointerlist * ptr=activetasks->list;
156 for(;ptr!=NULL;ptr=ptr->inext) {
157 struct taskparamdescriptor *tpd=ptr->src;
158 for(int i=0; i<tpd->numParameters; i++) {
159 // the tpd->parameterArray[i] can not be NULL
160 MARKOBJNONNULL(tpd->parameterArray[i], i);
165 // enqueue cached transferred obj
166 struct QueueItem * tmpobjptr = getHead(&objqueue);
167 for(;tmpobjptr != NULL;tmpobjptr=getNextQueueItem(tmpobjptr)) {
168 struct transObjInfo * objInfo=(struct transObjInfo *)(tmpobjptr->objectptr);
169 // the objptr can not be NULL
170 MARKOBJNONNULL(objInfo->objptr, 0);
173 // enqueue cached objs to be transferred
174 struct QueueItem * item = getHead(totransobjqueue);
175 for(;item != NULL;item=getNextQueueItem(item)) {
176 struct transObjInfo * totransobj=(struct transObjInfo *)(item->objectptr);
177 // the objptr can not be NULL
178 MARKOBJNONNULL(totransobj->objptr, 0);
181 // enqueue lock related info
// Lock slots may hold NULL, so the NULL-tolerant MARKOBJ is used here.
182 for(int i = 0; i < runtime_locklen; i++) {
183 MARKOBJ((void *)(runtime_locks[i].redirectlock), 0);
184 MARKOBJ((void *)(runtime_locks[i].value), i);
189 // enqueue global thread queue
// The startup core owns the circular thread queue: word 1 is the live
// count, word 2 the start index, entries begin at offset 4.
190 if(STARTUPCORE == BAMBOO_NUM_OF_CORE) {
192 unsigned int thread_counter = *((unsigned int*)(bamboo_thread_queue+1));
193 if(thread_counter > 0) {
194 unsigned int start = *((unsigned int*)(bamboo_thread_queue+2));
195 for(int i = thread_counter; i > 0; i--) {
196 // the thread obj can not be NULL
197 MARKOBJNONNULL((void *)bamboo_thread_queue[4+start], 0);
198 start = (start+1)&bamboo_max_thread_num_mask;
203 // enqueue the bamboo_threadlocks
204 for(int i = 0; i < bamboo_threadlocks.index; i++) {
205 // the locks can not be NULL
206 MARKOBJNONNULL((void *)(bamboo_threadlocks.locks[i].object), i);
209 // enqueue the bamboo_current_thread
210 MARKOBJ((void *)bamboo_current_thread, 0);
// Mark every pointer field reachable from the object at ptr.  The layout is
// driven by pointerarray[type]: entry semantics appear to be 0 = primitive
// array (no pointers), 1 = array of pointers, otherwise a table whose first
// slot is the field count followed by byte offsets of pointer fields.
// NOTE(review): this listing elides the branch conditions, the `size` loads,
// the MARKOBJ calls on each objptr, and the matching #endif lines (gaps in
// the left-hand numbering) — the semantics above are partly inferred; verify
// against the full source.
214 INLINE void scanPtrsInObj(void * ptr, int type) {
215 // scan all pointers in ptr
216 unsigned int * pointer = pointerarray[type];
218 /* Array of primitives */
// Even a primitive array may carry object-header fields when the base
// Object class itself has pointers.
219 #ifdef OBJECTHASPOINTERS
220 pointer=pointerarray[OBJECTTYPE];
221 //handle object class
223 for(int i=1; i<=size; i++) {
224 unsigned int offset=pointer[i];
225 void * objptr=*((void **)(((char *)ptr)+offset));
229 } else if (((unsigned int)pointer)==1) {
230 /* Array of pointers */
// Elements start immediately after the ___length___ field.
231 struct ArrayObject *ao=(struct ArrayObject *) ptr;
232 int length=ao->___length___;
233 for(int i=0; i<length; i++) {
234 void *objptr=((void **)(((char *)&ao->___length___)+sizeof(int)))[i];
237 #ifdef OBJECTHASPOINTERS
238 pointer=pointerarray[OBJECTTYPE];
239 //handle object class
241 for(int i=1; i<=size; i++) {
242 unsigned int offset=pointer[i];
243 void * objptr=*((void **)(((char *)ptr)+offset));
// Normal object: walk the per-class pointer-offset table.
250 for(int i=1; i<=size; i++) {
251 unsigned int offset=pointer[i];
252 void * objptr=*((void **)(((char *)ptr)+offset));
// Main mark-phase driver for this core.  Drains the mark queue: each
// dequeued DISCOVERED object is either handed to the large-object queue
// (isLarge) or promoted to MARKED, its aligned size accounted into
// gccurr_heaptop, gcmarkedptrbound advanced past it, and its pointer
// fields scanned.  When the queue is empty the core reports completion —
// the startup core updates the shared status arrays directly; other cores
// send a GCFINISHMARK message.  Loops until the coordinator leaves
// MARKPHASE.
// NOTE(review): many original lines are elided (gaps in the left-hand
// numbering), including the non-startup-core else branch and the stall
// handling implied by sendStall.
258 INLINE void mark(bool isfirst, struct garbagelist * stackptr) {
262 gccurr_heaptop = 0; // record the size of all active objs in this core
263 // aligned but does not consider block boundaries
264 gcmarkedptrbound = 0;
266 unsigned int isize = 0;
267 bool sendStall = false;
269 while(MARKPHASE == gcphase) {
271 while(gc_moreItems2()) {
274 unsigned int ptr = gc_dequeue2();
276 unsigned int size = 0;
277 unsigned int isize = 0;
278 unsigned int type = 0;
279 // check if it is a local obj on this core
// Anything not in DISCOVERED state was already processed — skip it.
280 if(((struct ___Object___ *)ptr)->marked!=DISCOVERED) {
281 // ptr has been marked
283 } else if(isLarge(ptr, &type, &size)) {
284 // ptr is a large object and not marked or enqueued
// Large objects are compacted separately; queue them for later.
285 gc_lobjenqueue(ptr, size, BAMBOO_NUM_OF_CORE);
288 // ptr is an unmarked active object on this core
289 ALIGNSIZE(size, &isize);
290 gccurr_heaptop += isize;
// Track the highest address occupied by a marked object.
292 if((unsigned int)(ptr + size) > (unsigned int)gcmarkedptrbound) {
293 gcmarkedptrbound = (unsigned int)(ptr + size);
297 ((struct ___Object___ *)ptr)->marked = MARKED;
298 BAMBOO_CACHE_FLUSH_LINE(ptr);
299 // scan the pointers in object
300 scanPtrsInObj(ptr, type);
302 gcbusystatus = false;
303 // send mark finish msg to core coordinator
304 if(STARTUPCORE == BAMBOO_NUM_OF_CORE) {
// Pick the status-array slot: during a confirmation round the other
// index is written so in-flight counts are not clobbered.
305 int entry_index = waitconfirm ? (gcnumsrobjs_index==0) : gcnumsrobjs_index;
306 gccorestatus[BAMBOO_NUM_OF_CORE] = 0;
307 gcnumsendobjs[entry_index][BAMBOO_NUM_OF_CORE]=gcself_numsendobjs;
308 gcnumreceiveobjs[entry_index][BAMBOO_NUM_OF_CORE]=gcself_numreceiveobjs;
309 gcloads[BAMBOO_NUM_OF_CORE] = gccurr_heaptop;
312 send_msg_4(STARTUPCORE,GCFINISHMARK,BAMBOO_NUM_OF_CORE,gcself_numsendobjs,gcself_numreceiveobjs);
317 if(BAMBOO_NUM_OF_CORE == STARTUPCORE)
324 #endif // MULTICORE_GC