2 #include "multicoregcflush.h"
3 #include "multicoreruntime.h"
4 #include "ObjectHash.h"
5 #include "GenericHashtable.h"
8 /* Task specific includes */
11 extern struct parameterwrapper ** objectqueues[][NUMCLASSES];
12 extern int numqueues[][NUMCLASSES];
13 extern struct genhashtable * activetasks;
14 extern struct parameterwrapper ** objectqueues[][NUMCLASSES];
15 extern struct taskparamdescriptor *currtpd;
16 extern struct LockValue runtime_locks[MAXTASKPARAMS];
17 extern int runtime_locklen;
20 extern struct global_defs_t * global_defs_p;
23 extern struct lockvector bamboo_threadlocks;
/*
 * Rewrite a pointer field with its post-compaction address via flushObj().
 *
 * FLUSHOBJ      — tolerates a NULL field (leaves it untouched).
 * FLUSHOBJNONNULL — the field must be a non-NULL shared-memory pointer;
 *                   it is looked up in the mapping table unconditionally.
 *
 * The second argument (tt) is unused; it is kept so existing call sites
 * keep compiling unchanged.  Both macros are wrapped in do { } while(0)
 * so a trailing ';' forms exactly one statement, making them safe inside
 * unbraced if/else bodies.
 */
#define FLUSHOBJ(obj, tt) do { void * flushtmpptr_=(obj); if (flushtmpptr_!=NULL) (obj)=flushObj(flushtmpptr_); } while(0)
#define FLUSHOBJNONNULL(obj, tt) do { void * flushtmpptr_=(obj); (obj)=flushObj(flushtmpptr_); } while(0)
30 INLINE void * flushObj(void * objptr) {
31 return gcmappingtbl[OBJMAPPINGINDEX((unsigned int)objptr)];
34 INLINE void updategarbagelist(struct garbagelist *listptr) {
35 for(;listptr!=NULL; listptr=listptr->next) {
36 for(int i=0; i<listptr->size; i++) {
37 FLUSHOBJ(listptr->array[i], i);
42 INLINE void flushRuntimeObj(struct garbagelist * stackptr) {
43 // flush current stack
44 updategarbagelist(stackptr);
46 // flush static pointers global_defs_p
47 if(STARTUPCORE == BAMBOO_NUM_OF_CORE) {
48 updategarbagelist((struct garbagelist *)global_defs_p);
53 if(BAMBOO_NUM_OF_CORE < NUMCORESACTIVE) {
54 for(int i=0; i<NUMCLASSES; i++) {
55 struct parameterwrapper ** queues = objectqueues[BAMBOO_NUM_OF_CORE][i];
56 int length = numqueues[BAMBOO_NUM_OF_CORE][i];
57 for(int j = 0; j < length; ++j) {
58 struct parameterwrapper * parameter = queues[j];
59 struct ObjectHash * set=parameter->objectset;
60 for(struct ObjectNode * ptr=set->listhead;ptr!=NULL;ptr=ptr->lnext) {
61 FLUSHOBJNONNULL(ptr->key, 0);
63 ObjectHashrehash(set);
68 // flush current task descriptor
70 for(int i=0; i<currtpd->numParameters; i++) {
71 // the parameter can not be NULL
72 FLUSHOBJNONNULL(currtpd->parameterArray[i], i);
77 if(activetasks != NULL) {
78 for(struct genpointerlist * ptr=activetasks->list;ptr!=NULL;ptr=ptr->inext){
79 struct taskparamdescriptor *tpd=ptr->src;
80 for(int i=0; i<tpd->numParameters; i++) {
81 // the parameter can not be NULL
82 FLUSHOBJNONNULL(tpd->parameterArray[i], i);
85 genrehash(activetasks);
88 // flush cached transferred obj
89 for(struct QueueItem * tmpobjptr = getHead(&objqueue);tmpobjptr != NULL;tmpobjptr = getNextQueueItem(tmpobjptr)) {
90 struct transObjInfo * objInfo=(struct transObjInfo *)(tmpobjptr->objectptr);
91 // the obj can not be NULL
92 FLUSHOBJNONNULL(objInfo->objptr, 0);
95 // flush cached objs to be transferred
96 for(struct QueueItem * item = getHead(totransobjqueue);item != NULL;item = getNextQueueItem(item)) {
97 struct transObjInfo * totransobj = (struct transObjInfo *)(item->objectptr);
98 // the obj can not be NULL
99 FLUSHOBJNONNULL(totransobj->objptr, 0);
102 // enqueue lock related info
103 for(int i = 0; i < runtime_locklen; ++i) {
104 FLUSHOBJ(runtime_locks[i].redirectlock, i);
105 FLUSHOBJ(runtime_locks[i].value, i);
110 // flush the bamboo_threadlocks
111 for(int i = 0; i < bamboo_threadlocks.index; i++) {
112 // the locked obj can not be NULL
113 FLUSHOBJNONNULL(bamboo_threadlocks.locks[i].object, i);
116 // flush the bamboo_current_thread
117 FLUSHOBJ(bamboo_current_thread, 0);
119 // flush global thread queue
120 if(STARTUPCORE == BAMBOO_NUM_OF_CORE) {
121 unsigned int thread_counter = *((unsigned int*)(bamboo_thread_queue+1));
122 if(thread_counter > 0) {
123 unsigned int start = *((unsigned int*)(bamboo_thread_queue+2));
124 for(int i = thread_counter; i > 0; i--) {
125 // the thread obj can not be NULL
126 FLUSHOBJNONNULL(bamboo_thread_queue[4+start], 0);
127 start = (start+1)&bamboo_max_thread_num_mask;
135 INLINE void flushPtrsInObj(void * ptr) {
136 int type = ((int *)(ptr))[0];
137 // scan all pointers in ptr
138 unsigned int * pointer=pointerarray[type];
140 /* Array of primitives */
141 #ifdef OBJECTHASPOINTERS
142 //handle object class
143 pointer=pointerarray[OBJECTTYPE];
144 unsigned int size=pointer[0];
145 for(int i=1; i<=size; i++) {
146 unsigned int offset=pointer[i];
147 FLUSHOBJ(*((void **)(((char *)ptr)+offset)), i);
150 } else if (((unsigned int)pointer)==1) {
151 /* Array of pointers */
152 struct ArrayObject *ao=(struct ArrayObject *) ptr;
153 int length=ao->___length___;
154 for(int j=0; j<length; j++) {
155 FLUSHOBJ(((void **)(((char *)&ao->___length___)+sizeof(int)))[j], j);
157 #ifdef OBJECTHASPOINTERS
158 pointer=pointerarray[OBJECTTYPE];
159 //handle object class
160 unsigned int size=pointer[0];
162 for(int i=1; i<=size; i++) {
163 unsigned int offset=pointer[i];
164 FLUSHOBJ(*((void **)(((char *)ptr)+offset)), i);
168 unsigned int size=pointer[0];
170 for(int i=1; i<=size; i++) {
171 unsigned int offset=pointer[i];
172 FLUSHOBJ(*((void **)(((char *)ptr)+offset)), i);
177 void flush(struct garbagelist * stackptr) {
180 flushRuntimeObj(stackptr);
181 while(gc_moreItems()) {
182 unsigned int ptr = gc_dequeue();
183 // should be a local shared obj and should have mapping info
184 FLUSHOBJNONNULL(ptr, 0);
185 BAMBOO_ASSERT(ptr != NULL);
187 if(((struct ___Object___ *)ptr)->marked == COMPACTED) {
188 flushPtrsInObj((void *)ptr);
189 // restore the mark field, indicating that this obj has been flushed
190 ((struct ___Object___ *)ptr)->marked = INIT;
194 // TODO bug here: the startup core contains all lobjs' info, thus all the
195 // lobjs are flushed in sequence.
197 while(gc_lobjmoreItems_I()) {
198 unsigned int ptr = gc_lobjdequeue_I(NULL, NULL);
200 BAMBOO_ASSERT(ptr!=NULL);
202 if(((struct ___Object___ *)ptr)->marked == COMPACTED) {
203 flushPtrsInObj((void *)ptr);
204 // restore the mark field, indicating that this obj has been flushed
205 ((struct ___Object___ *)ptr)->marked = INIT;
209 // send flush finish message to core coordinator
210 if(STARTUPCORE == BAMBOO_NUM_OF_CORE) {
211 gccorestatus[BAMBOO_NUM_OF_CORE] = 0;
213 send_msg_2(STARTUPCORE,GCFINISHFLUSH,BAMBOO_NUM_OF_CORE);
217 #endif // MULTICORE_GC