3 #include "multicoreruntime.h"
4 #include "runtime_arch.h"
5 #include "GenericHashtable.h"
7 // data structures for task invocation
// Hash table of tasks whose parameters are all satisfied and are ready to run.
8 struct genhashtable * activetasks;
// Descriptor of the task currently being executed on this core.
9 struct taskparamdescriptor * currtpd;
// Per-parameter lock slots used while acquiring locks for one task invocation.
10 struct LockValue runtime_locks[MAXTASKPARAMS];
13 // specific functions used inside critical sections
// NOTE(review): the "_I" suffix marks functions that must only be called
// inside a critical section (interrupts/messaging disabled).
// NOTE(review): both prototypes below are truncated in this extract — the
// remaining parameters were lost in extraction; confirm against the header.
14 void enqueueObject_I(void * ptr,
15 struct parameterwrapper ** queues,
17 int enqueuetasks_I(struct parameterwrapper *parameter,
18 struct parameterwrapper *prevptr,
19 struct ___Object___ *ptr,
// Select the shared-memory allocation policy for this build.
// NOTE(review): the alternative assignments below were originally guarded by
// compile-time #ifdef directives that were stripped from this extract — only
// one of them is active in any given build; confirm against the full source.
24 inline __attribute__((always_inline))
25 void setupsmemmode(void) {
27 bamboo_smem_mode = SMEMLOCAL;
29 bamboo_smem_mode = SMEMFIXED;
31 bamboo_smem_mode = SMEMMIXED;
33 bamboo_smem_mode = SMEMGLOBAL;
35 // default: use local mode
36 bamboo_smem_mode = SMEMLOCAL;
38 } // void setupsmemmode(void)
// Initialize all per-core runtime data structures: core-status arrays (on the
// startup core), GC bookkeeping arrays, message buffers, the lock table, the
// incoming-object queue, and the per-task lock slots.
// NOTE(review): the declaration of loop counter 'i' and several array
// initializations sit on lines stripped from this extract.
41 inline __attribute__((always_inline))
42 void initruntimedata() {
44 // initialize the arrays
45 if(STARTUPCORE == BAMBOO_NUM_OF_CORE) {
46 // startup core to initialize corestatus[]
47 for(i = 0; i < NUMCORESACTIVE; ++i) {
50 numreceiveobjs[i] = 0;
52 // initialize the profile data arrays
55 } // for(i = 0; i < NUMCORESACTIVE; ++i)
57 for(i = 0; i < NUMCORES4GC; ++i) {
60 gcnumreceiveobjs[i] = 0;
62 gcrequiredmems[i] = 0;
64 gcfilledblocks[i] = 0;
65 } // for(i = 0; i < NUMCORES4GC; ++i)
76 self_numreceiveobjs = 0;
// reset the incoming message buffer
78 for(i = 0; i < BAMBOO_MSG_BUF_LENGTH; ++i) {
82 msglength = BAMBOO_MSG_BUF_LENGTH;
// reset the outgoing message buffer
83 for(i = 0; i < BAMBOO_OUT_BUF_LENGTH; ++i) {
93 bamboo_cur_msp = NULL;
95 totransobjqueue = createQueue();
// GC state: start with no collection in progress
100 gcphase = FINISHPHASE;
102 gcself_numsendobjs = 0;
103 gcself_numreceiveobjs = 0;
104 gcmarkedptrbound = 0;
105 gcpointertbl = allocateRuntimeHash(20);
117 gcsbstarttbl = BAMBOO_BASE_VA;
118 gcsmemtbl = RUNMALLOC_I(sizeof(int)*gcnumblock);
120 // create the lock table, lockresult table and obj queue
123 (struct RuntimeNode **) RUNMALLOC_I(sizeof(struct RuntimeNode *)*20);
124 /* Set allocation blocks*/
125 locktable.listhead=NULL;
126 locktable.listtail=NULL;
128 locktable.numelements = 0;
133 lockRedirectTbl = allocateRuntimeHash(20);
134 objRedirectLockTbl = allocateRuntimeHash(20);
// queue of objects transferred from other cores awaiting enqueue
139 objqueue.head = NULL;
140 objqueue.tail = NULL;
146 //isInterrupt = true;
149 taskInfoOverflow = false;
150 /*interruptInfoIndex = 0;
151 interruptInfoOverflow = false;*/
// clear the per-parameter lock slots used during task invocation
154 for(i = 0; i < MAXTASKPARAMS; i++) {
155 runtime_locks[i].redirectlock = 0;
156 runtime_locks[i].value = 0;
// Tear down runtime data structures created by initruntimedata(): free the
// hash tables, the lock-table buckets, the active-task table, and the
// current task descriptor's parameter array.
161 inline __attribute__((always_inline))
162 void disruntimedata() {
164 freeRuntimeHash(gcpointertbl);
166 freeRuntimeHash(lockRedirectTbl);
167 freeRuntimeHash(objRedirectLockTbl);
168 RUNFREE(locktable.bucket);
170 if(activetasks != NULL) {
171 genfreehashtable(activetasks);
173 if(currtpd != NULL) {
174 RUNFREE(currtpd->parameterArray);
// Drain objqueue: for each object transferred from another core, grab its
// write lock, flush its cache lines, and enqueue it into the matching task
// parameter queues. If the lock cannot be obtained, the item is re-appended
// to the tail of the queue unless a newer entry for the same object already
// exists. Returns whether any object was enqueued (return sites are on
// lines stripped from this extract — confirm against the full source).
180 inline __attribute__((always_inline))
181 bool checkObjQueue() {
183 struct transObjInfo * objInfo = NULL;
187 #ifdef ACCURATEPROFILE
188 bool isChecking = false;
189 if(!isEmpty(&objqueue)) {
190 profileTaskStart("objqueue checking");
192 } // if(!isEmpty(&objqueue))
196 while(!isEmpty(&objqueue)) {
// enter critical section: queue is also touched by the msg handler
198 BAMBOO_START_CRITICAL_SECTION_OBJ_QUEUE();
200 BAMBOO_DEBUGPRINT(0xf001);
203 //isInterrupt = false;
206 BAMBOO_DEBUGPRINT(0xeee1);
209 objInfo = (struct transObjInfo *)getItem(&objqueue);
210 obj = objInfo->objptr;
212 BAMBOO_DEBUGPRINT_REG((int)obj);
214 // grab lock and flush the obj
// NOTE(review): the lock-request and spin-loop header lines are stripped
// here; BAMBOO_WAITING_FOR_LOCK() spins until the lock reply arrives.
218 BAMBOO_WAITING_FOR_LOCK();
219 } // while(!lockflag)
222 BAMBOO_DEBUGPRINT_REG(grount);
// invalidate cached copies so this core sees the sender's writes
237 BAMBOO_CACHE_FLUSH_RANGE((int)obj,sizeof(int));
238 BAMBOO_CACHE_FLUSH_RANGE((int)obj,
239 classsize[((struct ___Object___ *)obj)->type]);
241 // enqueue the object
// queues[] holds (taskindex, paramindex) pairs for each destination queue
242 for(k = 0; k < objInfo->length; ++k) {
243 int taskindex = objInfo->queues[2 * k];
244 int paramindex = objInfo->queues[2 * k + 1];
245 struct parameterwrapper ** queues =
246 &(paramqueues[BAMBOO_NUM_OF_CORE][taskindex][paramindex]);
248 BAMBOO_DEBUGPRINT_REG(taskindex);
249 BAMBOO_DEBUGPRINT_REG(paramindex);
250 struct ___Object___ * tmpptr = (struct ___Object___ *)obj;
251 tprintf("Process %x(%d): receive obj %x(%lld), ptrflag %x\n",
252 BAMBOO_NUM_OF_CORE, BAMBOO_NUM_OF_CORE, (int)obj,
253 (long)obj, tmpptr->flag);
255 enqueueObject_I(obj, queues, 1);
257 BAMBOO_DEBUGPRINT_REG(hashsize(activetasks));
259 } // for(k = 0; k < objInfo->length; ++k)
260 releasewritelock_I(obj);
261 RUNFREE(objInfo->queues);
// lock not acquired: re-queue unless a newer entry for this obj exists
265 // put it at the end of the queue if no update version in the queue
266 struct QueueItem * qitem = getHead(&objqueue);
267 struct QueueItem * prev = NULL;
268 while(qitem != NULL) {
269 struct transObjInfo * tmpinfo =
270 (struct transObjInfo *)(qitem->objectptr);
271 if(tmpinfo->objptr == obj) {
272 // the same object in the queue, which should be enqueued
273 // recently. Current one is outdated, do not re-enqueue it
274 RUNFREE(objInfo->queues);
279 } // if(tmpinfo->objptr == obj)
280 qitem = getNextQueueItem(prev);
281 } // while(qitem != NULL)
282 // try to execute active tasks already enqueued first
283 addNewItem_I(&objqueue, objInfo);
285 //isInterrupt = true;
288 BAMBOO_CLOSE_CRITICAL_SECTION_OBJ_QUEUE();
290 BAMBOO_DEBUGPRINT(0xf000);
294 BAMBOO_CLOSE_CRITICAL_SECTION_OBJ_QUEUE();
296 BAMBOO_DEBUGPRINT(0xf000);
298 } // while(!isEmpty(&objqueue))
301 #ifdef ACCURATEPROFILE
309 BAMBOO_DEBUGPRINT(0xee02);
// Startup-core termination protocol: when all cores report stalled and the
// total sent-object count equals the total received-object count, either
// broadcast STATUSCONFIRM to re-verify (first pass), or — once confirmed —
// collect profiling output and terminate the whole application.
314 inline __attribute__((always_inline))
315 void checkCoreStatus() {
316 bool allStall = false;
// proceed only when not waiting for confirmations, or all have arrived
320 (waitconfirm && (numconfirm == 0))) {
322 BAMBOO_DEBUGPRINT(0xee04);
323 BAMBOO_DEBUGPRINT_REG(waitconfirm);
325 BAMBOO_START_CRITICAL_SECTION_STATUS();
327 BAMBOO_DEBUGPRINT(0xf001);
// record this core (the startup core) as stalled
329 corestatus[BAMBOO_NUM_OF_CORE] = 0;
330 numsendobjs[BAMBOO_NUM_OF_CORE] = self_numsendobjs;
331 numreceiveobjs[BAMBOO_NUM_OF_CORE] = self_numreceiveobjs;
332 // check the status of all cores
335 BAMBOO_DEBUGPRINT_REG(NUMCORESACTIVE);
337 for(i = 0; i < NUMCORESACTIVE; ++i) {
339 BAMBOO_DEBUGPRINT(0xe000 + corestatus[i]);
341 if(corestatus[i] != 0) {
345 } // for(i = 0; i < NUMCORESACTIVE; ++i)
347 // check if the sum of send objs and receive obj are the same
348 // yes->check if the info is the latest; no->go on executing
350 for(i = 0; i < NUMCORESACTIVE; ++i) {
351 sumsendobj += numsendobjs[i];
353 BAMBOO_DEBUGPRINT(0xf000 + numsendobjs[i]);
355 } // for(i = 0; i < NUMCORESACTIVE; ++i)
356 for(i = 0; i < NUMCORESACTIVE; ++i) {
357 sumsendobj -= numreceiveobjs[i];
359 BAMBOO_DEBUGPRINT(0xf000 + numreceiveobjs[i]);
361 } // for(i = 0; i < NUMCORESACTIVE; ++i)
// zero difference => no objects in flight on the network
362 if(0 == sumsendobj) {
364 // the first time found all cores stall
365 // send out status confirm msg to all other cores
366 // reset the corestatus array too
368 BAMBOO_DEBUGPRINT(0xee05);
370 corestatus[BAMBOO_NUM_OF_CORE] = 1;
371 for(i = 1; i < NUMCORESACTIVE; ++i) {
373 // send status confirm msg to core i
374 send_msg_1(i, STATUSCONFIRM);
375 } // for(i = 1; i < NUMCORESACTIVE; ++i)
377 numconfirm = NUMCORESACTIVE - 1;
379 // all the core status info are the latest
380 // terminate; for profiling mode, send request to all
381 // other cores to pour out profiling data
383 BAMBOO_DEBUGPRINT(0xee06);
387 totalexetime = BAMBOO_GET_EXE_TIME();
389 BAMBOO_DEBUGPRINT(BAMBOO_GET_EXE_TIME());
390 BAMBOO_DEBUGPRINT_REG(total_num_t6); // TODO for test
391 BAMBOO_DEBUGPRINT(0xbbbbbbbb);
393 // profile mode, send msgs to other cores to request pouring
394 // out profiling data
396 BAMBOO_CLOSE_CRITICAL_SECTION_STATUS();
398 BAMBOO_DEBUGPRINT(0xf000);
400 for(i = 1; i < NUMCORESACTIVE; ++i) {
401 // send profile request msg to core i
402 send_msg_2(i, PROFILEOUTPUT, totalexetime);
403 } // for(i = 1; i < NUMCORESACTIVE; ++i)
404 // pour profiling data on startup core
407 BAMBOO_START_CRITICAL_SECTION_STATUS();
409 BAMBOO_DEBUGPRINT(0xf001);
411 profilestatus[BAMBOO_NUM_OF_CORE] = 0;
412 // check the status of all cores
415 BAMBOO_DEBUGPRINT_REG(NUMCORESACTIVE);
417 for(i = 0; i < NUMCORESACTIVE; ++i) {
419 BAMBOO_DEBUGPRINT(0xe000 + profilestatus[i]);
421 if(profilestatus[i] != 0) {
425 } // for(i = 0; i < NUMCORESACTIVE; ++i)
428 BAMBOO_CLOSE_CRITICAL_SECTION_STATUS();
430 BAMBOO_DEBUGPRINT(0xf000);
// all cores stalled and confirmed: shut the application down
440 terminate(); // All done.
441 } // if(!waitconfirm)
443 // still some objects on the fly on the network
444 // reset the waitconfirm and numconfirm
446 BAMBOO_DEBUGPRINT(0xee07);
450 } // if(0 == sumsendobj)
452 // not all cores are stalled, keep on waiting
454 BAMBOO_DEBUGPRINT(0xee08);
459 BAMBOO_CLOSE_CRITICAL_SECTION_STATUS();
461 BAMBOO_DEBUGPRINT(0xf000);
463 } // if((!waitconfirm) ||
466 // main function for each core
// Per-core entry point: initialize runtime data, then either loop handling
// communications only (cores beyond NUMCORESACTIVE) or run the task
// execution loop — create the startup object on the startup core, drain
// incoming objects/messages, execute tasks, and report stalls.
467 inline void run(void * arg) {
471 bool sendStall = false;
473 bool tocontinue = false;
475 corenum = BAMBOO_GET_NUM_OF_CORE();
477 BAMBOO_DEBUGPRINT(0xeeee);
478 BAMBOO_DEBUGPRINT_REG(corenum);
479 BAMBOO_DEBUGPRINT(STARTUPCORE);
482 // initialize runtime data structures
485 // other architecture related initialization
489 initializeexithandler();
491 // main process of the execution module
492 if(BAMBOO_NUM_OF_CORE > NUMCORESACTIVE - 1) {
493 // non-executing cores, only processing communications
496 BAMBOO_DEBUGPRINT(0xee01);
497 BAMBOO_DEBUGPRINT_REG(taskInfoIndex);
498 BAMBOO_DEBUGPRINT_REG(taskInfoOverflow);
499 profileTaskStart("msg handling");
503 //isInterrupt = false;
507 /* Create queue of active tasks */
509 genallocatehashtable((unsigned int(*) (void *)) &hashCodetpd,
510 (int(*) (void *,void *)) &comparetpd);
512 /* Process task information */
515 if(STARTUPCORE == BAMBOO_NUM_OF_CORE) {
516 /* Create startup object */
517 createstartupobject(argc, argv);
521 BAMBOO_DEBUGPRINT(0xee00);
526 // check if need to do GC
530 // check if there are new active tasks can be executed
// drain all pending messages before deciding whether to stall
537 while(receiveObject() != -1) {
542 BAMBOO_DEBUGPRINT(0xee01);
545 // check if there are some pending objects,
546 // if yes, enqueue them and executetasks again
547 tocontinue = checkObjQueue();
551 if(STARTUPCORE == BAMBOO_NUM_OF_CORE) {
554 BAMBOO_DEBUGPRINT(0xee03);
562 BAMBOO_DEBUGPRINT(0xee09);
568 // wait for some time
571 BAMBOO_DEBUGPRINT(0xee0a);
577 // send StallMsg to startup core
579 BAMBOO_DEBUGPRINT(0xee0b);
// report this core as stalled, with its send/receive counters
582 send_msg_4(STARTUPCORE, TRANSTALL, BAMBOO_NUM_OF_CORE,
583 self_numsendobjs, self_numreceiveobjs);
595 BAMBOO_DEBUGPRINT(0xee0c);
598 } // if(STARTUPCORE == BAMBOO_NUM_OF_CORE)
601 } // if(BAMBOO_NUM_OF_CORE > NUMCORESACTIVE - 1)
// GC root descriptor for createstartupobject's local pointers; the leading
// fields (size/next, on lines stripped from this extract) match the
// garbagelist layout expected by allocate_new/allocate_newarray.
605 struct ___createstartupobject____I_locals {
608 struct ___StartupObject___ * ___startupobject___;
609 struct ArrayObject * ___stringarray___;
610 }; // struct ___createstartupobject____I_locals
// Allocate the StartupObject and its String[] of command-line arguments
// (argv[1..argc-1]), set its initialized flag, and enqueue it so the first
// task can fire. NOTE(review): the two allocation sequences below are the
// GC and non-GC variants of an #ifdef whose directives were stripped from
// this extract — only one is compiled in any given build.
612 void createstartupobject(int argc,
616 /* Allocate startup object */
618 struct ___createstartupobject____I_locals ___locals___={2, NULL, NULL, NULL};
619 struct ___StartupObject___ *startupobject=
620 (struct ___StartupObject___*) allocate_new(&___locals___, STARTUPTYPE);
621 ___locals___.___startupobject___ = startupobject;
622 struct ArrayObject * stringarray=
623 allocate_newarray(&___locals___, STRINGARRAYTYPE, argc-1);
624 ___locals___.___stringarray___ = stringarray;
626 struct ___StartupObject___ *startupobject=
627 (struct ___StartupObject___*) allocate_new(STARTUPTYPE);
628 struct ArrayObject * stringarray=
629 allocate_newarray(STRINGARRAYTYPE, argc-1);
631 /* Build array of strings */
632 startupobject->___parameters___=stringarray;
633 for(i=1; i<argc; i++) {
634 int length=strlen(argv[i]);
636 struct ___String___ *newstring=NewString(&___locals___, argv[i],length);
638 struct ___String___ *newstring=NewString(argv[i],length);
// store the string into the array's element slot (i-1)
640 ((void **)(((char *)&stringarray->___length___)+sizeof(int)))[i-1]=
644 startupobject->version = 0;
645 startupobject->lock = NULL;
647 /* Set initialized flag for startup object */
648 flagorandinit(startupobject,1,0xFFFFFFFF);
649 enqueueObject(startupobject, NULL, 0);
651 BAMBOO_CACHE_FLUSH_ALL();
// Hash a task parameter descriptor: XOR of the task pointer and each
// parameter pointer (return statement is on a line stripped from this
// extract). Used as the hash function for the activetasks table.
655 int hashCodetpd(struct taskparamdescriptor *ftd) {
656 int hash=(int)ftd->task;
658 for(i=0; i<ftd->numParameters; i++) {
659 hash^=(int)ftd->parameterArray[i];
// Equality comparison for two task parameter descriptors: same task and
// identical parameter pointers. Used as the comparator for the activetasks
// table (result returns are on lines stripped from this extract).
664 int comparetpd(struct taskparamdescriptor *ftd1,
665 struct taskparamdescriptor *ftd2) {
667 if (ftd1->task!=ftd2->task)
669 for(i=0; i<ftd1->numParameters; i++)
670 if(ftd1->parameterArray[i]!=ftd2->parameterArray[i])
675 /* This function sets a tag. */
// Attach tag descriptor 'tagd' to 'obj' (storing it in obj->___tags___,
// growing to a TagDescriptor array when multiple tags are set), and record
// 'obj' in the tag's flagptr set (growing to an object array as needed).
// NOTE(review): the two signatures and the duplicated allocation sequences
// below are GC/non-GC #ifdef branches whose directives were stripped from
// this extract; the GC variants pass a ptrarray so the collector can update
// 'obj'/'tagd' if allocation moves them.
677 void tagset(void *ptr,
678 struct ___Object___ * obj,
679 struct ___TagDescriptor___ * tagd) {
681 void tagset(struct ___Object___ * obj,
682 struct ___TagDescriptor___ * tagd) {
684 struct ArrayObject * ao=NULL;
685 struct ___Object___ * tagptr=obj->___tags___;
// no tags yet: store the descriptor directly
687 obj->___tags___=(struct ___Object___ *)tagd;
689 /* Have to check if it is already set */
690 if (tagptr->type==TAGTYPE) {
// exactly one tag present: promote to a 2-slot tag array
691 struct ___TagDescriptor___ * td=(struct ___TagDescriptor___ *) tagptr;
696 int ptrarray[]={2, (int) ptr, (int) obj, (int)tagd};
697 struct ArrayObject * ao=
698 allocate_newarray(&ptrarray,TAGARRAYTYPE,TAGARRAYINTERVAL);
699 obj=(struct ___Object___ *)ptrarray[2];
700 tagd=(struct ___TagDescriptor___ *)ptrarray[3];
701 td=(struct ___TagDescriptor___ *) obj->___tags___;
703 ao=allocate_newarray(TAGARRAYTYPE,TAGARRAYINTERVAL);
706 ARRAYSET(ao, struct ___TagDescriptor___ *, 0, td);
707 ARRAYSET(ao, struct ___TagDescriptor___ *, 1, tagd);
708 obj->___tags___=(struct ___Object___ *) ao;
709 ao->___cachedCode___=2;
// already an array of tags: append, growing if full
713 struct ArrayObject *ao=(struct ArrayObject *) tagptr;
714 for(i=0; i<ao->___cachedCode___; i++) {
715 struct ___TagDescriptor___ * td=
716 ARRAYGET(ao, struct ___TagDescriptor___*, i);
// ___cachedCode___ = number of used slots; ___length___ = capacity
721 if (ao->___cachedCode___<ao->___length___) {
722 ARRAYSET(ao, struct ___TagDescriptor___ *, ao->___cachedCode___, tagd);
723 ao->___cachedCode___++;
726 int ptrarray[]={2,(int) ptr, (int) obj, (int) tagd};
727 struct ArrayObject * aonew=
728 allocate_newarray(&ptrarray,TAGARRAYTYPE,
729 TAGARRAYINTERVAL+ao->___length___);
730 obj=(struct ___Object___ *)ptrarray[2];
731 tagd=(struct ___TagDescriptor___ *) ptrarray[3];
732 ao=(struct ArrayObject *)obj->___tags___;
734 struct ArrayObject * aonew=
735 allocate_newarray(TAGARRAYTYPE,TAGARRAYINTERVAL+ao->___length___);
738 aonew->___cachedCode___=ao->___length___+1;
739 for(i=0; i<ao->___length___; i++) {
740 ARRAYSET(aonew, struct ___TagDescriptor___*, i,
741 ARRAYGET(ao, struct ___TagDescriptor___*, i));
743 ARRAYSET(aonew, struct ___TagDescriptor___ *, ao->___length___, tagd);
// reverse direction: add 'obj' to the tag descriptor's object set
749 struct ___Object___ * tagset=tagd->flagptr;
752 } else if (tagset->type!=OBJECTARRAYTYPE) {
// tag currently points at a single object: promote to a 2-slot array
754 int ptrarray[]={2, (int) ptr, (int) obj, (int)tagd};
755 struct ArrayObject * ao=
756 allocate_newarray(&ptrarray,OBJECTARRAYTYPE,OBJECTARRAYINTERVAL);
757 obj=(struct ___Object___ *)ptrarray[2];
758 tagd=(struct ___TagDescriptor___ *)ptrarray[3];
760 struct ArrayObject * ao=
761 allocate_newarray(OBJECTARRAYTYPE,OBJECTARRAYINTERVAL);
763 ARRAYSET(ao, struct ___Object___ *, 0, tagd->flagptr);
764 ARRAYSET(ao, struct ___Object___ *, 1, obj);
765 ao->___cachedCode___=2;
766 tagd->flagptr=(struct ___Object___ *)ao;
768 struct ArrayObject *ao=(struct ArrayObject *) tagset;
769 if (ao->___cachedCode___<ao->___length___) {
770 ARRAYSET(ao, struct ___Object___*, ao->___cachedCode___++, obj);
// object array full: grow and copy
774 int ptrarray[]={2, (int) ptr, (int) obj, (int)tagd};
775 struct ArrayObject * aonew=
776 allocate_newarray(&ptrarray,OBJECTARRAYTYPE,
777 OBJECTARRAYINTERVAL+ao->___length___);
778 obj=(struct ___Object___ *)ptrarray[2];
779 tagd=(struct ___TagDescriptor___ *)ptrarray[3];
780 ao=(struct ArrayObject *)tagd->flagptr;
782 struct ArrayObject * aonew=
783 allocate_newarray(OBJECTARRAYTYPE,OBJECTARRAYINTERVAL+ao->___length___);
785 aonew->___cachedCode___=ao->___cachedCode___+1;
786 for(i=0; i<ao->___length___; i++) {
787 ARRAYSET(aonew, struct ___Object___*, i,
788 ARRAYGET(ao, struct ___Object___*, i));
790 ARRAYSET(aonew, struct ___Object___ *, ao->___cachedCode___, obj);
791 tagd->flagptr=(struct ___Object___ *) aonew;
797 /* This function clears a tag. */
// Remove tag descriptor 'tagd' from 'obj' (compacting the tag array by
// moving the last slot into the hole) and remove 'obj' from the tag's
// object set the same way. The two signatures are GC/non-GC #ifdef
// branches whose directives were stripped from this extract.
799 void tagclear(void *ptr,
800 struct ___Object___ * obj,
801 struct ___TagDescriptor___ * tagd) {
803 void tagclear(struct ___Object___ * obj,
804 struct ___TagDescriptor___ * tagd) {
806 /* We'll assume that tag is always there.
807 Need to statically check for this of course. */
808 struct ___Object___ * tagptr=obj->___tags___;
810 if (tagptr->type==TAGTYPE) {
// single tag: just clear the slot if it matches
811 if ((struct ___TagDescriptor___ *)tagptr==tagd)
812 obj->___tags___=NULL;
814 struct ArrayObject *ao=(struct ArrayObject *) tagptr;
816 for(i=0; i<ao->___cachedCode___; i++) {
817 struct ___TagDescriptor___ * td=
818 ARRAYGET(ao, struct ___TagDescriptor___ *, i);
// found: swap in the last used slot and shrink the used count
820 ao->___cachedCode___--;
821 if (i<ao->___cachedCode___)
822 ARRAYSET(ao, struct ___TagDescriptor___ *, i,
823 ARRAYGET(ao, struct ___TagDescriptor___ *, ao->___cachedCode___));
824 ARRAYSET(ao, struct ___TagDescriptor___ *, ao->___cachedCode___, NULL);
825 if (ao->___cachedCode___==0)
826 obj->___tags___=NULL;
// reverse direction: remove 'obj' from the tag's object set
833 struct ___Object___ *tagset=tagd->flagptr;
834 if (tagset->type!=OBJECTARRAYTYPE) {
838 struct ArrayObject *ao=(struct ArrayObject *) tagset;
840 for(i=0; i<ao->___cachedCode___; i++) {
841 struct ___Object___ * tobj=ARRAYGET(ao, struct ___Object___ *, i);
843 ao->___cachedCode___--;
844 if (i<ao->___cachedCode___)
845 ARRAYSET(ao, struct ___Object___ *, i,
846 ARRAYGET(ao, struct ___Object___ *, ao->___cachedCode___));
847 ARRAYSET(ao, struct ___Object___ *, ao->___cachedCode___, NULL);
848 if (ao->___cachedCode___==0)
859 /* This function allocates a new tag. */
// GC and non-GC variants (stripped #ifdef): the GC variant passes the
// caller's garbagelist so the collector can trace during allocation.
861 struct ___TagDescriptor___ * allocate_tag(void *ptr,
863 struct ___TagDescriptor___ * v=
864 (struct ___TagDescriptor___ *) FREEMALLOC((struct garbagelist *) ptr,
867 struct ___TagDescriptor___ * allocate_tag(int index) {
868 struct ___TagDescriptor___ * v=FREEMALLOC(classsize[TAGTYPE]);
877 /* This function updates the flag for object ptr. It or's the flag
878 with the or mask and and's it with the andmask. */
// Forward declaration of the shared worker (signature truncated in this
// extract; confirm remaining parameters against the full source).
880 void flagbody(struct ___Object___ *ptr,
882 struct parameterwrapper ** queues,
// Integer comparator (qsort-style): negative/zero/positive ordering.
886 int flagcomp(const int *val1, const int *val2) {
887 return (*val1)-(*val2);
// Apply the or-mask to ptr's flag word (word 1 of the object header) and
// forward to flagbody with the supplied queues; isnew=false.
// NOTE(review): the andmask application lines are stripped from this
// extract — only the OR step is visible here.
890 void flagorand(void * ptr,
893 struct parameterwrapper ** queues,
896 int oldflag=((int *)ptr)[1];
897 int flag=ormask|oldflag;
899 flagbody(ptr, flag, queues, length, false);
// Like flagorand but skips the update entirely when the masks do not
// change the flag word (returns early), using the default queues.
903 bool intflagorand(void * ptr,
907 int oldflag=((int *)ptr)[1];
908 int flag=ormask|oldflag;
910 if (flag==oldflag) /* Don't do anything */
913 flagbody(ptr, flag, NULL, 0, false);
// Initial flag set for a freshly created object: same masking as
// flagorand but with isnew=true so flagbody skips queue removal.
919 void flagorandinit(void * ptr,
922 int oldflag=((int *)ptr)[1];
923 int flag=ormask|oldflag;
925 flagbody(ptr,flag,NULL,0,true);
// Worker for the flag* entry points: resolves the default parameter queues
// for ptr's type when none are given, removes the object from each queue
// (unless it is new), then — on lines stripped from this extract —
// re-enqueues it under the updated flag.
928 void flagbody(struct ___Object___ *ptr,
930 struct parameterwrapper ** vqueues,
933 struct parameterwrapper * flagptr = NULL;
935 struct parameterwrapper ** queues = vqueues;
936 int length = vlength;
939 int * enterflags = NULL;
// default to this core's queues for the object's type
940 if((!isnew) && (queues == NULL)) {
941 if(BAMBOO_NUM_OF_CORE < NUMCORESACTIVE) {
942 queues = objectqueues[BAMBOO_NUM_OF_CORE][ptr->type];
943 length = numqueues[BAMBOO_NUM_OF_CORE][ptr->type];
950 /*Remove object from all queues */
951 for(i = 0; i < length; ++i) {
953 ObjectHashget(flagptr->objectset, (int) ptr, (int *) &next,
954 (int *) &enterflags, &UNUSED, &UNUSED2);
955 ObjectHashremove(flagptr->objectset, (int)ptr);
956 if (enterflags!=NULL)
// Enqueue an object into every task parameter queue whose tag and flag
// predicates it satisfies (non-critical-section version; see
// enqueueObject_I for the in-critical-section variant).
961 void enqueueObject(void * vptr,
962 struct parameterwrapper ** vqueues,
964 struct ___Object___ *ptr = (struct ___Object___ *)vptr;
967 //struct QueueItem *tmpptr;
968 struct parameterwrapper * parameter=NULL;
971 struct parameterwrapper * prevptr=NULL;
972 struct ___Object___ *tagptr=NULL;
973 struct parameterwrapper ** queues = vqueues;
974 int length = vlength;
975 if(BAMBOO_NUM_OF_CORE > NUMCORESACTIVE - 1) {
// default to this core's queues for the object's type
979 queues = objectqueues[BAMBOO_NUM_OF_CORE][ptr->type];
980 length = numqueues[BAMBOO_NUM_OF_CORE][ptr->type];
982 tagptr=ptr->___tags___;
984 /* Outer loop iterates through all parameter queues an object of
985 this type could be in. */
986 for(j = 0; j < length; ++j) {
987 parameter = queues[j];
// first: every tag the parameter requires must be on the object
989 if (parameter->numbertags>0) {
991 goto nextloop; //that means the object has no tag
992 //but that param needs tag
993 else if(tagptr->type==TAGTYPE) { //one tag
994 //struct ___TagDescriptor___ * tag=
995 //(struct ___TagDescriptor___*) tagptr;
996 for(i=0; i<parameter->numbertags; i++) {
997 //slotid is parameter->tagarray[2*i];
998 int tagid=parameter->tagarray[2*i+1];
999 if (tagid!=tagptr->flag)
1000 goto nextloop; /*We don't have this tag */
1002 } else { //multiple tags
1003 struct ArrayObject * ao=(struct ArrayObject *) tagptr;
1004 for(i=0; i<parameter->numbertags; i++) {
1005 //slotid is parameter->tagarray[2*i];
1006 int tagid=parameter->tagarray[2*i+1];
// NOTE(review): this inner loop reuses 'j', shadowing/clobbering the
// outer queue index — preserved as-is; verify against the full source.
1008 for(j=0; j<ao->___cachedCode___; j++) {
1009 if (tagid==ARRAYGET(ao, struct ___TagDescriptor___*, j)->flag)
// then: the flag word must match one of the parameter's (and,check) terms
1020 for(i=0; i<parameter->numberofterms; i++) {
1021 int andmask=parameter->intarray[i*2];
1022 int checkmask=parameter->intarray[i*2+1];
1023 if ((ptr->flag&andmask)==checkmask) {
1024 enqueuetasks(parameter, prevptr, ptr, NULL, 0);
// Critical-section variant of enqueueObject (callers already hold the
// messaging critical section): same tag/flag matching, but invokes
// enqueuetasks_I instead of enqueuetasks.
1035 void enqueueObject_I(void * vptr,
1036 struct parameterwrapper ** vqueues,
1038 struct ___Object___ *ptr = (struct ___Object___ *)vptr;
1041 //struct QueueItem *tmpptr;
1042 struct parameterwrapper * parameter=NULL;
1045 struct parameterwrapper * prevptr=NULL;
1046 struct ___Object___ *tagptr=NULL;
1047 struct parameterwrapper ** queues = vqueues;
1048 int length = vlength;
1049 if(BAMBOO_NUM_OF_CORE > NUMCORESACTIVE - 1) {
// default to this core's queues for the object's type
1052 if(queues == NULL) {
1053 queues = objectqueues[BAMBOO_NUM_OF_CORE][ptr->type];
1054 length = numqueues[BAMBOO_NUM_OF_CORE][ptr->type];
1056 tagptr=ptr->___tags___;
1058 /* Outer loop iterates through all parameter queues an object of
1059 this type could be in. */
1060 for(j = 0; j < length; ++j) {
1061 parameter = queues[j];
// first: every tag the parameter requires must be on the object
1063 if (parameter->numbertags>0) {
1065 goto nextloop; //that means the object has no tag
1066 //but that param needs tag
1067 else if(tagptr->type==TAGTYPE) { //one tag
1068 //struct ___TagDescriptor___ * tag=(struct ___TagDescriptor___*) tagptr;
1069 for(i=0; i<parameter->numbertags; i++) {
1070 //slotid is parameter->tagarray[2*i];
1071 int tagid=parameter->tagarray[2*i+1];
1072 if (tagid!=tagptr->flag)
1073 goto nextloop; /*We don't have this tag */
1075 } else { //multiple tags
1076 struct ArrayObject * ao=(struct ArrayObject *) tagptr;
1077 for(i=0; i<parameter->numbertags; i++) {
1078 //slotid is parameter->tagarray[2*i];
1079 int tagid=parameter->tagarray[2*i+1];
// NOTE(review): inner loop reuses outer index 'j' — preserved as-is;
// verify against the full source.
1081 for(j=0; j<ao->___cachedCode___; j++) {
1082 if (tagid==ARRAYGET(ao, struct ___TagDescriptor___*, j)->flag)
// then: the flag word must match one of the parameter's (and,check) terms
1093 for(i=0; i<parameter->numberofterms; i++) {
1094 int andmask=parameter->intarray[i*2];
1095 int checkmask=parameter->intarray[i*2+1];
1096 if ((ptr->flag&andmask)==checkmask) {
1097 enqueuetasks_I(parameter, prevptr, ptr, NULL, 0);
// Compute a single lock covering a set of possibly-aliased objects:
// collects each object's existing lock (or the object address when it has
// no lock yet), follows/records redirections in 'tbl', keeps the candidate
// locks sorted, and returns the chosen lock. The early return at the top
// belongs to a stripped #ifdef branch (likely the GC build, which does not
// use alias locks) — confirm against the full source.
1109 int * getAliasLock(void ** ptrs,
1111 struct RuntimeHash * tbl) {
1113 return (int*)(RUNMALLOC(sizeof(int)));
1118 bool redirect = false;
1119 int redirectlock = 0;
1120 for(; i < length; i++) {
1121 struct ___Object___ * ptr = (struct ___Object___ *)(ptrs[i]);
1124 if(ptr->lock == NULL) {
1127 lock = (int)(ptr->lock);
// already redirecting: map this lock onto the chosen redirect lock
1130 if(lock != redirectlock) {
1131 RuntimeHashadd(tbl, lock, redirectlock);
1134 if(RuntimeHashcontainskey(tbl, lock)) {
1135 // already redirected
1137 RuntimeHashget(tbl, lock, &redirectlock);
1138 for(; j < locklen; j++) {
1139 if(locks[j] != redirectlock) {
1140 RuntimeHashadd(tbl, locks[j], redirectlock);
// insert 'lock' into the sorted locks[] array (insertion sort step)
1145 for(j = 0; j < locklen; j++) {
1146 if(locks[j] == lock) {
1149 } else if(locks[j] > lock) {
1156 locks[h] = locks[h-1];
1165 return (int *)redirectlock;
1167 return (int *)(locks[0]);
// Associate alias lock 'lock' with 'ptr' unless the object IS the lock or
// already carries that lock. NOTE(review): the 'lock' parameter's
// declaration sits on a stripped line; from usage it is an int.
1172 void addAliasLock(void * ptr,
1174 struct ___Object___ * obj = (struct ___Object___ *)ptr;
1175 if(((int)ptr != lock) && (obj->lock != (int*)lock)) {
1176 // originally no alias lock associated or have a different alias lock
1177 // flush it as the new one
1178 obj->lock = (int *)lock;
// Profiling hook: record the exit index of the task currently being
// profiled (taskInfoArray slot taskInfoIndex).
1183 inline void setTaskExitIndex(int index) {
1184 taskInfoArray[taskInfoIndex]->exitIndex = index;
// Profiling hook: record an object created by the current task, lazily
// creating the task's newObjs queue on first use.
1187 inline void addNewObjInfo(void * nobj) {
1188 if(taskInfoArray[taskInfoIndex]->newObjs == NULL) {
1189 taskInfoArray[taskInfoIndex]->newObjs = createQueue();
1191 addNewItem(taskInfoArray[taskInfoIndex]->newObjs, nobj);
// Walk the shared free-memory list looking for a chunk of at least 'isize'
// bytes that suits the current bamboo_smem_mode for core 'coren' (local
// block preference, fixed, mixed, or global). Empty chunks encountered on
// the way are unlinked and parked on the backup list for reuse. Must be
// called inside a critical section ("_I"). Returns the matching chunk, or
// a value set on stripped lines when none fits — confirm the not-found
// return against the full source.
1196 struct freeMemItem * findFreeMemChunk_I(int coren,
1199 struct freeMemItem * freemem = bamboo_free_mem_list->head;
1200 struct freeMemItem * prev = NULL;
// candidate block index for this core (i/j declared on stripped lines)
1203 *tofindb = gc_core2block[2*coren+i]+(NUMCORES4GC*2)*j;
1204 // check available shared mem chunks
1207 switch(bamboo_smem_mode) {
// SMEMLOCAL (case label stripped): advance tofindb until it reaches
// this chunk's block range, then require it to fit entirely
1209 int startb = freemem->startblock;
1210 int endb = freemem->endblock;
1211 while(startb > *tofindb) {
1217 *tofindb = gc_core2block[2*coren+i]+(NUMCORES4GC*2)*j;
1218 } // while(startb > tofindb)
1219 if(startb <= *tofindb) {
1220 if((endb >= *tofindb) && (freemem->size >= isize)) {
1222 } else if(*tofindb > gcnumblock-1) {
1223 // no more local mem
1225 } // if(endb >= tofindb)
1226 } // if(startb <= tofindb)
// SMEMFIXED/SMEMMIXED branch (case label stripped): local block first,
// falling back to global memory past the large-block boundary
1231 int startb = freemem->startblock;
1232 int endb = freemem->endblock;
1233 if(startb <= *tofindb) {
1234 if((endb >= *tofindb) && (freemem->size >= isize)) {
1238 // use the global mem
1239 if(((startb > NUMCORES4GC-1) && (freemem->size >= isize)) ||
1240 ((endb > NUMCORES4GC-1) && ((freemem->size-
1241 (gcbaseva+BAMBOO_LARGE_SMEM_BOUND-freemem->ptr))>=isize))) {
1249 // TODO not supported yet
1250 BAMBOO_EXIT(0xe001);
// SMEMGLOBAL (case label stripped): any chunk large enough qualifies
1255 foundsmem = (freemem->size >= isize);
// foundsmem: 1 = usable chunk found, 2 = out of memory entirely
1262 if(1 == foundsmem) {
1265 } else if (2 == foundsmem) {
1266 // terminate, no more mem
1270 if(freemem->size == 0) {
1271 // an empty item, remove it
1272 struct freeMemItem * toremove = freemem;
1273 freemem = freemem->next;
1276 bamboo_free_mem_list->head = freemem;
1278 prev->next = freemem;
1280 // put it to the tail of the list for reuse
1281 toremove->next = bamboo_free_mem_list->backuplist;
1282 bamboo_free_mem_list->backuplist = toremove;
1285 freemem = freemem->next;
1287 } while(freemem != NULL);
1290 } // struct freeMemItem * findFreeMemChunk_I(int, int, int *)
// Carve an 'isize'-byte allocation for block 'tofindb' out of free chunk
// 'freemem', clipping the allocation at the current block's end so it
// never crosses a block boundary; *allocsize receives the granted size and
// the chunk is shrunk or split accordingly. Must be called inside a
// critical section ("_I").
1292 void * localmalloc_I(int tofindb,
1294 struct freeMemItem * freemem,
1297 int startb = freemem->startblock;
1298 int endb = freemem->endblock;
// virtual address of the target block (size-L blocks precede normal ones)
1299 int tmpptr = gcbaseva+((tofindb<NUMCORES4GC)?tofindb*BAMBOO_SMEM_SIZE_L
1300 :BAMBOO_LARGE_SMEM_BOUND+(tofindb-NUMCORES4GC)*BAMBOO_SMEM_SIZE);
1301 if((freemem->size+freemem->ptr-tmpptr)>=isize) {
1302 mem = (tmpptr>freemem->ptr)?((void *)tmpptr):(freemem->ptr);
1304 mem = (void *)(freemem->size+freemem->ptr-isize);
1306 // check the remaining space in this block
1307 int remain = (int)(mem-gcbaseva);
1308 int bound = (BAMBOO_SMEM_SIZE);
1309 if(remain < BAMBOO_LARGE_SMEM_BOUND) {
1310 bound = (BAMBOO_SMEM_SIZE_L);
1312 remain = bound - remain%bound;
1313 if(remain < isize) {
1314 // this object crosses blocks
1317 // round the assigned allocation to the end of the current block
1318 *allocsize = remain;
// update the free chunk: shrink from front, shrink from back, or split
1320 if(freemem->ptr == (int)mem) {
1321 freemem->ptr = ((void*)freemem->ptr) + (*allocsize);
1322 freemem->size -= *allocsize;
1323 BLOCKINDEX(freemem->ptr, &(freemem->startblock));
1324 } else if((freemem->ptr+freemem->size) == ((int)mem+(*allocsize))) {
1325 freemem->size -= *allocsize;
1326 BLOCKINDEX(((int)mem)-1, &(freemem->endblock));
// allocation in the middle: split the chunk into two list nodes
1328 struct freeMemItem * tmp =
1329 (struct freeMemItem *)RUNMALLOC_I(sizeof(struct freeMemItem));
1330 tmp->ptr = (int)mem+*allocsize;
1331 tmp->size = freemem->ptr+freemem->size-(int)mem-*allocsize;
1332 BLOCKINDEX(tmp->ptr, &(tmp->startblock));
1333 tmp->endblock = freemem->endblock;
1334 tmp->next = freemem->next;
1335 freemem->next = tmp;
1336 freemem->size = (int)mem - freemem->ptr;
1337 BLOCKINDEX(((int)mem-1), &(freemem->endblock));
1340 } // void * localmalloc_I(int, int, struct freeMemItem *, int *)
// Allocate 'isize' bytes from the front of free chunk 'freemem' in global
// mode, clipping at the current block boundary like localmalloc_I;
// *allocsize receives the granted size. Must be called inside a critical
// section ("_I").
1342 void * globalmalloc_I(int isize,
1343 struct freeMemItem * freemem,
1345 void * mem = (void *)(freemem->ptr);
1346 // check the remaining space in this block
1347 int remain = (int)(mem-(BAMBOO_BASE_VA));
1348 int bound = (BAMBOO_SMEM_SIZE);
1349 if(remain < BAMBOO_LARGE_SMEM_BOUND) {
1350 bound = (BAMBOO_SMEM_SIZE_L);
1352 remain = bound - remain%bound;
1353 if(remain < isize) {
1354 // this object crosses blocks
1357 // round the assigned allocation to the end of the current block
1358 *allocsize = remain;
// consume the granted bytes from the front of the chunk
1360 freemem->ptr = ((void*)freemem->ptr) + (*allocsize);
1361 freemem->size -= *allocsize;
1363 } // void * globalmalloc_I(int, struct freeMemItem *, int *)
1366 // malloc from the shared memory
// Top-level shared-memory allocator: pad the request by a cache line, find
// a suitable free chunk for core 'coren', and dispatch to localmalloc_I /
// globalmalloc_I according to bamboo_smem_mode. The mspace_calloc path
// belongs to a stripped #ifdef branch (non-GC build using dlmalloc
// mspaces). On exhaustion the core exits with code 0xa001. Must be called
// inside a critical section ("_I").
1367 void * smemalloc_I(int coren,
1372 int isize = size+(BAMBOO_CACHE_LINE_SIZE);
1373 int toallocate = (isize>(BAMBOO_SMEM_SIZE)) ? (isize):(BAMBOO_SMEM_SIZE);
1374 // go through free mem list for suitable chunks
1376 struct freeMemItem * freemem = findFreeMemChunk_I(coren, isize, &tofindb);
1378 // allocate shared mem if available
1379 if(freemem != NULL) {
1380 switch(bamboo_smem_mode) {
// SMEMLOCAL (case label stripped)
1382 mem = localmalloc_I(tofindb, isize, freemem, allocsize);
// SMEMFIXED/SMEMMIXED (case label stripped): local if the chunk covers
// the core's block, otherwise fall back to global memory
1387 int startb = freemem->startblock;
1388 int endb = freemem->endblock;
1389 if(startb > tofindb) {
1390 // malloc on global mem
1391 mem = globalmalloc_I(isize, freemem, allocsize);
1393 // malloc on local mem
1394 mem = localmalloc_I(tofindb, isize, freemem, allocsize);
1400 // TODO not supported yet
1401 BAMBOO_EXIT(0xe002);
// SMEMGLOBAL (case label stripped)
1406 mem = globalmalloc_I(isize,freemem, allocsize);
// non-GC build path (stripped #ifdef): allocate from the mspace heap
1415 int toallocate = (size>(BAMBOO_SMEM_SIZE)) ? (size):(BAMBOO_SMEM_SIZE);
1416 mem = mspace_calloc(bamboo_free_msp, 1, toallocate);
1417 *allocsize = toallocate;
1420 // not enough shared global memory
1426 BAMBOO_DEBUGPRINT(0xa001);
1427 BAMBOO_EXIT(0xa001);
1431 } // void * smemalloc_I(int, int, int)
1433 // receive object transferred from other cores
1434 // or the terminate message from other cores
1435 // Should be invoked in critical sections!!
1436 // NOTICE: following format is for threadsimulate version only
1437 // RAW version please see previous description
1438 // format: type + object
1439 // type: -1--stall msg
1441 // return value: 0--received an object
1442 // 1--received nothing
1443 // 2--received a Stall Msg
1444 // 3--received a lock Msg
1445 // RAW version: -1 -- received nothing
1446 // otherwise -- received msg type
1447 int receiveObject() {
1451 if(receiveMsg() == -1) {
1455 if(msgdataindex == msglength) {
1456 // received a whole msg
1461 // receive a object transfer msg
1462 struct transObjInfo * transObj =
1463 RUNMALLOC_I(sizeof(struct transObjInfo));
1467 BAMBOO_DEBUGPRINT(0xe880);
1470 if(BAMBOO_NUM_OF_CORE > NUMCORESACTIVE - 1) {
1472 BAMBOO_DEBUGPRINT_REG(msgdata[2]);
1474 BAMBOO_EXIT(0xa002);
1476 // store the object and its corresponding queue info, enqueue it later
1477 transObj->objptr = (void *)msgdata[2];
1478 transObj->length = (msglength - 3) / 2;
1479 transObj->queues = RUNMALLOC_I(sizeof(int)*(msglength - 3));
1480 for(k = 0; k < transObj->length; ++k) {
1481 transObj->queues[2*k] = msgdata[3+2*k];
1484 BAMBOO_DEBUGPRINT_REG(transObj->queues[2*k]);
1487 transObj->queues[2*k+1] = msgdata[3+2*k+1];
1490 BAMBOO_DEBUGPRINT_REG(transObj->queues[2*k+1]);
1494 // check if there is an existing duplicate item
1496 struct QueueItem * qitem = getHead(&objqueue);
1497 struct QueueItem * prev = NULL;
1498 while(qitem != NULL) {
1499 struct transObjInfo * tmpinfo =
1500 (struct transObjInfo *)(qitem->objectptr);
1501 if(tmpinfo->objptr == transObj->objptr) {
1502 // the same object, remove outdate one
1503 removeItem(&objqueue, qitem);
1509 qitem = getHead(&objqueue);
1511 qitem = getNextQueueItem(prev);
1514 addNewItem_I(&objqueue, (void *)transObj);
1516 ++(self_numreceiveobjs);
1521 // receive a stall msg
1522 if(BAMBOO_NUM_OF_CORE != STARTUPCORE) {
1523 // non startup core can not receive stall msg
1525 BAMBOO_DEBUGPRINT_REG(msgdata[1]);
1527 BAMBOO_EXIT(0xa003);
1529 if(msgdata[1] < NUMCORESACTIVE) {
1532 BAMBOO_DEBUGPRINT(0xe881);
1535 corestatus[msgdata[1]] = 0;
1536 numsendobjs[msgdata[1]] = msgdata[2];
1537 numreceiveobjs[msgdata[1]] = msgdata[3];
1542 // GC version have no lock msgs
1543 #ifndef MULTICORE_GC
1545 // receive lock request msg, handle it right now
1546 // check to see if there is a lock exist for the required obj
1547 // msgdata[1] -> lock type
1548 int data2 = msgdata[2]; // obj pointer
1549 int data3 = msgdata[3]; // lock
1550 int data4 = msgdata[4]; // request core
1551 // -1: redirected, 0: approved, 1: denied
1552 deny = processlockrequest(msgdata[1], data3, data2,
1553 data4, data4, true);
1555 // this lock request is redirected
1558 // send response msg
1559 // for 32 bit machine, the size is always 4 words
1560 int tmp = deny==1?LOCKDENY:LOCKGROUNT;
1562 cache_msg_4(data4, tmp, msgdata[1], data2, data3);
1564 send_msg_4(data4, tmp, msgdata[1], data2, data3);
1571 // receive lock grount msg
1572 if(BAMBOO_NUM_OF_CORE > NUMCORESACTIVE - 1) {
1574 BAMBOO_DEBUGPRINT_REG(msgdata[2]);
1576 BAMBOO_EXIT(0xa004);
1578 if((lockobj == msgdata[2]) && (lock2require == msgdata[3])) {
1581 BAMBOO_DEBUGPRINT(0xe882);
1590 // conflicts on lockresults
1592 BAMBOO_DEBUGPRINT_REG(msgdata[2]);
1594 BAMBOO_EXIT(0xa005);
1600 // receive lock deny msg
1601 if(BAMBOO_NUM_OF_CORE > NUMCORESACTIVE - 1) {
1603 BAMBOO_DEBUGPRINT_REG(msgdata[2]);
1605 BAMBOO_EXIT(0xa006);
1607 if((lockobj == msgdata[2]) && (lock2require == msgdata[3])) {
1610 BAMBOO_DEBUGPRINT(0xe883);
1619 // conflicts on lockresults
1621 BAMBOO_DEBUGPRINT_REG(msgdata[2]);
1623 BAMBOO_EXIT(0xa007);
1629 // receive lock release msg
1630 processlockrelease(msgdata[1], msgdata[2], 0, false);
1636 case PROFILEOUTPUT: {
1637 // receive an output profile data request msg
1638 if(BAMBOO_NUM_OF_CORE == STARTUPCORE) {
1639 // startup core can not receive profile output finish msg
1640 BAMBOO_EXIT(0xa008);
1644 BAMBOO_DEBUGPRINT(0xe885);
1648 totalexetime = msgdata[1];
1649 outputProfileData();
1651 cache_msg_2(STARTUPCORE, PROFILEFINISH, BAMBOO_NUM_OF_CORE);
1653 send_msg_2(STARTUPCORE, PROFILEFINISH, BAMBOO_NUM_OF_CORE);
1658 case PROFILEFINISH: {
1659 // receive a profile output finish msg
1660 if(BAMBOO_NUM_OF_CORE != STARTUPCORE) {
1661 // non startup core can not receive profile output finish msg
1663 BAMBOO_DEBUGPRINT_REG(msgdata[1]);
1665 BAMBOO_EXIT(0xa009);
1669 BAMBOO_DEBUGPRINT(0xe886);
1672 profilestatus[msgdata[1]] = 0;
1677 // GC version has no lock msgs
1678 #ifndef MULTICORE_GC
1679 case REDIRECTLOCK: {
1680 // receive a redirect lock request msg, handle it right now
1681 // check to see if there is a lock exist for the required obj
1682 int data1 = msgdata[1]; // lock type
1683 int data2 = msgdata[2]; // obj pointer
1684 int data3 = msgdata[3]; // redirect lock
1685 int data4 = msgdata[4]; // root request core
1686 int data5 = msgdata[5]; // request core
1687 deny = processlockrequest(msgdata[1], data3, data2, data5, data4, true);
1689 // this lock request is redirected
1692 // send response msg
1693 // for 32 bit machine, the size is always 4 words
1695 cache_msg_4(data4, deny==1?REDIRECTDENY:REDIRECTGROUNT,
1696 data1, data2, data3);
1698 send_msg_4(data4, deny==1?REDIRECTDENY:REDIRECTGROUNT,
1699 data1, data2, data3);
1705 case REDIRECTGROUNT: {
1706 // receive a lock grant msg with redirect info
1707 if(BAMBOO_NUM_OF_CORE > NUMCORESACTIVE - 1) {
1709 BAMBOO_DEBUGPRINT_REG(msgdata[2]);
1711 BAMBOO_EXIT(0xa00a);
1713 if(lockobj == msgdata[2]) {
1716 BAMBOO_DEBUGPRINT(0xe891);
1721 RuntimeHashadd_I(objRedirectLockTbl, lockobj, msgdata[3]);
1726 // conflicts on lockresults
1728 BAMBOO_DEBUGPRINT_REG(msgdata[2]);
1730 BAMBOO_EXIT(0xa00b);
1735 case REDIRECTDENY: {
1736 // receive a lock deny msg with redirect info
1737 if(BAMBOO_NUM_OF_CORE > NUMCORESACTIVE - 1) {
1739 BAMBOO_DEBUGPRINT_REG(msgdata[2]);
1741 BAMBOO_EXIT(0xa00c);
1743 if(lockobj == msgdata[2]) {
1746 BAMBOO_DEBUGPRINT(0xe892);
1755 // conflicts on lockresults
1757 BAMBOO_DEBUGPRINT_REG(msgdata[2]);
1759 BAMBOO_EXIT(0xa00d);
1764 case REDIRECTRELEASE: {
1765 // receive a lock release msg with redirect info
1766 processlockrelease(msgdata[1], msgdata[2], msgdata[3], true);
1771 case STATUSCONFIRM: {
1772 // receive a status confirm info
1773 if((BAMBOO_NUM_OF_CORE == STARTUPCORE)
1774 || (BAMBOO_NUM_OF_CORE > NUMCORESACTIVE - 1)) {
1775 // wrong core to receive such msg
1776 BAMBOO_EXIT(0xa00e);
1778 // send response msg
1781 BAMBOO_DEBUGPRINT(0xe887);
1785 cache_msg_5(STARTUPCORE, STATUSREPORT,
1786 busystatus?1:0, BAMBOO_NUM_OF_CORE,
1787 self_numsendobjs, self_numreceiveobjs);
1789 send_msg_5(STARTUPCORE, STATUSREPORT,
1790 busystatus?1:0, BAMBOO_NUM_OF_CORE,
1791 self_numsendobjs, self_numreceiveobjs);
1797 case STATUSREPORT: {
1798 // receive a status confirm info
1799 if(BAMBOO_NUM_OF_CORE != STARTUPCORE) {
1800 // wrong core to receive such msg
1802 BAMBOO_DEBUGPRINT_REG(msgdata[2]);
1804 BAMBOO_EXIT(0xa00f);
1808 BAMBOO_DEBUGPRINT(0xe888);
1814 corestatus[msgdata[2]] = msgdata[1];
1815 numsendobjs[msgdata[2]] = msgdata[3];
1816 numreceiveobjs[msgdata[2]] = msgdata[4];
1822 // receive a terminate msg
1825 BAMBOO_DEBUGPRINT(0xe889);
1834 // receive a shared memory request msg
1835 if(BAMBOO_NUM_OF_CORE != STARTUPCORE) {
1836 // wrong core to receive such msg
1838 BAMBOO_DEBUGPRINT_REG(msgdata[2]);
1840 BAMBOO_EXIT(0xa010);
1844 BAMBOO_DEBUGPRINT(0xe88a);
1849 // is currently doing gc, dump this msg
1854 void * mem = smemalloc_I(msgdata[2], msgdata[1], &allocsize);
1858 // send the start_va to request core
1860 cache_msg_3(msgdata[2], MEMRESPONSE, mem, allocsize);
1862 send_msg_3( msgdata[2], MEMRESPONSE, mem, allocsize);
1869 // receive a shared memory response msg
1872 BAMBOO_DEBUGPRINT(0xe88b);
1877 // is currently doing gc, dump this msg
1881 if(msgdata[2] == 0) {
1882 bamboo_smem_size = 0;
1886 // fill header to store the size of this mem block
1887 (*((int*)msgdata[1])) = msgdata[2];
1888 bamboo_smem_size = msgdata[2] - BAMBOO_CACHE_LINE_SIZE;
1889 bamboo_cur_msp = msgdata[1] + BAMBOO_CACHE_LINE_SIZE;
1891 bamboo_smem_size = msgdata[2];
1892 bamboo_cur_msp =(void*)(msgdata[1]);
1903 gcphase = INITPHASE;
1905 // is waiting for response of mem request
1906 // let it return NULL and start gc
1907 bamboo_smem_size = 0;
1908 bamboo_cur_msp = NULL;
1915 // receive a start GC msg
1918 BAMBOO_DEBUGPRINT(0xe88c);
1922 gcphase = MARKPHASE;
1926 case GCSTARTCOMPACT: {
1927 // a compact phase start msg
1928 gcblock2fill = msgdata[1];
1929 gcphase = COMPACTPHASE;
1933 case GCSTARTFLUSH: {
1934 // received a flush phase start msg
1935 gcphase = FLUSHPHASE;
1939 case GCFINISHINIT: {
1940 // received a init phase finish msg
1941 if(BAMBOO_NUM_OF_CORE != STARTUPCORE) {
1942 // non startup core can not receive this msg
1944 BAMBOO_DEBUGPRINT_REG(msgdata[1]);
1946 BAMBOO_EXIT(0xb001);
1949 BAMBOO_DEBUGPRINT(0xe88c);
1950 BAMBOO_DEBUGPRINT_REG(msgdata[1]);
1952 if(msgdata[1] < NUMCORES4GC) {
1953 gccorestatus[msgdata[1]] = 0;
1957 case GCFINISHMARK: {
1958 // received a mark phase finish msg
1959 if(BAMBOO_NUM_OF_CORE != STARTUPCORE) {
1960 // non startup core can not receive this msg
1962 BAMBOO_DEBUGPRINT_REG(msgdata[1]);
1964 BAMBOO_EXIT(0xb002);
1966 if(msgdata[1] < NUMCORES4GC) {
1967 gccorestatus[msgdata[1]] = 0;
1968 gcnumsendobjs[msgdata[1]] = msgdata[2];
1969 gcnumreceiveobjs[msgdata[1]] = msgdata[3];
1974 case GCFINISHCOMPACT: {
1975 // received a compact phase finish msg
1976 if(BAMBOO_NUM_OF_CORE != STARTUPCORE) {
1977 // non startup core can not receive this msg
1980 BAMBOO_DEBUGPRINT_REG(msgdata[1]);
1982 BAMBOO_EXIT(0xb003);
1984 int cnum = msgdata[1];
1985 int filledblocks = msgdata[2];
1986 int heaptop = msgdata[3];
1987 int data4 = msgdata[4];
1988 if(cnum < NUMCORES4GC) {
1989 if(COMPACTPHASE == gcphase) {
1990 gcfilledblocks[cnum] = filledblocks;
1991 gcloads[cnum] = heaptop;
1998 if(gcfindSpareMem_I(&startaddr, &tomove, &dstcore, data4, cnum)) {
2000 cache_msg_4(cnum, GCMOVESTART, dstcore, startaddr, tomove);
2002 send_msg_4(cnum, GCMOVESTART, dstcore, startaddr, tomove);
2006 gccorestatus[cnum] = 0;
2007 // check if there is pending move request
2008 /*if(gcmovepending > 0) {
2010 for(j = 0; j < NUMCORES4GC; j++) {
2011 if(gcrequiredmems[j]>0) {
2015 if(j < NUMCORES4GC) {
2019 gcrequiredmems[j] = assignSpareMem_I(cnum,
2023 if(STARTUPCORE == j) {
2026 gcmovestartaddr = startaddr;
2027 gcblock2fill = tomove;
2030 cache_msg_4(j, GCMOVESTART, cnum, startaddr, tomove);
2032 send_msg_4(j, GCMOVESTART, cnum, startaddr, tomove);
2034 } // if(STARTUPCORE == j)
2035 if(gcrequiredmems[j] == 0) {
2038 } // if(j < NUMCORES4GC)
2039 } // if(gcmovepending > 0) */
2041 } // if(cnum < NUMCORES4GC)
2045 case GCFINISHFLUSH: {
2046 // received a flush phase finish msg
2047 if(BAMBOO_NUM_OF_CORE != STARTUPCORE) {
2048 // non startup core can not receive this msg
2051 BAMBOO_DEBUGPRINT_REG(msgdata[1]);
2053 BAMBOO_EXIT(0xb004);
2055 if(msgdata[1] < NUMCORES4GC) {
2056 gccorestatus[msgdata[1]] = 0;
2062 // received a GC finish msg
2063 gcphase = FINISHPHASE;
2067 case GCMARKCONFIRM: {
2068 // received a marked phase finish confirm request msg
2069 if((BAMBOO_NUM_OF_CORE == STARTUPCORE)
2070 || (BAMBOO_NUM_OF_CORE > NUMCORES4GC - 1)) {
2071 // wrong core to receive such msg
2072 BAMBOO_EXIT(0xb005);
2074 // send response msg
2076 cache_msg_5(STARTUPCORE, GCMARKREPORT, BAMBOO_NUM_OF_CORE,
2077 gcbusystatus, gcself_numsendobjs,
2078 gcself_numreceiveobjs);
2080 send_msg_5(STARTUPCORE, GCMARKREPORT, BAMBOO_NUM_OF_CORE,
2081 gcbusystatus, gcself_numsendobjs, gcself_numreceiveobjs);
2087 case GCMARKREPORT: {
2088 // received a marked phase finish confirm response msg
2089 if(BAMBOO_NUM_OF_CORE != STARTUPCORE) {
2090 // wrong core to receive such msg
2092 BAMBOO_DEBUGPRINT_REG(msgdata[2]);
2094 BAMBOO_EXIT(0xb006);
2099 gccorestatus[msgdata[1]] = msgdata[2];
2100 gcnumsendobjs[msgdata[1]] = msgdata[3];
2101 gcnumreceiveobjs[msgdata[1]] = msgdata[4];
2107 // received a markedObj msg
2108 gc_enqueue_I(msgdata[1]);
2109 gcself_numreceiveobjs++;
2110 gcbusystatus = true;
2115 // received a start moving objs msg
2117 gcdstcore = msgdata[1];
2118 gcmovestartaddr = msgdata[2];
2119 gcblock2fill = msgdata[3];
2123 case GCMAPREQUEST: {
2124 // received a mapping info request msg
2125 void * dstptr = NULL;
2126 RuntimeHashget(gcpointertbl, msgdata[1], &dstptr);
2127 if(NULL == dstptr) {
2128 // no such pointer in this core, something is wrong
2130 BAMBOO_DEBUGPRINT_REG(msgdata[1]);
2131 BAMBOO_DEBUGPRINT_REG(msgdata[2]);
2133 BAMBOO_EXIT(0xb007);
2135 // send back the mapping info
2137 cache_msg_3(msgdata[2], GCMAPINFO, msgdata[1], (int)dstptr);
2139 send_msg_3(msgdata[2], GCMAPINFO, msgdata[1], (int)dstptr);
2146 // received a mapping info response msg
2147 if(msgdata[1] != gcobj2map) {
2148 // obj not matched, something is wrong
2150 BAMBOO_DEBUGPRINT_REG(gcobj2map);
2151 BAMBOO_DEBUGPRINT_REG(msgdata[1]);
2153 BAMBOO_EXIT(0xb008);
2155 gcmappedobj = msgdata[2];
2156 RuntimeHashadd_I(gcpointertbl, gcobj2map, gcmappedobj);
2162 case GCLOBJREQUEST: {
2163 // received a large objs info request msg
2164 transferMarkResults_I();
2169 // received a large objs info response msg
2172 if(BAMBOO_NUM_OF_CORE > NUMCORES4GC - 1) {
2174 BAMBOO_DEBUGPRINT_REG(msgdata[2]);
2176 BAMBOO_EXIT(0xb009);
2178 // store the mark result info
2179 int cnum = msgdata[2];
2180 gcloads[cnum] = msgdata[3];
2181 if(gcheaptop < msgdata[4]) {
2182 gcheaptop = msgdata[4];
2184 // large obj info here
2185 for(int k = 5; k < msgdata[1];) {
2186 int lobj = msgdata[k++];
2187 int length = msgdata[k++];
2188 gc_lobjenqueue_I(lobj, length, cnum);
2190 } // for(int k = 5; k < msgdata[1];)
2194 case GCLOBJMAPPING: {
2195 // received a large obj mapping info msg
2196 RuntimeHashadd_I(gcpointertbl, msgdata[1], msgdata[2]);
2205 for(; msgdataindex > 0; --msgdataindex) {
2206 msgdata[msgdataindex-1] = -1;
2208 msglength = BAMBOO_MSG_BUF_LENGTH;
2211 BAMBOO_DEBUGPRINT(0xe88d);
2215 if(BAMBOO_MSG_AVAIL() != 0) {
2228 BAMBOO_DEBUGPRINT(0xe88e);
2232 /* if(isInterrupt) {
// enqueuetasks: record that object 'ptr' now satisfies one parameter slot of
// 'parameter''s task, then enumerate every combination of the task's other
// (already-enqueued) parameters/tags and insert a taskparamdescriptor into
// 'activetasks' for each executable combination.
// Non-interrupt variant: uses RUNMALLOC / genputtable (cf. enqueuetasks_I,
// which uses the _I allocator/table calls for use inside critical sections).
// NOTE(review): this excerpt is sampled -- closing braces and some
// declarations (j, enterflags, failed, return statements) are not visible.
// FIX(review): restored '&parameter' in the toiReset/toiHasNext/toiNext
// calls -- the source text was garbled by an HTML named entity ('&para;'
// rendered as the pilcrow character).
2240 int enqueuetasks(struct parameterwrapper *parameter,
2241 struct parameterwrapper *prevptr,
2242 struct ___Object___ *ptr,
2244 int numenterflags) {
2245 void * taskpointerarray[MAXTASKPARAMS];
2247 //int numparams=parameter->task->numParameters;
// numTotal counts parameters plus tag slots; one iterator per slot except
// the slot filled by 'ptr' itself.
2248 int numiterators=parameter->task->numTotal-1;
2251 struct taskdescriptor * task=parameter->task;
2253 //this add the object to parameterwrapper
2254 ObjectHashadd(parameter->objectset, (int) ptr, 0, (int) enterflags,
2255 numenterflags, enterflags==NULL);
2257 /* Add enqueued object to parameter vector */
2258 taskpointerarray[parameter->slot]=ptr;
2260 /* Reset iterators */
2261 for(j=0; j<numiterators; j++) {
2262 toiReset(&parameter->iterators[j]);
2265 /* Find initial state */
2266 for(j=0; j<numiterators; j++) {
2268 if(toiHasNext(&parameter->iterators[j],taskpointerarray OPTARG(failed)))
2269 toiNext(&parameter->iterators[j], taskpointerarray OPTARG(failed));
2271 /* Need to backtrack */
2272 toiReset(&parameter->iterators[j]);
2276 /* Nothing to enqueue */
2282 /* Enqueue current state */
2284 struct taskparamdescriptor *tpd=
2285 RUNMALLOC(sizeof(struct taskparamdescriptor));
2287 tpd->numParameters=numiterators+1;
2288 tpd->parameterArray=RUNMALLOC(sizeof(void *)*(numiterators+1));
2290 for(j=0; j<=numiterators; j++) {
2291 //store the actual parameters
2292 tpd->parameterArray[j]=taskpointerarray[j];
// Only enqueue combinations not already pending; otherwise free the
// freshly allocated descriptor to avoid a leak.
2295 if ((/*!gencontains(failedtasks, tpd)&&*/
2296 !gencontains(activetasks,tpd))) {
2297 genputtable(activetasks, tpd, tpd);
2299 RUNFREE(tpd->parameterArray);
2303 /* This loop iterates to the next parameter combination */
// Odometer-style advance: bump the last iterator; on exhaustion reset it
// and (in lines not visible here) carry into the previous one.
2304 if (numiterators==0)
2307 for(j=numiterators-1; j<numiterators; j++) {
2309 if(toiHasNext(&parameter->iterators[j],taskpointerarray OPTARG(failed)))
2310 toiNext(&parameter->iterators[j], taskpointerarray OPTARG(failed));
2312 /* Need to backtrack */
2313 toiReset(&parameter->iterators[j]);
2317 /* Nothing more to enqueue */
// enqueuetasks_I: interrupt/critical-section-safe twin of enqueuetasks.
// Identical enumeration logic, but uses ObjectHashadd_I, RUNMALLOC_I and
// genputtable_I, which (per this file's _I convention) are the variants
// callable with interrupts disabled.
// NOTE(review): sampled excerpt -- some braces/declarations not visible.
// FIX(review): restored '&parameter' in the toiReset/toiHasNext/toiNext
// calls -- the source text was garbled by an HTML named entity ('&para;'
// rendered as the pilcrow character).
2325 int enqueuetasks_I(struct parameterwrapper *parameter,
2326 struct parameterwrapper *prevptr,
2327 struct ___Object___ *ptr,
2329 int numenterflags) {
2330 void * taskpointerarray[MAXTASKPARAMS];
2332 //int numparams=parameter->task->numParameters;
// One iterator per remaining parameter/tag slot of the task.
2333 int numiterators=parameter->task->numTotal-1;
2338 struct taskdescriptor * task=parameter->task;
2340 //this add the object to parameterwrapper
2341 ObjectHashadd_I(parameter->objectset, (int) ptr, 0, (int) enterflags,
2342 numenterflags, enterflags==NULL);
2344 /* Add enqueued object to parameter vector */
2345 taskpointerarray[parameter->slot]=ptr;
2347 /* Reset iterators */
2348 for(j=0; j<numiterators; j++) {
2349 toiReset(&parameter->iterators[j]);
2352 /* Find initial state */
2353 for(j=0; j<numiterators; j++) {
2355 if(toiHasNext(&parameter->iterators[j],taskpointerarray OPTARG(failed)))
2356 toiNext(&parameter->iterators[j], taskpointerarray OPTARG(failed));
2358 /* Need to backtrack */
2359 toiReset(&parameter->iterators[j]);
2363 /* Nothing to enqueue */
2369 /* Enqueue current state */
2371 struct taskparamdescriptor *tpd=
2372 RUNMALLOC_I(sizeof(struct taskparamdescriptor));
2374 tpd->numParameters=numiterators+1;
2375 tpd->parameterArray=RUNMALLOC_I(sizeof(void *)*(numiterators+1));
2377 for(j=0; j<=numiterators; j++) {
2378 //store the actual parameters
2379 tpd->parameterArray[j]=taskpointerarray[j];
// Skip duplicates already pending in activetasks; free the descriptor's
// array otherwise to avoid leaking the speculative allocation.
2382 if ((/*!gencontains(failedtasks, tpd)&&*/
2383 !gencontains(activetasks,tpd))) {
2384 genputtable_I(activetasks, tpd, tpd);
2386 RUNFREE(tpd->parameterArray);
2390 /* This loop iterates to the next parameter combination */
// Odometer-style advance over the iterators (carry logic partly outside
// this sampled view).
2391 if (numiterators==0)
2394 for(j=numiterators-1; j<numiterators; j++) {
2396 if(toiHasNext(&parameter->iterators[j], taskpointerarray OPTARG(failed)))
2397 toiNext(&parameter->iterators[j], taskpointerarray OPTARG(failed));
2399 /* Need to backtrack */
2400 toiReset(&parameter->iterators[j]);
2404 /* Nothing more to enqueue */
2418 int containstag(struct ___Object___ *ptr,
2419 struct ___TagDescriptor___ *tag);
2421 #ifndef MULTICORE_GC
// releasewritelock_r: release a write lock while installing a redirect to
// 'redirectlock'.  The owning core is derived from the lock value
// ((lock >> 5) % BAMBOO_TOTALCORE); if that is this core the lock table is
// updated locally inside a critical section, otherwise a REDIRECTRELEASE
// message is sent to the owning core.
// Only compiled in the non-GC build (#ifndef MULTICORE_GC above).
// NOTE(review): sampled excerpt -- the else branch/closing braces and the
// declaration of rwlock_obj are not visible here.
2422 void releasewritelock_r(void * lock, void * redirectlock) {
2424 int reallock = (int)lock;
// Hash the lock address onto a core id; assumes lock values are >= 32-byte
// aligned so the low 5 bits carry no information -- TODO confirm.
2425 targetcore = (reallock >> 5) % BAMBOO_TOTALCORE;
2428 BAMBOO_DEBUGPRINT(0xe671);
2429 BAMBOO_DEBUGPRINT_REG((int)lock);
2430 BAMBOO_DEBUGPRINT_REG(reallock);
2431 BAMBOO_DEBUGPRINT_REG(targetcore);
2434 if(targetcore == BAMBOO_NUM_OF_CORE) {
2435 BAMBOO_START_CRITICAL_SECTION_LOCK();
2437 BAMBOO_DEBUGPRINT(0xf001);
2439 // reside on this core
2440 if(!RuntimeHashcontainskey(locktbl, reallock)) {
2441 // no locks for this object, something is wrong
2442 BAMBOO_EXIT(0xa011);
2445 struct LockValue * lockvalue = NULL;
2447 BAMBOO_DEBUGPRINT(0xe672);
2449 RuntimeHashget(locktbl, reallock, &rwlock_obj);
2450 lockvalue = (struct LockValue *)rwlock_obj;
2452 BAMBOO_DEBUGPRINT_REG(lockvalue->value);
// Record the redirect target so future acquirers are forwarded.
2455 lockvalue->redirectlock = (int)redirectlock;
2457 BAMBOO_DEBUGPRINT_REG(lockvalue->value);
2460 BAMBOO_CLOSE_CRITICAL_SECTION_LOCK();
2462 BAMBOO_DEBUGPRINT(0xf000);
2466 // send lock release with redirect info msg
2467 // for 32 bit machine, the size is always 4 words
2468 send_msg_4(targetcore, REDIRECTRELEASE, 1, (int)lock, (int)redirectlock);
// executetasks: main task-dispatch loop of this core.  Repeatedly pops a
// taskparamdescriptor from 'activetasks', acquires (in sorted order) the
// write locks covering its parameters, re-validates that each parameter is
// still queued / still has the required flags and tags, invokes the task
// function, then releases all locks and frees the descriptor.
// NOTE(review): sampled excerpt -- many braces, declarations (i, j, grount,
// islock, numparams, numtotal, ...) and failure-path jumps are not visible.
2473 void executetasks() {
2474 void * taskpointerarray[MAXTASKPARAMS+OFFSET];
2477 struct ___Object___ * tmpparam = NULL;
2478 struct parameterdescriptor * pd=NULL;
2479 struct parameterwrapper *pw=NULL;
2489 while(hashsize(activetasks)>0) {
2494 BAMBOO_DEBUGPRINT(0xe990);
2497 /* See if there are any active tasks */
2498 if (hashsize(activetasks)>0) {
2501 #ifdef ACCURATEPROFILE
2502 profileTaskStart("tpd checking");
// Take one pending task descriptor out of the active set.
2506 currtpd=(struct taskparamdescriptor *) getfirstkey(activetasks);
2507 genfreekey(activetasks, currtpd);
2509 numparams=currtpd->task->numParameters;
2510 numtotal=currtpd->task->numTotal;
2512 // clear the lockRedirectTbl
2513 // (TODO, this table should be empty after all locks are released)
2515 for(j = 0; j < MAXTASKPARAMS; j++) {
2516 runtime_locks[j].redirectlock = 0;
2517 runtime_locks[j].value = 0;
2519 // get all required locks
2520 runtime_locklen = 0;
2521 // check which locks are needed
2522 for(i = 0; i < numparams; i++) {
2523 void * param = currtpd->parameterArray[i];
// The startup object needs no lock; place it directly in the call vector.
2527 if(((struct ___Object___ *)param)->type == STARTUPTYPE) {
2529 taskpointerarray[i+OFFSET]=param;
// Objects without an explicit lock field are locked on their own address.
2532 if(((struct ___Object___ *)param)->lock == NULL) {
2533 tmplock = (int)param;
2535 tmplock = (int)(((struct ___Object___ *)param)->lock);
2537 // insert into the locks array
// Keep runtime_locks sorted by lock value (insertion sort, duplicates
// collapsed) so locks are always acquired in a global order -- this is
// the deadlock-avoidance invariant; do not reorder.
2538 for(j = 0; j < runtime_locklen; j++) {
2539 if(runtime_locks[j].value == tmplock) {
2542 } else if(runtime_locks[j].value > tmplock) {
2547 int h = runtime_locklen;
2549 runtime_locks[h].redirectlock = runtime_locks[h-1].redirectlock;
2550 runtime_locks[h].value = runtime_locks[h-1].value;
2552 runtime_locks[j].value = tmplock;
2553 runtime_locks[j].redirectlock = (int)param;
2556 } // line 2713: for(i = 0; i < numparams; i++)
2557 // grab these required locks
2559 BAMBOO_DEBUGPRINT(0xe991);
2561 for(i = 0; i < runtime_locklen; i++) {
2562 int * lock = (int *)(runtime_locks[i].redirectlock);
2564 // require locks for this parameter if it is not a startup object
2566 BAMBOO_DEBUGPRINT_REG((int)lock);
2567 BAMBOO_DEBUGPRINT_REG((int)(runtime_locks[i].value));
2570 BAMBOO_START_CRITICAL_SECTION();
2572 BAMBOO_DEBUGPRINT(0xf001);
2575 //isInterrupt = false;
// Spin until the asynchronous lock request is answered; the response
// handler stores the grant/deny result in 'lockresult'.
2578 BAMBOO_WAITING_FOR_LOCK();
2582 while(BAMBOO_WAITING_FOR_LOCK() != -1) {
2586 grount = lockresult;
2596 //isInterrupt = true;
2598 BAMBOO_CLOSE_CRITICAL_SECTION();
2600 BAMBOO_DEBUGPRINT(0xf000);
2606 BAMBOO_DEBUGPRINT(0xe992);
2607 BAMBOO_DEBUGPRINT_REG(lock);
2609 // can not get the lock, try later
2610 // releas all grabbed locks for previous parameters
// Back out: release locks 0..i-1, re-enqueue the descriptor and retry
// later instead of blocking while holding partial locks.
2611 for(j = 0; j < i; ++j) {
2612 lock = (int*)(runtime_locks[j].redirectlock);
2613 releasewritelock(lock);
2615 genputtable(activetasks, currtpd, currtpd);
2616 if(hashsize(activetasks) == 1) {
2617 // only one task right now, wait a little while before next try
2623 #ifdef ACCURATEPROFILE
2624 // fail, set the end of the checkTaskInfo
2629 } // line 2794: if(grount == 0)
2630 } // line 2752: for(i = 0; i < runtime_locklen; i++)
2633 BAMBOO_DEBUGPRINT(0xe993);
2635 /* Make sure that the parameters are still in the queues */
// Between enqueue time and now another task may have consumed or reflagged
// a parameter, so every precondition is rechecked under the locks.
2636 for(i=0; i<numparams; i++) {
2637 void * parameter=currtpd->parameterArray[i];
2641 BAMBOO_CACHE_FLUSH_RANGE((int)parameter,
2642 classsize[((struct ___Object___ *)parameter)->type]);
2644 tmpparam = (struct ___Object___ *)parameter;
2645 pd=currtpd->task->descriptorarray[i];
2646 pw=(struct parameterwrapper *) pd->queue;
2647 /* Check that object is still in queue */
2649 if (!ObjectHashcontainskey(pw->objectset, (int) parameter)) {
2651 BAMBOO_DEBUGPRINT(0xe994);
2652 BAMBOO_DEBUGPRINT_REG(parameter);
2654 // release grabbed locks
2655 for(j = 0; j < runtime_locklen; ++j) {
2656 int * lock = (int *)(runtime_locks[j].redirectlock);
2657 releasewritelock(lock);
2659 RUNFREE(currtpd->parameterArray);
2665 /* Check if the object's flags still meets requirements */
// The wrapper stores (andmask, checkmask) pairs; the object matches if any
// pair satisfies (flag & andmask) == checkmask.
2669 for(tmpi = 0; tmpi < pw->numberofterms; ++tmpi) {
2670 andmask=pw->intarray[tmpi*2];
2671 checkmask=pw->intarray[tmpi*2+1];
2672 if((((struct ___Object___ *)parameter)->flag&andmask)==checkmask) {
2678 // flags are never suitable
2679 // remove this obj from the queue
2681 int UNUSED, UNUSED2;
2684 BAMBOO_DEBUGPRINT(0xe995);
2685 BAMBOO_DEBUGPRINT_REG(parameter);
2687 ObjectHashget(pw->objectset, (int) parameter, (int *) &next,
2688 (int *) &enterflags, &UNUSED, &UNUSED2);
2689 ObjectHashremove(pw->objectset, (int)parameter);
2690 if (enterflags!=NULL)
2691 RUNFREE(enterflags);
2692 // release grabbed locks
2693 for(j = 0; j < runtime_locklen; ++j) {
2694 int * lock = (int *)(runtime_locks[j].redirectlock);
2695 releasewritelock(lock);
2697 RUNFREE(currtpd->parameterArray);
2701 #ifdef ACCURATEPROFILE
2702 // fail, set the end of the checkTaskInfo
2707 } // line 2878: if (!ismet)
2711 /* Check that object still has necessary tags */
2712 for(j=0; j<pd->numbertags; j++) {
2713 int slotid=pd->tagarray[2*j]+numparams;
2714 struct ___TagDescriptor___ *tagd=currtpd->parameterArray[slotid];
2715 if (!containstag(parameter, tagd)) {
2717 BAMBOO_DEBUGPRINT(0xe996);
2720 // release grabbed locks
2722 for(tmpj = 0; tmpj < runtime_locklen; ++tmpj) {
2723 int * lock = (int *)(runtime_locks[tmpj].redirectlock);
2724 releasewritelock(lock);
2727 RUNFREE(currtpd->parameterArray);
2731 } // line2911: if (!containstag(parameter, tagd))
2732 } // line 2808: for(j=0; j<pd->numbertags; j++)
2734 taskpointerarray[i+OFFSET]=parameter;
2735 } // line 2824: for(i=0; i<numparams; i++)
// Copy the remaining (tag) slots straight through.
2737 for(; i<numtotal; i++) {
2738 taskpointerarray[i+OFFSET]=currtpd->parameterArray[i];
2743 /* Actually call task */
// Slot 0 carries the parameter count, slot 1 is reserved; the task body
// reads its arguments starting at OFFSET.
2745 ((int *)taskpointerarray)[0]=currtpd->numParameters;
2746 taskpointerarray[1]=NULL;
2749 #ifdef ACCURATEPROFILE
2750 // check finish, set the end of the checkTaskInfo
2753 profileTaskStart(currtpd->task->name);
2757 BAMBOO_DEBUGPRINT(0xe997);
2759 ((void(*) (void **))currtpd->task->taskptr)(taskpointerarray);
2761 #ifdef ACCURATEPROFILE
2762 // task finish, set the end of the checkTaskInfo
2764 // new a PostTaskInfo for the post-task execution
2765 profileTaskStart("post task execution");
2769 BAMBOO_DEBUGPRINT(0xe998);
2770 BAMBOO_DEBUGPRINT_REG(islock);
2775 BAMBOO_DEBUGPRINT(0xe999);
// Release every lock taken above, following any redirects recorded in
// lockRedirectTbl while the task ran (non-GC build only).
2777 for(i = 0; i < runtime_locklen; ++i) {
2778 void * ptr = (void *)(runtime_locks[i].redirectlock);
2779 int * lock = (int *)(runtime_locks[i].value);
2781 BAMBOO_DEBUGPRINT_REG((int)ptr);
2782 BAMBOO_DEBUGPRINT_REG((int)lock);
2783 BAMBOO_DEBUGPRINT_REG(*((int*)lock+5));
2785 #ifndef MULTICORE_GC
2786 if(RuntimeHashcontainskey(lockRedirectTbl, (int)lock)) {
2788 RuntimeHashget(lockRedirectTbl, (int)lock, &redirectlock);
2789 RuntimeHashremovekey(lockRedirectTbl, (int)lock);
2790 releasewritelock_r(lock, (int *)redirectlock);
2795 releasewritelock(ptr);
2798 } // line 3015: if(islock)
2801 // post task execution finish, set the end of the postTaskInfo
2805 // Free up task parameter descriptor
2806 RUNFREE(currtpd->parameterArray);
2810 BAMBOO_DEBUGPRINT(0xe99a);
2813 } // if (hashsize(activetasks)>0)
2814 } // while(hashsize(activetasks)>0)
2816 BAMBOO_DEBUGPRINT(0xe99b);
2820 /* This function processes an objects tags */
// processtags: for each tag required by parameter descriptor 'pd', create a
// tag iterator in 'parameter' for any tag slot not yet bound (statusarray
// entry 0), mark that slot bound, and advance *iteratorcount.
// NOTE(review): sampled excerpt -- the parameter list is partly missing
// (index, statusarray, numparams are used but their declarations are not
// all visible) and closing braces are not shown.
2821 void processtags(struct parameterdescriptor *pd,
2823 struct parameterwrapper *parameter,
2824 int * iteratorcount,
2829 for(i=0; i<pd->numbertags; i++) {
// tagarray is laid out as (slotid, tagid) pairs.
2830 int slotid=pd->tagarray[2*i];
2831 int tagid=pd->tagarray[2*i+1];
2833 if (statusarray[slotid+numparams]==0) {
2834 parameter->iterators[*iteratorcount].istag=1;
2835 parameter->iterators[*iteratorcount].tagid=tagid;
2836 parameter->iterators[*iteratorcount].slot=slotid+numparams;
// Remember which object slot this tag iterator hangs off.
2837 parameter->iterators[*iteratorcount].tagobjectslot=index;
2838 statusarray[slotid+numparams]=1;
// processobject: create an object iterator (istag=0) over the object set of
// parameter descriptor 'pd' for slot 'index', mark the slot bound in
// statusarray, and bind any already-enqueued tags of the descriptor to the
// iterator so they can narrow the search.
// NOTE(review): sampled excerpt -- parts of the parameter list, the tagcount
// declaration/increment and closing braces are not visible.
2845 void processobject(struct parameterwrapper *parameter,
2847 struct parameterdescriptor *pd,
2853 struct ObjectHash * objectset=
2854 ((struct parameterwrapper *)pd->queue)->objectset;
2856 parameter->iterators[*iteratorcount].istag=0;
2857 parameter->iterators[*iteratorcount].slot=index;
2858 parameter->iterators[*iteratorcount].objectset=objectset;
2859 statusarray[index]=1;
2861 for(i=0; i<pd->numbertags; i++) {
2862 int slotid=pd->tagarray[2*i];
2863 //int tagid=pd->tagarray[2*i+1];
2864 if (statusarray[slotid+numparams]!=0) {
2865 /* This tag has already been enqueued, use it to narrow search */
2866 parameter->iterators[*iteratorcount].tagbindings[tagcount]=
2871 parameter->iterators[*iteratorcount].numtags=tagcount;
2876 /* This function builds the iterators for a task & parameter */
// builditerators: given that parameter slot 'index' of 'task' is the seed
// (already bound), choose an evaluation order for the remaining parameters
// and tags and materialize one iterator per slot in 'parameter'.
// Ordering heuristic: (1) the seed's own tags, (2) objects constrained by
// an already-bound tag (cheapest to narrow), (3) objects that have tags at
// all, (4) everything else.
2878 void builditerators(struct taskdescriptor * task,
2880 struct parameterwrapper * parameter) {
// statusarray[i] != 0 marks slot i as already handled by an iterator.
2881 int statusarray[MAXTASKPARAMS];
2883 int numparams=task->numParameters;
2884 int iteratorcount=0;
2885 for(i=0; i<MAXTASKPARAMS; i++) statusarray[i]=0;
2887 statusarray[index]=1; /* Initial parameter */
2888 /* Process tags for initial iterator */
2890 processtags(task->descriptorarray[index], index, parameter,
2891 &iteratorcount, statusarray, numparams);
2895 /* Check for objects with existing tags */
2896 for(i=0; i<numparams; i++) {
2897 if (statusarray[i]==0) {
2898 struct parameterdescriptor *pd=task->descriptorarray[i];
2900 for(j=0; j<pd->numbertags; j++) {
2901 int slotid=pd->tagarray[2*j];
2902 if(statusarray[slotid+numparams]!=0) {
2903 processobject(parameter, i, pd, &iteratorcount, statusarray,
2905 processtags(pd, i, parameter, &iteratorcount, statusarray, numparams);
2912 /* Next do objects w/ unbound tags*/
2914 for(i=0; i<numparams; i++) {
2915 if (statusarray[i]==0) {
2916 struct parameterdescriptor *pd=task->descriptorarray[i];
2917 if (pd->numbertags>0) {
2918 processobject(parameter, i, pd, &iteratorcount, statusarray, numparams);
2919 processtags(pd, i, parameter, &iteratorcount, statusarray, numparams);
2925 /* Nothing with a tag enqueued */
2927 for(i=0; i<numparams; i++) {
2928 if (statusarray[i]==0) {
2929 struct parameterdescriptor *pd=task->descriptorarray[i];
2930 processobject(parameter, i, pd, &iteratorcount, statusarray, numparams);
2931 processtags(pd, i, parameter, &iteratorcount, statusarray, numparams);
// NOTE(review): body of a debug-dump routine whose header line is not
// visible in this sampled excerpt (presumably 'void printdebug()').  It
// walks every task registered for this core and prints each parameter's
// queued objects with their flags and tags.  The initial guard skips cores
// above the active range.
2944 if(BAMBOO_NUM_OF_CORE > NUMCORESACTIVE - 1) {
2947 for(i=0; i<numtasks[BAMBOO_NUM_OF_CORE]; i++) {
2948 struct taskdescriptor * task=taskarray[BAMBOO_NUM_OF_CORE][i];
2950 printf("%s\n", task->name);
2952 for(j=0; j<task->numParameters; j++) {
2953 struct parameterdescriptor *param=task->descriptorarray[j];
2954 struct parameterwrapper *parameter=param->queue;
2955 struct ObjectHash * set=parameter->objectset;
2956 struct ObjectIterator objit;
2958 printf("  Parameter %d\n", j);
2960 ObjectHashiterator(set, &objit);
2961 while(ObjhasNext(&objit)) {
2962 struct ___Object___ * obj=(struct ___Object___ *)Objkey(&objit);
2963 struct ___Object___ * tagptr=obj->___tags___;
2964 int nonfailed=Objdata4(&objit);
2965 int numflags=Objdata3(&objit);
2966 int flags=Objdata2(&objit);
2969 printf("    Contains %lx\n", obj);
2970 printf("      flag=%d\n", obj->flag);
// ___tags___ is either a single tag descriptor or an array of them; the
// branch for the single-tag case prints it directly.
2973 } else if (tagptr->type==TAGTYPE) {
2975 printf("tag=%lx\n",tagptr);
2981 struct ArrayObject *ao=(struct ArrayObject *)tagptr;
// ___cachedCode___ holds the number of tags stored in the array.
2982 for(; tagindex<ao->___cachedCode___; tagindex++) {
2984 printf("tag=%lx\n",ARRAYGET(ao, struct ___TagDescriptor___*,
2997 /* This function processes the task information to create queues for
2998    each parameter type. */
// processtasks: one-time setup for this core's task table.  For every task
// registered for BAMBOO_NUM_OF_CORE, allocate the object set backing each
// parameter queue, link the wrapper back to its task, then build the
// parameter/tag iterators.  Cores above the active range are skipped by the
// initial guard.
3000 void processtasks() {
3002 if(BAMBOO_NUM_OF_CORE > NUMCORESACTIVE - 1) {
3005 for(i=0; i<numtasks[BAMBOO_NUM_OF_CORE]; i++) {
3006 struct taskdescriptor * task=taskarray[BAMBOO_NUM_OF_CORE][i];
3009 /* Build objectsets */
3010 for(j=0; j<task->numParameters; j++) {
3011 struct parameterdescriptor *param=task->descriptorarray[j];
3012 struct parameterwrapper *parameter=param->queue;
3013 parameter->objectset=allocateObjectHash(10);
3014 parameter->task=task;
3017 /* Build iterators for parameters */
// Second pass: iterators may reference other parameters' object sets, so
// all sets must exist before any iterator is built.
3018 for(j=0; j<task->numParameters; j++) {
3019 struct parameterdescriptor *param=task->descriptorarray[j];
3020 struct parameterwrapper *parameter=param->queue;
3021 builditerators(task, j, parameter);
// toiReset: rewind a tag/object iterator to its initial position.  The
// branch taken depends on the iterator kind: tag iterator, tag-constrained
// object iterator (numtags>0), or plain object-set iterator (re-seeded via
// ObjectHashiterator).
// NOTE(review): sampled excerpt -- the istag branch body and closing braces
// are not visible.
3026 void toiReset(struct tagobjectiterator * it) {
3029 } else if (it->numtags>0) {
3032 ObjectHashiterator(it->objectset, &it->it);
// toiHasNext: report whether the iterator has another binding to yield.
// Three iterator kinds:
//   (1) tag iterator -- scan the tags attached to the object already bound
//       in objectarray[it->tagobjectslot] for one matching it->tagid;
//   (2) tag-constrained object iterator (numtags>0) -- walk the object(s)
//       referenced by the first bound tag, accepting only objects that are
//       still in it->objectset and carry all remaining bound tags;
//   (3) plain object iterator -- delegate to the ObjectHash iterator.
// NOTE(review): sampled excerpt -- the istag test, return statements and
// several braces are not visible.
3036 int toiHasNext(struct tagobjectiterator *it,
3037 void ** objectarray OPTARG(int * failed)) {
3040 /* Get object with tags */
3041 struct ___Object___ *obj=objectarray[it->tagobjectslot];
3042 struct ___Object___ *tagptr=obj->___tags___;
// Single-tag case: only index 0 exists, so it matches at most once.
3043 if (tagptr->type==TAGTYPE) {
3044 if ((it->tagobjindex==0)&& /* First object */
3045 (it->tagid==((struct ___TagDescriptor___ *)tagptr)->flag)) /* Right tag type */
3050 struct ArrayObject *ao=(struct ArrayObject *) tagptr;
3051 int tagindex=it->tagobjindex;
3052 for(; tagindex<ao->___cachedCode___; tagindex++) {
3053 struct ___TagDescriptor___ *td=
3054 ARRAYGET(ao, struct ___TagDescriptor___ *, tagindex);
3055 if (td->flag==it->tagid) {
3056 it->tagobjindex=tagindex; /* Found right type of tag */
3062 } else if (it->numtags>0) {
3063 /* Use tags to locate appropriate objects */
3064 struct ___TagDescriptor___ *tag=objectarray[it->tagbindings[0]];
3065 struct ___Object___ *objptr=tag->flagptr;
// flagptr points at a single object or an OBJECTARRAYTYPE of candidates.
3067 if (objptr->type!=OBJECTARRAYTYPE) {
3068 if (it->tagobjindex>0)
3070 if (!ObjectHashcontainskey(it->objectset, (int) objptr))
3072 for(i=1; i<it->numtags; i++) {
3073 struct ___TagDescriptor___ *tag2=objectarray[it->tagbindings[i]];
3074 if (!containstag(objptr,tag2))
3079 struct ArrayObject *ao=(struct ArrayObject *) objptr;
3082 for(tagindex=it->tagobjindex;tagindex<ao->___cachedCode___;tagindex++) {
3083 struct ___Object___ *objptr=ARRAYGET(ao, struct ___Object___*, tagindex);
3084 if (!ObjectHashcontainskey(it->objectset, (int) objptr))
3086 for(i=1; i<it->numtags; i++) {
3087 struct ___TagDescriptor___ *tag2=objectarray[it->tagbindings[i]];
3088 if (!containstag(objptr,tag2))
// Remember where the scan stopped so toiNext resumes from here.
3091 it->tagobjindex=tagindex;
3096 it->tagobjindex=tagindex;
3100 return ObjhasNext(&it->it);
// containstag: return whether object 'ptr' is among the objects bound to
// 'tag'.  tag->flagptr is either a single object (compared directly, in
// lines not visible here) or an OBJECTARRAYTYPE whose first
// ___cachedCode___ entries are scanned.
// NOTE(review): sampled excerpt -- the single-object branch and the return
// statements are not visible.
3104 int containstag(struct ___Object___ *ptr,
3105 struct ___TagDescriptor___ *tag) {
3107 struct ___Object___ * objptr=tag->flagptr;
3108 if (objptr->type==OBJECTARRAYTYPE) {
3109 struct ArrayObject *ao=(struct ArrayObject *)objptr;
3110 for(j=0; j<ao->___cachedCode___; j++) {
3111 if (ptr==ARRAYGET(ao, struct ___Object___*, j)) {
3121 void toiNext(struct tagobjectiterator *it,
3122 void ** objectarray OPTARG(int * failed)) {
3123 /* hasNext has all of the intelligence */
3126 /* Get object with tags */
3127 struct ___Object___ *obj=objectarray[it->tagobjectslot];
3128 struct ___Object___ *tagptr=obj->___tags___;
3129 if (tagptr->type==TAGTYPE) {
3131 objectarray[it->slot]=tagptr;
3133 struct ArrayObject *ao=(struct ArrayObject *) tagptr;
3134 objectarray[it->slot]=
3135 ARRAYGET(ao, struct ___TagDescriptor___ *, it->tagobjindex++);
3137 } else if (it->numtags>0) {
3138 /* Use tags to locate appropriate objects */
3139 struct ___TagDescriptor___ *tag=objectarray[it->tagbindings[0]];
3140 struct ___Object___ *objptr=tag->flagptr;
3141 if (objptr->type!=OBJECTARRAYTYPE) {
3143 objectarray[it->slot]=objptr;
3145 struct ArrayObject *ao=(struct ArrayObject *) objptr;
3146 objectarray[it->slot]=
3147 ARRAYGET(ao, struct ___Object___ *, it->tagobjindex++);
3150 /* Iterate object */
3151 objectarray[it->slot]=(void *)Objkey(&it->it);