3 #include "multicoreruntime.h"
4 #include "runtime_arch.h"
5 #include "GenericHashtable.h"
7 // data structures for task invocation
// activetasks: hashtable of task parameter descriptors that are ready to run;
// currtpd: descriptor of the task currently being dispatched/executed.
8 struct genhashtable * activetasks;
9 struct taskparamdescriptor * currtpd;
11 // specific functions used inside critical sections
// NOTE(review): the _I suffix appears to mark routines that must be called
// only inside a critical section (interrupts disabled) -- confirm against
// callers such as checkObjQueue, which wraps calls in
// BAMBOO_START/CLOSE_CRITICAL_SECTION_OBJ_QUEUE().
12 void enqueueObject_I(void * ptr,
13 struct parameterwrapper ** queues,
15 int enqueuetasks_I(struct parameterwrapper *parameter,
16 struct parameterwrapper *prevptr,
17 struct ___Object___ *ptr,
22 inline __attribute__((always_inline))
23 void setupsmemmode(void) {
// Selects the shared-memory allocation policy used by smemalloc().
// Each assignment below sits in a compile-time conditional branch
// (not visible here); exactly one of SMEMLOCAL / SMEMFIXED / SMEMMIXED /
// SMEMGLOBAL is chosen per build, falling back to local mode.
25 bamboo_smem_mode = SMEMLOCAL;
27 bamboo_smem_mode = SMEMFIXED;
29 bamboo_smem_mode = SMEMMIXED;
31 bamboo_smem_mode = SMEMGLOBAL;
33 // default: use local mode
34 bamboo_smem_mode = SMEMLOCAL;
36 } // void setupsmemmode(void)
39 inline __attribute__((always_inline))
40 void initruntimedata() {
// Initializes per-core runtime state: on the startup core, the per-core
// status/object-counter (and, in GC/profiling builds, GC bookkeeping)
// arrays; on every core, the message buffers, GC-related tables, the lock
// table, the lock-redirect tables, the incoming object queue, and the
// task-profiling bookkeeping.
// NOTE(review): several initializations here are guarded by #ifdef branches
// (MULTICORE_GC, PROFILE, ...) whose guards are elided in this view.
42 // initialize the arrays
43 if(STARTUPCORE == BAMBOO_NUM_OF_CORE) {
44 // startup core to initialize corestatus[]
45 for(i = 0; i < NUMCORES; ++i) {
48 numreceiveobjs[i] = 0;
50 // initialize the profile data arrays
56 gcnumreceiveobjs[i] = 0;
58 gcrequiredmems[i] = 0;
60 gcfilledblocks[i] = 0;
62 } // for(i = 0; i < NUMCORES; ++i)
// Per-core counters and message buffers (all cores).
72 self_numreceiveobjs = 0;
74 for(i = 0; i < BAMBOO_MSG_BUF_LENGTH; ++i) {
79 msglength = BAMBOO_MSG_BUF_LENGTH;
80 for(i = 0; i < BAMBOO_OUT_BUF_LENGTH; ++i) {
90 bamboo_cur_msp = NULL;
92 totransobjqueue = createQueue();
// GC state: start in the "no collection in progress" phase.
97 gcphase = FINISHPHASE;
99 gcself_numsendobjs = 0;
100 gcself_numreceiveobjs = 0;
101 gcmarkedptrbound = 0;
102 gcpointertbl = allocateRuntimeHash(20);
114 gcsbstarttbl = BAMBOO_BASE_VA;
115 gcsmemtbl = RUNMALLOC_I(sizeof(int)*gcnumblock);
117 // create the lock table, lockresult table and obj queue
120 (struct RuntimeNode **) RUNMALLOC_I(sizeof(struct RuntimeNode *)*20);
121 /* Set allocation blocks*/
122 locktable.listhead=NULL;
123 locktable.listtail=NULL;
125 locktable.numelements = 0;
130 lockRedirectTbl = allocateRuntimeHash(20);
131 objRedirectLockTbl = allocateRuntimeHash(20);
136 objqueue.head = NULL;
137 objqueue.tail = NULL;
143 //isInterrupt = true;
146 taskInfoOverflow = false;
147 /*interruptInfoIndex = 0;
148 interruptInfoOverflow = false;*/
152 inline __attribute__((always_inline))
153 void disruntimedata() {
// Teardown counterpart of initruntimedata(): releases the hash tables,
// the lock table buckets, the active-task table, and the current task
// parameter descriptor (if any) allocated at init / dispatch time.
155 freeRuntimeHash(gcpointertbl);
157 freeRuntimeHash(lockRedirectTbl);
158 freeRuntimeHash(objRedirectLockTbl);
159 RUNFREE(locktable.bucket);
161 genfreehashtable(activetasks);
162 if(currtpd != NULL) {
163 RUNFREE(currtpd->parameterArray);
169 inline __attribute__((always_inline))
170 bool checkObjQueue() {
// Drains objqueue (objects transferred from other cores). For each entry:
// inside the obj-queue critical section, grab the object's write lock,
// flush its cache lines, and enqueue it into every (taskindex, paramindex)
// parameter queue recorded in the transfer message. If the lock cannot be
// obtained, the entry is re-appended to objqueue unless a newer transfer of
// the same object is already queued. Returns whether anything was enqueued
// (return statements elided in this view -- confirm).
172 struct transObjInfo * objInfo = NULL;
176 #ifdef ACCURATEPROFILE
177 bool isChecking = false;
178 if(!isEmpty(&objqueue)) {
179 profileTaskStart("objqueue checking");
181 } // if(!isEmpty(&objqueue))
185 while(!isEmpty(&objqueue)) {
187 BAMBOO_START_CRITICAL_SECTION_OBJ_QUEUE();
189 BAMBOO_DEBUGPRINT(0xf001);
192 //isInterrupt = false;
195 BAMBOO_DEBUGPRINT(0xeee1);
// Pop the next transferred object.
198 objInfo = (struct transObjInfo *)getItem(&objqueue);
199 obj = objInfo->objptr;
201 BAMBOO_DEBUGPRINT_REG((int)obj);
203 // grab lock and flush the obj
207 BAMBOO_WAITING_FOR_LOCK();
208 } // while(!lockflag)
211 BAMBOO_DEBUGPRINT_REG(grount);
// Flush the object header first, then the full object, so the local core
// sees the sender's writes (object size comes from classsize[type]).
226 BAMBOO_CACHE_FLUSH_RANGE((int)obj,sizeof(int));
227 BAMBOO_CACHE_FLUSH_RANGE((int)obj,
228 classsize[((struct ___Object___ *)obj)->type]);
230 // enqueue the object
231 for(k = 0; k < objInfo->length; ++k) {
// queues[] stores (taskindex, paramindex) pairs describing which
// parameter queues on this core the object belongs in.
232 int taskindex = objInfo->queues[2 * k];
233 int paramindex = objInfo->queues[2 * k + 1];
234 struct parameterwrapper ** queues =
235 &(paramqueues[BAMBOO_NUM_OF_CORE][taskindex][paramindex]);
237 BAMBOO_DEBUGPRINT_REG(taskindex);
238 BAMBOO_DEBUGPRINT_REG(paramindex);
239 struct ___Object___ * tmpptr = (struct ___Object___ *)obj;
240 tprintf("Process %x(%d): receive obj %x(%lld), ptrflag %x\n",
241 BAMBOO_NUM_OF_CORE, BAMBOO_NUM_OF_CORE, (int)obj,
242 (long)obj, tmpptr->flag);
244 enqueueObject_I(obj, queues, 1);
246 BAMBOO_DEBUGPRINT_REG(hashsize(activetasks));
248 } // for(k = 0; k < objInfo->length; ++k)
249 releasewritelock_I(obj);
250 RUNFREE(objInfo->queues);
// Lock-failed path: re-queue unless a fresher transfer of the same
// object is already pending.
254 // put it at the end of the queue if no update version in the queue
255 struct QueueItem * qitem = getHead(&objqueue);
256 struct QueueItem * prev = NULL;
257 while(qitem != NULL) {
258 struct transObjInfo * tmpinfo =
259 (struct transObjInfo *)(qitem->objectptr);
260 if(tmpinfo->objptr == obj) {
261 // the same object in the queue, which should be enqueued
262 // recently. Current one is outdated, do not re-enqueue it
263 RUNFREE(objInfo->queues);
268 } // if(tmpinfo->objptr == obj)
269 qitem = getNextQueueItem(prev);
270 } // while(qitem != NULL)
271 // try to execute active tasks already enqueued first
272 addNewItem_I(&objqueue, objInfo);
274 //isInterrupt = true;
277 BAMBOO_CLOSE_CRITICAL_SECTION_OBJ_QUEUE();
279 BAMBOO_DEBUGPRINT(0xf000);
283 BAMBOO_CLOSE_CRITICAL_SECTION_OBJ_QUEUE();
285 BAMBOO_DEBUGPRINT(0xf000);
287 } // while(!isEmpty(&objqueue))
290 #ifdef ACCURATEPROFILE
298 BAMBOO_DEBUGPRINT(0xee02);
303 inline __attribute__((always_inline))
304 void checkCoreStatus() {
305 bool allStall = false;
309 (waitconfirm && (numconfirm == 0))) {
311 BAMBOO_DEBUGPRINT(0xee04);
312 BAMBOO_DEBUGPRINT_REG(waitconfirm);
314 BAMBOO_START_CRITICAL_SECTION_STATUS();
316 BAMBOO_DEBUGPRINT(0xf001);
318 corestatus[BAMBOO_NUM_OF_CORE] = 0;
319 numsendobjs[BAMBOO_NUM_OF_CORE] = self_numsendobjs;
320 numreceiveobjs[BAMBOO_NUM_OF_CORE] = self_numreceiveobjs;
321 // check the status of all cores
324 BAMBOO_DEBUGPRINT_REG(NUMCORES);
326 for(i = 0; i < NUMCORES; ++i) {
328 BAMBOO_DEBUGPRINT(0xe000 + corestatus[i]);
330 if(corestatus[i] != 0) {
334 } // for(i = 0; i < NUMCORES; ++i)
336 // check if the sum of send objs and receive obj are the same
337 // yes->check if the info is the latest; no->go on executing
339 for(i = 0; i < NUMCORES; ++i) {
340 sumsendobj += numsendobjs[i];
342 BAMBOO_DEBUGPRINT(0xf000 + numsendobjs[i]);
344 } // for(i = 0; i < NUMCORES; ++i)
345 for(i = 0; i < NUMCORES; ++i) {
346 sumsendobj -= numreceiveobjs[i];
348 BAMBOO_DEBUGPRINT(0xf000 + numreceiveobjs[i]);
350 } // for(i = 0; i < NUMCORES; ++i)
351 if(0 == sumsendobj) {
353 // the first time found all cores stall
354 // send out status confirm msg to all other cores
355 // reset the corestatus array too
357 BAMBOO_DEBUGPRINT(0xee05);
359 corestatus[BAMBOO_NUM_OF_CORE] = 1;
360 for(i = 1; i < NUMCORES; ++i) {
362 // send status confirm msg to core i
363 send_msg_1(i, STATUSCONFIRM);
364 } // for(i = 1; i < NUMCORES; ++i)
366 numconfirm = NUMCORES - 1;
368 // all the core status info are the latest
369 // terminate; for profiling mode, send request to all
370 // other cores to pour out profiling data
372 BAMBOO_DEBUGPRINT(0xee06);
376 totalexetime = BAMBOO_GET_EXE_TIME();
378 BAMBOO_DEBUGPRINT(BAMBOO_GET_EXE_TIME());
379 BAMBOO_DEBUGPRINT_REG(total_num_t6); // TODO for test
380 BAMBOO_DEBUGPRINT(0xbbbbbbbb);
382 // profile mode, send msgs to other cores to request pouring
383 // out progiling data
385 BAMBOO_CLOSE_CRITICAL_SECTION_STATUS();
387 BAMBOO_DEBUGPRINT(0xf000);
389 for(i = 1; i < NUMCORES; ++i) {
390 // send profile request msg to core i
391 send_msg_2(i, PROFILEOUTPUT, totalexetime);
392 } // for(i = 1; i < NUMCORES; ++i)
393 // pour profiling data on startup core
396 BAMBOO_START_CRITICAL_SECTION_STATUS();
398 BAMBOO_DEBUGPRINT(0xf001);
400 profilestatus[BAMBOO_NUM_OF_CORE] = 0;
401 // check the status of all cores
404 BAMBOO_DEBUGPRINT_REG(NUMCORES);
406 for(i = 0; i < NUMCORES; ++i) {
408 BAMBOO_DEBUGPRINT(0xe000 + profilestatus[i]);
410 if(profilestatus[i] != 0) {
414 } // for(i = 0; i < NUMCORES; ++i)
417 BAMBOO_CLOSE_CRITICAL_SECTION_STATUS();
419 BAMBOO_DEBUGPRINT(0xf000);
429 terminate(); // All done.
430 } // if(!waitconfirm)
432 // still some objects on the fly on the network
433 // reset the waitconfirm and numconfirm
435 BAMBOO_DEBUGPRINT(0xee07);
439 } // if(0 == sumsendobj)
441 // not all cores are stall, keep on waiting
443 BAMBOO_DEBUGPRINT(0xee08);
448 BAMBOO_CLOSE_CRITICAL_SECTION_STATUS();
450 BAMBOO_DEBUGPRINT(0xf000);
452 } // if((!waitconfirm) ||
455 // main function for each core
// Per-core entry point. Initializes runtime data and architecture state,
// then: cores beyond NUMCORES-1 only service communication; executing cores
// build the active-task table (startup core also creates the startup
// object) and loop -- drain incoming messages, process pending transferred
// objects, execute tasks, and report stall to the startup core (which
// instead runs the checkCoreStatus termination protocol).
456 inline void run(void * arg) {
460 bool sendStall = false;
462 bool tocontinue = false;
464 corenum = BAMBOO_GET_NUM_OF_CORE();
466 BAMBOO_DEBUGPRINT(0xeeee);
467 BAMBOO_DEBUGPRINT_REG(corenum);
468 BAMBOO_DEBUGPRINT(STARTUPCORE);
471 // initialize runtime data structures
474 // other architecture related initialization
478 initializeexithandler();
480 // main process of the execution module
481 if(BAMBOO_NUM_OF_CORE > NUMCORES - 1) {
482 // non-executing cores, only processing communications
485 BAMBOO_DEBUGPRINT(0xee01);
486 BAMBOO_DEBUGPRINT_REG(taskInfoIndex);
487 BAMBOO_DEBUGPRINT_REG(taskInfoOverflow);
488 profileTaskStart("msg handling");
492 //isInterrupt = false;
496 /* Create queue of active tasks */
498 genallocatehashtable((unsigned int(*) (void *)) &hashCodetpd,
499 (int(*) (void *,void *)) &comparetpd);
501 /* Process task information */
504 if(STARTUPCORE == BAMBOO_NUM_OF_CORE) {
505 /* Create startup object */
506 createstartupobject(argc, argv);
510 BAMBOO_DEBUGPRINT(0xee00);
// Main scheduling loop (loop construct elided in this view).
515 // check if need to do GC
519 // check if there are new active tasks can be executed
// Drain all pending inter-core messages before re-checking work.
526 while(receiveObject() != -1) {
531 BAMBOO_DEBUGPRINT(0xee01);
534 // check if there are some pending objects,
535 // if yes, enqueue them and executetasks again
536 tocontinue = checkObjQueue();
540 if(STARTUPCORE == BAMBOO_NUM_OF_CORE) {
543 BAMBOO_DEBUGPRINT(0xee03);
551 BAMBOO_DEBUGPRINT(0xee09);
557 // wait for some time
560 BAMBOO_DEBUGPRINT(0xee0a);
566 // send StallMsg to startup core
568 BAMBOO_DEBUGPRINT(0xee0b);
571 send_msg_4(STARTUPCORE, TRANSTALL, BAMBOO_NUM_OF_CORE,
572 self_numsendobjs, self_numreceiveobjs);
584 BAMBOO_DEBUGPRINT(0xee0c);
587 } // if(STARTUPCORE == BAMBOO_NUM_OF_CORE)
590 } // if(BAMBOO_NUM_OF_CORE > NUMCORES - 1)
// GC shadow frame for createstartupobject: records the local object
// pointers so a collection triggered during allocation can find/update
// them. NOTE(review): the initializer {2, NULL, NULL, NULL} suggests the
// first (elided) field is the pointer count -- confirm layout against
// struct garbagelist.
594 struct ___createstartupobject____I_locals {
597 struct ___StartupObject___ * ___startupobject___;
598 struct ArrayObject * ___stringarray___;
599 }; // struct ___createstartupobject____I_locals
// Builds the StartupObject holding argv[1..argc-1] as a string array, sets
// its "initialized" flag, and enqueues it so startup tasks can fire.
// NOTE(review): the duplicated allocation sequences below are the
// MULTICORE_GC and non-GC variants selected by elided #ifdef guards; the
// GC variant threads the ___locals___ shadow frame through the allocator.
601 void createstartupobject(int argc,
605 /* Allocate startup object */
607 struct ___createstartupobject____I_locals ___locals___={2, NULL, NULL, NULL};
608 struct ___StartupObject___ *startupobject=
609 (struct ___StartupObject___*) allocate_new(&___locals___, STARTUPTYPE);
610 ___locals___.___startupobject___ = startupobject;
611 struct ArrayObject * stringarray=
612 allocate_newarray(&___locals___, STRINGARRAYTYPE, argc-1);
613 ___locals___.___stringarray___ = stringarray;
615 struct ___StartupObject___ *startupobject=
616 (struct ___StartupObject___*) allocate_new(STARTUPTYPE);
617 struct ArrayObject * stringarray=
618 allocate_newarray(STRINGARRAYTYPE, argc-1);
620 /* Build array of strings */
621 startupobject->___parameters___=stringarray;
622 for(i=1; i<argc; i++) {
623 int length=strlen(argv[i]);
625 struct ___String___ *newstring=NewString(&___locals___, argv[i],length);
627 struct ___String___ *newstring=NewString(argv[i],length);
// Store element i-1 directly past the array length header.
629 ((void **)(((char *)&stringarray->___length___)+sizeof(int)))[i-1]=
633 startupobject->version = 0;
634 startupobject->lock = NULL;
636 /* Set initialized flag for startup object */
637 flagorandinit(startupobject,1,0xFFFFFFFF);
638 enqueueObject(startupobject, NULL, 0);
640 BAMBOO_CACHE_FLUSH_ALL();
// Hash for a task parameter descriptor: task pointer XORed with each
// parameter pointer. Used as the hash function for the activetasks table.
644 int hashCodetpd(struct taskparamdescriptor *ftd) {
645 int hash=(int)ftd->task;
647 for(i=0; i<ftd->numParameters; i++) {
648 hash^=(int)ftd->parameterArray[i];
// Equality predicate for task parameter descriptors (same task and same
// parameter pointers, element-wise). Paired with hashCodetpd for the
// activetasks table. Return statements are elided in this view.
653 int comparetpd(struct taskparamdescriptor *ftd1,
654 struct taskparamdescriptor *ftd2) {
656 if (ftd1->task!=ftd2->task)
658 for(i=0; i<ftd1->numParameters; i++)
659 if(ftd1->parameterArray[i]!=ftd2->parameterArray[i])
664 /* This function sets a tag. */
// Attaches tag descriptor tagd to obj, and obj to tagd, maintaining both
// directions of the association:
//  - obj->___tags___ is either NULL, a single tag, or an ArrayObject of
//    tags (___cachedCode___ = number of used slots);
//  - tagd->flagptr mirrors this for the set of objects carrying the tag.
// The two signatures / duplicated allocation sites are the MULTICORE_GC
// and non-GC variants (elided #ifdefs); the GC variant passes live locals
// through ptrarray so the allocator can relocate them, then reloads
// obj/tagd from ptrarray after each allocation.
666 void tagset(void *ptr,
667 struct ___Object___ * obj,
668 struct ___TagDescriptor___ * tagd) {
670 void tagset(struct ___Object___ * obj,
671 struct ___TagDescriptor___ * tagd) {
673 struct ArrayObject * ao=NULL;
674 struct ___Object___ * tagptr=obj->___tags___;
// No tags yet: store the single tag directly.
676 obj->___tags___=(struct ___Object___ *)tagd;
678 /* Have to check if it is already set */
679 if (tagptr->type==TAGTYPE) {
// Exactly one existing tag: grow to a 2-slot tag array.
680 struct ___TagDescriptor___ * td=(struct ___TagDescriptor___ *) tagptr;
685 int ptrarray[]={2, (int) ptr, (int) obj, (int)tagd};
686 struct ArrayObject * ao=
687 allocate_newarray(&ptrarray,TAGARRAYTYPE,TAGARRAYINTERVAL);
688 obj=(struct ___Object___ *)ptrarray[2];
689 tagd=(struct ___TagDescriptor___ *)ptrarray[3];
690 td=(struct ___TagDescriptor___ *) obj->___tags___;
692 ao=allocate_newarray(TAGARRAYTYPE,TAGARRAYINTERVAL);
695 ARRAYSET(ao, struct ___TagDescriptor___ *, 0, td);
696 ARRAYSET(ao, struct ___TagDescriptor___ *, 1, tagd);
697 obj->___tags___=(struct ___Object___ *) ao;
698 ao->___cachedCode___=2;
// Already a tag array: append, growing the array if it is full.
702 struct ArrayObject *ao=(struct ArrayObject *) tagptr;
703 for(i=0; i<ao->___cachedCode___; i++) {
704 struct ___TagDescriptor___ * td=
705 ARRAYGET(ao, struct ___TagDescriptor___*, i);
710 if (ao->___cachedCode___<ao->___length___) {
711 ARRAYSET(ao, struct ___TagDescriptor___ *, ao->___cachedCode___, tagd);
712 ao->___cachedCode___++;
715 int ptrarray[]={2,(int) ptr, (int) obj, (int) tagd};
716 struct ArrayObject * aonew=
717 allocate_newarray(&ptrarray,TAGARRAYTYPE,
718 TAGARRAYINTERVAL+ao->___length___);
719 obj=(struct ___Object___ *)ptrarray[2];
720 tagd=(struct ___TagDescriptor___ *) ptrarray[3];
721 ao=(struct ArrayObject *)obj->___tags___;
723 struct ArrayObject * aonew=
724 allocate_newarray(TAGARRAYTYPE,TAGARRAYINTERVAL+ao->___length___);
727 aonew->___cachedCode___=ao->___length___+1;
728 for(i=0; i<ao->___length___; i++) {
729 ARRAYSET(aonew, struct ___TagDescriptor___*, i,
730 ARRAYGET(ao, struct ___TagDescriptor___*, i));
732 ARRAYSET(aonew, struct ___TagDescriptor___ *, ao->___length___, tagd);
// Reverse direction: record obj in the tag's object set (same single /
// array / grow-array structure as above).
738 struct ___Object___ * tagset=tagd->flagptr;
741 } else if (tagset->type!=OBJECTARRAYTYPE) {
743 int ptrarray[]={2, (int) ptr, (int) obj, (int)tagd};
744 struct ArrayObject * ao=
745 allocate_newarray(&ptrarray,OBJECTARRAYTYPE,OBJECTARRAYINTERVAL);
746 obj=(struct ___Object___ *)ptrarray[2];
747 tagd=(struct ___TagDescriptor___ *)ptrarray[3];
749 struct ArrayObject * ao=
750 allocate_newarray(OBJECTARRAYTYPE,OBJECTARRAYINTERVAL);
752 ARRAYSET(ao, struct ___Object___ *, 0, tagd->flagptr);
753 ARRAYSET(ao, struct ___Object___ *, 1, obj);
754 ao->___cachedCode___=2;
755 tagd->flagptr=(struct ___Object___ *)ao;
757 struct ArrayObject *ao=(struct ArrayObject *) tagset;
758 if (ao->___cachedCode___<ao->___length___) {
759 ARRAYSET(ao, struct ___Object___*, ao->___cachedCode___++, obj);
763 int ptrarray[]={2, (int) ptr, (int) obj, (int)tagd};
764 struct ArrayObject * aonew=
765 allocate_newarray(&ptrarray,OBJECTARRAYTYPE,
766 OBJECTARRAYINTERVAL+ao->___length___);
767 obj=(struct ___Object___ *)ptrarray[2];
768 tagd=(struct ___TagDescriptor___ *)ptrarray[3];
769 ao=(struct ArrayObject *)tagd->flagptr;
771 struct ArrayObject * aonew=
772 allocate_newarray(OBJECTARRAYTYPE,OBJECTARRAYINTERVAL);
// NOTE(review): the non-GC grow path above allocates only
// OBJECTARRAYINTERVAL slots (not +___length___ as the tag-array path
// does) -- verify this is large enough in the full source.
774 aonew->___cachedCode___=ao->___cachedCode___+1;
775 for(i=0; i<ao->___length___; i++) {
776 ARRAYSET(aonew, struct ___Object___*, i,
777 ARRAYGET(ao, struct ___Object___*, i));
779 ARRAYSET(aonew, struct ___Object___ *, ao->___cachedCode___, obj);
780 tagd->flagptr=(struct ___Object___ *) aonew;
786 /* This function clears a tag. */
// Detaches tagd from obj and obj from tagd, undoing tagset(): removes the
// tag from obj->___tags___ (single tag or tag array, compacting the array
// by moving the last element into the freed slot) and removes obj from
// tagd->flagptr's object set the same way. GC / non-GC signatures are
// selected by elided #ifdefs.
788 void tagclear(void *ptr,
789 struct ___Object___ * obj,
790 struct ___TagDescriptor___ * tagd) {
792 void tagclear(struct ___Object___ * obj,
793 struct ___TagDescriptor___ * tagd) {
795 /* We'll assume that tag is always there.
796 Need to statically check for this of course. */
797 struct ___Object___ * tagptr=obj->___tags___;
799 if (tagptr->type==TAGTYPE) {
800 if ((struct ___TagDescriptor___ *)tagptr==tagd)
801 obj->___tags___=NULL;
803 struct ArrayObject *ao=(struct ArrayObject *) tagptr;
805 for(i=0; i<ao->___cachedCode___; i++) {
806 struct ___TagDescriptor___ * td=
807 ARRAYGET(ao, struct ___TagDescriptor___ *, i);
// Swap-remove: shrink the used count, move the last tag into slot i.
809 ao->___cachedCode___--;
810 if (i<ao->___cachedCode___)
811 ARRAYSET(ao, struct ___TagDescriptor___ *, i,
812 ARRAYGET(ao, struct ___TagDescriptor___ *, ao->___cachedCode___));
813 ARRAYSET(ao, struct ___TagDescriptor___ *, ao->___cachedCode___, NULL);
814 if (ao->___cachedCode___==0)
815 obj->___tags___=NULL;
// Reverse direction: remove obj from the tag's object set.
822 struct ___Object___ *tagset=tagd->flagptr;
823 if (tagset->type!=OBJECTARRAYTYPE) {
827 struct ArrayObject *ao=(struct ArrayObject *) tagset;
829 for(i=0; i<ao->___cachedCode___; i++) {
830 struct ___Object___ * tobj=ARRAYGET(ao, struct ___Object___ *, i);
832 ao->___cachedCode___--;
833 if (i<ao->___cachedCode___)
834 ARRAYSET(ao, struct ___Object___ *, i,
835 ARRAYGET(ao, struct ___Object___ *, ao->___cachedCode___));
836 ARRAYSET(ao, struct ___Object___ *, ao->___cachedCode___, NULL);
837 if (ao->___cachedCode___==0)
848 /* This function allocates a new tag. */
// GC variant passes the caller's garbagelist frame (ptr) through to
// FREEMALLOC so the collector can scan it; non-GC variant allocates
// classsize[TAGTYPE] bytes directly. Initialization of the descriptor's
// fields is elided in this view.
850 struct ___TagDescriptor___ * allocate_tag(void *ptr,
852 struct ___TagDescriptor___ * v=
853 (struct ___TagDescriptor___ *) FREEMALLOC((struct garbagelist *) ptr,
856 struct ___TagDescriptor___ * allocate_tag(int index) {
857 struct ___TagDescriptor___ * v=FREEMALLOC(classsize[TAGTYPE]);
866 /* This function updates the flag for object ptr. It or's the flag
867 with the or mask and and's it with the andmask. */
// Forward declaration of flagbody (definition below); remaining
// parameters of the prototype are elided in this view.
869 void flagbody(struct ___Object___ *ptr,
871 struct parameterwrapper ** queues,
// qsort-style comparator for ints (ascending).
// NOTE(review): (*val1)-(*val2) overflows for operands of large opposite
// sign; safe only if flag values stay small (e.g. bitmask words) --
// confirm, otherwise use (*val1 > *val2) - (*val1 < *val2).
875 int flagcomp(const int *val1, const int *val2) {
876 return (*val1)-(*val2);
// OR ormask into the object's flag word (stored at ((int*)ptr)[1], i.e.
// the second word of the object header) and re-run queue placement via
// flagbody with the caller-supplied queues. The andmask parameter and
// its application are elided in this view.
879 void flagorand(void * ptr,
882 struct parameterwrapper ** queues,
885 int oldflag=((int *)ptr)[1];
886 int flag=ormask|oldflag;
888 flagbody(ptr, flag, queues, length, false);
// Like flagorand but with default queue selection (NULL queues) and an
// early-out: if the OR changes nothing, skips the flagbody call entirely.
// Returns whether the flag changed (return statements elided in view).
892 bool intflagorand(void * ptr,
896 int oldflag=((int *)ptr)[1];
897 int flag=ormask|oldflag;
899 if (flag==oldflag) /* Don't do anything */
902 flagbody(ptr, flag, NULL, 0, false);
// Initialization-time flag update: same OR-into-flag-word as flagorand but
// calls flagbody with isnew=true, so the object is enqueued fresh instead
// of first being removed from existing queues.
908 void flagorandinit(void * ptr,
911 int oldflag=((int *)ptr)[1];
912 int flag=ormask|oldflag;
914 flagbody(ptr,flag,NULL,0,true);
// Core flag-update routine: given the object's new flag value, removes the
// object from every parameter queue it currently sits in (skipped when
// isnew, per the callers), then re-enqueues it where the new flag matches
// (re-enqueue portion elided in this view). When vqueues is NULL the
// default per-core, per-type queue set is used.
917 void flagbody(struct ___Object___ *ptr,
919 struct parameterwrapper ** vqueues,
922 struct parameterwrapper * flagptr = NULL;
924 struct parameterwrapper ** queues = vqueues;
925 int length = vlength;
928 int * enterflags = NULL;
929 if((!isnew) && (queues == NULL)) {
930 if(BAMBOO_NUM_OF_CORE < NUMCORES) {
931 queues = objectqueues[BAMBOO_NUM_OF_CORE][ptr->type];
932 length = numqueues[BAMBOO_NUM_OF_CORE][ptr->type];
939 /*Remove object from all queues */
940 for(i = 0; i < length; ++i) {
942 ObjectHashget(flagptr->objectset, (int) ptr, (int *) &next,
943 (int *) &enterflags, &UNUSED, &UNUSED2);
944 ObjectHashremove(flagptr->objectset, (int)ptr);
945 if (enterflags!=NULL)
// Places an object into every parameter queue whose tag requirements and
// flag predicate it satisfies, then triggers task enqueueing via
// enqueuetasks(). NULL vqueues selects the default per-core, per-type
// queue set. Non-critical-section twin of enqueueObject_I below.
950 void enqueueObject(void * vptr,
951 struct parameterwrapper ** vqueues,
953 struct ___Object___ *ptr = (struct ___Object___ *)vptr;
956 //struct QueueItem *tmpptr;
957 struct parameterwrapper * parameter=NULL;
960 struct parameterwrapper * prevptr=NULL;
961 struct ___Object___ *tagptr=NULL;
962 struct parameterwrapper ** queues = vqueues;
963 int length = vlength;
964 if(BAMBOO_NUM_OF_CORE > NUMCORES - 1) {
968 queues = objectqueues[BAMBOO_NUM_OF_CORE][ptr->type];
969 length = numqueues[BAMBOO_NUM_OF_CORE][ptr->type];
971 tagptr=ptr->___tags___;
973 /* Outer loop iterates through all parameter queues an object of
974 this type could be in. */
975 for(j = 0; j < length; ++j) {
976 parameter = queues[j];
// First check tag requirements against the object's tag(s).
978 if (parameter->numbertags>0) {
980 goto nextloop; //that means the object has no tag
981 //but that param needs tag
982 else if(tagptr->type==TAGTYPE) { //one tag
983 //struct ___TagDescriptor___ * tag=
984 //(struct ___TagDescriptor___*) tagptr;
985 for(i=0; i<parameter->numbertags; i++) {
986 //slotid is parameter->tagarray[2*i];
987 int tagid=parameter->tagarray[2*i+1];
988 if (tagid!=tagptr->flag)
989 goto nextloop; /*We don't have this tag */
991 } else { //multiple tags
992 struct ArrayObject * ao=(struct ArrayObject *) tagptr;
993 for(i=0; i<parameter->numbertags; i++) {
994 //slotid is parameter->tagarray[2*i];
995 int tagid=parameter->tagarray[2*i+1];
// NOTE(review): this inner scan reuses loop index j from the outer
// parameter-queue loop, clobbering the queue iteration -- looks like a
// latent bug (same pattern in enqueueObject_I); verify in full source.
997 for(j=0; j<ao->___cachedCode___; j++) {
998 if (tagid==ARRAYGET(ao, struct ___TagDescriptor___*, j)->flag)
// Then check the flag predicate terms (andmask/checkmask pairs).
1009 for(i=0; i<parameter->numberofterms; i++) {
1010 int andmask=parameter->intarray[i*2];
1011 int checkmask=parameter->intarray[i*2+1];
1012 if ((ptr->flag&andmask)==checkmask) {
1013 enqueuetasks(parameter, prevptr, ptr, NULL, 0);
// Critical-section variant of enqueueObject (must be called with the
// relevant critical section held -- see checkObjQueue); identical logic
// but delegates to enqueuetasks_I. NOTE(review): near-duplicate of
// enqueueObject -- any fix applied there should be mirrored here.
1024 void enqueueObject_I(void * vptr,
1025 struct parameterwrapper ** vqueues,
1027 struct ___Object___ *ptr = (struct ___Object___ *)vptr;
1030 //struct QueueItem *tmpptr;
1031 struct parameterwrapper * parameter=NULL;
1034 struct parameterwrapper * prevptr=NULL;
1035 struct ___Object___ *tagptr=NULL;
1036 struct parameterwrapper ** queues = vqueues;
1037 int length = vlength;
1038 if(BAMBOO_NUM_OF_CORE > NUMCORES - 1) {
1041 if(queues == NULL) {
1042 queues = objectqueues[BAMBOO_NUM_OF_CORE][ptr->type];
1043 length = numqueues[BAMBOO_NUM_OF_CORE][ptr->type];
1045 tagptr=ptr->___tags___;
1047 /* Outer loop iterates through all parameter queues an object of
1048 this type could be in. */
1049 for(j = 0; j < length; ++j) {
1050 parameter = queues[j];
1052 if (parameter->numbertags>0) {
1054 goto nextloop; //that means the object has no tag
1055 //but that param needs tag
1056 else if(tagptr->type==TAGTYPE) { //one tag
1057 //struct ___TagDescriptor___ * tag=(struct ___TagDescriptor___*) tagptr;
1058 for(i=0; i<parameter->numbertags; i++) {
1059 //slotid is parameter->tagarray[2*i];
1060 int tagid=parameter->tagarray[2*i+1];
1061 if (tagid!=tagptr->flag)
1062 goto nextloop; /*We don't have this tag */
1064 } else { //multiple tags
1065 struct ArrayObject * ao=(struct ArrayObject *) tagptr;
1066 for(i=0; i<parameter->numbertags; i++) {
1067 //slotid is parameter->tagarray[2*i];
1068 int tagid=parameter->tagarray[2*i+1];
// NOTE(review): inner scan reuses outer loop index j (same concern as in
// enqueueObject) -- verify in full source.
1070 for(j=0; j<ao->___cachedCode___; j++) {
1071 if (tagid==ARRAYGET(ao, struct ___TagDescriptor___*, j)->flag)
1082 for(i=0; i<parameter->numberofterms; i++) {
1083 int andmask=parameter->intarray[i*2];
1084 int checkmask=parameter->intarray[i*2+1];
1085 if ((ptr->flag&andmask)==checkmask) {
1086 enqueuetasks_I(parameter, prevptr, ptr, NULL, 0);
// Computes a single lock covering a set of possibly-aliased objects.
// Walks the objects' existing locks, redirecting them through tbl so all
// aliases share one "redirect" lock; objects without a lock and the sorted
// insertion of new locks into locks[] are handled in elided lines.
// Returns the redirect lock when redirection happened, otherwise the
// smallest collected lock. NOTE(review): return values are lock IDs cast
// to int* -- callers must not dereference them.
1098 int * getAliasLock(void ** ptrs,
1100 struct RuntimeHash * tbl) {
// Degenerate case (elided guard): allocate a fresh lock word.
1102 return (int*)(RUNMALLOC(sizeof(int)));
1107 bool redirect = false;
1108 int redirectlock = 0;
1109 for(; i < length; i++) {
1110 struct ___Object___ * ptr = (struct ___Object___ *)(ptrs[i]);
1113 if(ptr->lock == NULL) {
1116 lock = (int)(ptr->lock);
1119 if(lock != redirectlock) {
1120 RuntimeHashadd(tbl, lock, redirectlock);
1123 if(RuntimeHashcontainskey(tbl, lock)) {
1124 // already redirected
1126 RuntimeHashget(tbl, lock, &redirectlock);
1127 for(; j < locklen; j++) {
1128 if(locks[j] != redirectlock) {
1129 RuntimeHashadd(tbl, locks[j], redirectlock);
// Sorted insert of a not-yet-seen lock into locks[0..locklen).
1134 for(j = 0; j < locklen; j++) {
1135 if(locks[j] == lock) {
1138 } else if(locks[j] > lock) {
1145 locks[h] = locks[h-1];
1154 return (int *)redirectlock;
1156 return (int *)(locks[0]);
// Associates alias lock `lock` (an int lock ID, second parameter elided in
// this view) with the object, overwriting any different existing lock.
// The ((int)ptr != lock) guard skips the self-lock case where the object's
// own address serves as its lock.
1161 void addAliasLock(void * ptr,
1163 struct ___Object___ * obj = (struct ___Object___ *)ptr;
1164 if(((int)ptr != lock) && (obj->lock != (int*)lock)) {
1165 // originally no alias lock associated or have a different alias lock
1166 // flush it as the new one
1167 obj->lock = (int *)lock;
// Profiling hook: records the exit point of the task currently being
// profiled (taskInfoArray[taskInfoIndex]).
1172 inline void setTaskExitIndex(int index) {
1173 taskInfoArray[taskInfoIndex]->exitIndex = index;
// Profiling hook: records an object allocated by the current task,
// lazily creating the task's newObjs queue on first use.
1176 inline void addNewObjInfo(void * nobj) {
1177 if(taskInfoArray[taskInfoIndex]->newObjs == NULL) {
1178 taskInfoArray[taskInfoIndex]->newObjs = createQueue();
1180 addNewItem(taskInfoArray[taskInfoIndex]->newObjs, nobj);
// Scans bamboo_free_mem_list for a chunk able to satisfy an isize-byte
// allocation for core coren, honoring the current bamboo_smem_mode
// (local-first block search via gc_core2block, fixed/mixed policies, or
// purely size-based global search). *tofindb receives the candidate block
// index. Returns the matching freeMemItem, or (per the foundsmem==2 path)
// signals out-of-memory; exact return handling is elided in this view.
1184 struct freeMemItem * findFreeMemChunk(int coren,
1187 struct freeMemItem * freemem = bamboo_free_mem_list->head;
1188 struct freeMemItem * prev = NULL;
// Candidate block for this core: gc_core2block maps (core, parity) to a
// base block; 124 appears to be the per-core block stride -- confirm.
1191 *tofindb = gc_core2block[2*coren+i]+124*j;
1192 // check available shared mem chunks
1195 switch(bamboo_smem_mode) {
1197 int startb = freemem->startblock;
1198 int endb = freemem->endblock;
// Advance the candidate block until it reaches this chunk's range.
1199 while(startb > *tofindb) {
1205 *tofindb = gc_core2block[2*coren+i]+124*j;
1206 } // while(startb > tofindb)
1207 if(startb <= *tofindb) {
1208 if((endb >= *tofindb) && (freemem->size >= isize)) {
1210 } else if(*tofindb > gcnumblock-1) {
1211 // no more local mem
1213 } // if(endb >= tofindb)
1214 } // if(startb <= tofindb)
1219 int startb = freemem->startblock;
1220 int endb = freemem->endblock;
1221 if(startb <= *tofindb) {
1222 if((endb >= *tofindb) && (freemem->size >= isize)) {
1226 // use the global mem
1227 if(((startb > NUMCORES-1) && (freemem->size >= isize)) ||
1228 ((endb > NUMCORES-1) && ((freemem->size-
1229 (gcbaseva+BAMBOO_LARGE_SMEM_BOUND-freemem->ptr))>=isize))) {
1237 // TODO not supported yet
1238 BAMBOO_EXIT(0xe001);
// Global mode: any chunk big enough qualifies.
1243 foundsmem = (freemem->size >= isize);
1250 if(1 == foundsmem) {
1253 } else if (2 == foundsmem) {
1254 // terminate, no more mem
1259 freemem = freemem->next;
1260 } while(freemem != NULL);
1263 } // struct freeMemItem * findFreeMemChunk(int, int, int *)
// Carves an isize-byte allocation out of freemem, preferring the address
// range of block tofindb (local to the requesting core); falls back to the
// chunk's tail. Clamps the allocation so it never crosses a block boundary
// (*allocsize receives the possibly-rounded size), then updates freemem --
// trimming its head, its tail, or splitting it into two items.
// NOTE(review): freemem->ptr/size are ints mixed with pointer arithmetic
// throughout (e.g. line 1294 assigns a void* expression to an int field in
// this view) -- 32-bit-only assumptions; confirm field types.
1265 void * localmalloc(int tofindb,
1267 struct freeMemItem * freemem,
1270 int startb = freemem->startblock;
1271 int endb = freemem->endblock;
// Base address of block tofindb: the first NUMCORES blocks use the large
// block size (BAMBOO_SMEM_SIZE_L), the rest the normal size.
1272 int tmpptr = gcbaseva+((tofindb<NUMCORES)?tofindb*BAMBOO_SMEM_SIZE_L
1273 :BAMBOO_LARGE_SMEM_BOUND+(tofindb-NUMCORES)*BAMBOO_SMEM_SIZE);
1274 if((freemem->size+freemem->ptr-tmpptr)>=isize) {
1275 mem = (tmpptr>freemem->ptr)?((void *)tmpptr):(freemem->ptr);
1277 mem = (void *)(freemem->size+freemem->ptr-isize);
1279 // check the remaining space in this block
1280 int remain = (int)(mem-gcbaseva);
1281 int bound = (BAMBOO_SMEM_SIZE);
1282 if(remain < BAMBOO_LARGE_SMEM_BOUND) {
1283 bound = (BAMBOO_SMEM_SIZE_L);
1285 remain = bound - remain%bound;
1286 if(remain < isize) {
1287 // this object crosses blocks
1290 // round the assigned allocation to the end of the current block
1291 *allocsize = remain;
// Update the free chunk: head-trim, tail-trim, or split in two.
1293 if(freemem->ptr == (int)mem) {
1294 freemem->ptr = ((void*)freemem->ptr) + (*allocsize);
1295 freemem->size -= *allocsize;
1296 BLOCKINDEX(freemem->ptr, &(freemem->startblock));
1297 } else if((freemem->ptr+freemem->size) == ((int)mem+(*allocsize))) {
1298 freemem->size -= *allocsize;
1299 BLOCKINDEX(((int)mem)-1, &(freemem->endblock));
1301 struct freeMemItem * tmp =
1302 (struct freeMemItem *)RUNMALLOC(sizeof(struct freeMemItem));
1303 tmp->ptr = (int)mem+*allocsize;
1304 tmp->size = freemem->ptr+freemem->size-(int)mem-*allocsize;
1305 BLOCKINDEX(tmp->ptr, &(tmp->startblock));
1306 tmp->endblock = freemem->endblock;
1307 tmp->next = freemem->next;
1308 freemem->next = tmp;
1309 freemem->size = (int)mem - freemem->ptr;
1310 BLOCKINDEX(((int)mem-1), &(freemem->endblock));
1313 } // void * localmalloc(int, int, struct freeMemItem *, int *)
// Allocates isize bytes from the head of freemem regardless of block
// locality: same block-boundary clamping as localmalloc, then a simple
// head-trim of the free chunk. *allocsize receives the granted size.
1315 void * globalmalloc(int isize,
1316 struct freeMemItem * freemem,
1318 void * mem = (void *)(freemem->ptr);
1319 // check the remaining space in this block
1320 int remain = (int)(mem-(BAMBOO_BASE_VA));
1321 int bound = (BAMBOO_SMEM_SIZE);
1322 if(remain < BAMBOO_LARGE_SMEM_BOUND) {
1323 bound = (BAMBOO_SMEM_SIZE_L);
1325 remain = bound - remain%bound;
1326 if(remain < isize) {
1327 // this object crosses blocks
1330 // round the assigned allocation to the end of the current block
1331 *allocsize = remain;
1333 freemem->ptr = ((void*)freemem->ptr) + (*allocsize);
1334 freemem->size -= *allocsize;
1336 } // void * globalmalloc(int, struct freeMemItem *, int *)
1338 // malloc from the shared memory
// Front end of the shared-memory allocator: pads the request by a cache
// line, finds a free chunk via findFreeMemChunk, then dispatches to
// localmalloc / globalmalloc per bamboo_smem_mode (fixed mode falls back
// to global when the local block is unavailable). On failure the non-GC
// path (elided #ifdef) tries mspace_calloc from bamboo_free_msp before
// giving up with BAMBOO_EXIT(0xa001).
1339 void * smemalloc(int coren,
1343 int isize = size+(BAMBOO_CACHE_LINE_SIZE);
// NOTE(review): toallocate rounds small requests up to a whole block but
// is not visibly used below in this view -- likely consumed by an elided
// branch; confirm.
1344 int toallocate = ((size+(BAMBOO_CACHE_LINE_SIZE))>(BAMBOO_SMEM_SIZE)) ?
1345 (size+(BAMBOO_CACHE_LINE_SIZE)):(BAMBOO_SMEM_SIZE);
1347 // go through free mem list for suitable chunks
1349 struct freeMemItem * freemem = findFreeMemChunk(coren, isize, &tofindb);
1351 // allocate shared mem if available
1352 if(freemem != NULL) {
1353 switch(bamboo_smem_mode) {
1355 mem = localmalloc(tofindb, isize, freemem, allocsize);
1360 int startb = freemem->startblock;
1361 int endb = freemem->endblock;
1362 if(startb > tofindb) {
1363 // malloc on global mem
1364 mem = globalmalloc(isize, freemem, allocsize);
1366 // malloc on local mem
1367 mem = localmalloc(tofindb, isize, freemem, allocsize);
1373 // TODO not supported yet
1374 BAMBOO_EXIT(0xe002);
1379 mem = globalmalloc(isize,freemem, allocsize);
1388 mem = mspace_calloc(bamboo_free_msp, 1, isize);
1392 // no enough shared global memory
1398 BAMBOO_DEBUGPRINT(0xa001);
1399 BAMBOO_EXIT(0xa001);
1405 // receive object transferred from other cores
1406 // or the terminate message from other cores
1407 // Should be invoked in critical sections!!
1408 // NOTICE: following format is for threadsimulate version only
1409 // RAW version please see previous description
1410 // format: type + object
1411 // type: -1--stall msg
1413 // return value: 0--received an object
1414 // 1--received nothing
1415 // 2--received a Stall Msg
1416 // 3--received a lock Msg
1417 // RAW version: -1 -- received nothing
1418 // otherwise -- received msg type
1419 int receiveObject() {
1423 if(receiveMsg() == -1) {
1427 if(msgdataindex == msglength) {
1428 // received a whole msg
1433 // receive a object transfer msg
1434 struct transObjInfo * transObj =
1435 RUNMALLOC_I(sizeof(struct transObjInfo));
1439 BAMBOO_DEBUGPRINT(0xe880);
1442 if(BAMBOO_NUM_OF_CORE > NUMCORES - 1) {
1444 BAMBOO_DEBUGPRINT_REG(msgdata[2]);
1446 BAMBOO_EXIT(0xa002);
1448 // store the object and its corresponding queue info, enqueue it later
1449 transObj->objptr = (void *)msgdata[2];
1450 transObj->length = (msglength - 3) / 2;
1451 transObj->queues = RUNMALLOC_I(sizeof(int)*(msglength - 3));
1452 for(k = 0; k < transObj->length; ++k) {
1453 transObj->queues[2*k] = msgdata[3+2*k];
1456 BAMBOO_DEBUGPRINT_REG(transObj->queues[2*k]);
1459 transObj->queues[2*k+1] = msgdata[3+2*k+1];
1462 BAMBOO_DEBUGPRINT_REG(transObj->queues[2*k+1]);
1466 // check if there is an existing duplicate item
1468 struct QueueItem * qitem = getHead(&objqueue);
1469 struct QueueItem * prev = NULL;
1470 while(qitem != NULL) {
1471 struct transObjInfo * tmpinfo =
1472 (struct transObjInfo *)(qitem->objectptr);
1473 if(tmpinfo->objptr == transObj->objptr) {
1474 // the same object, remove outdate one
1475 removeItem(&objqueue, qitem);
1481 qitem = getHead(&objqueue);
1483 qitem = getNextQueueItem(prev);
1486 addNewItem_I(&objqueue, (void *)transObj);
1488 ++(self_numreceiveobjs);
1493 // receive a stall msg
1494 if(BAMBOO_NUM_OF_CORE != STARTUPCORE) {
1495 // non startup core can not receive stall msg
1497 BAMBOO_DEBUGPRINT_REG(msgdata[1]);
1499 BAMBOO_EXIT(0xa003);
1501 if(msgdata[1] < NUMCORES) {
1504 BAMBOO_DEBUGPRINT(0xe881);
1507 corestatus[msgdata[1]] = 0;
1508 numsendobjs[msgdata[1]] = msgdata[2];
1509 numreceiveobjs[msgdata[1]] = msgdata[3];
1514 // GC version have no lock msgs
1515 #ifndef MULTICORE_GC
1517 // receive lock request msg, handle it right now
1518 // check to see if there is a lock exist for the required obj
1519 // msgdata[1] -> lock type
1520 int data2 = msgdata[2]; // obj pointer
1521 int data3 = msgdata[3]; // lock
1522 int data4 = msgdata[4]; // request core
1523 // -1: redirected, 0: approved, 1: denied
1524 deny = processlockrequest(msgdata[1], data3, data2,
1525 data4, data4, true);
1527 // this lock request is redirected
1530 // send response msg
1531 // for 32 bit machine, the size is always 4 words
1532 int tmp = deny==1?LOCKDENY:LOCKGROUNT;
1534 cache_msg_4(data4, tmp, msgdata[1], data2, data3);
1536 send_msg_4(data4, tmp, msgdata[1], data2, data3);
1543 // receive lock grount msg
1544 if(BAMBOO_NUM_OF_CORE > NUMCORES - 1) {
1546 BAMBOO_DEBUGPRINT_REG(msgdata[2]);
1548 BAMBOO_EXIT(0xa004);
1550 if((lockobj == msgdata[2]) && (lock2require == msgdata[3])) {
1553 BAMBOO_DEBUGPRINT(0xe882);
1562 // conflicts on lockresults
1564 BAMBOO_DEBUGPRINT_REG(msgdata[2]);
1566 BAMBOO_EXIT(0xa005);
1572 // receive lock deny msg
1573 if(BAMBOO_NUM_OF_CORE > NUMCORES - 1) {
1575 BAMBOO_DEBUGPRINT_REG(msgdata[2]);
1577 BAMBOO_EXIT(0xa006);
1579 if((lockobj == msgdata[2]) && (lock2require == msgdata[3])) {
1582 BAMBOO_DEBUGPRINT(0xe883);
1591 // conflicts on lockresults
1593 BAMBOO_DEBUGPRINT_REG(msgdata[2]);
1595 BAMBOO_EXIT(0xa007);
1601 // receive lock release msg
1602 processlockrelease(msgdata[1], msgdata[2], 0, false);
1608 case PROFILEOUTPUT: {
1609 // receive an output profile data request msg
1610 if(BAMBOO_NUM_OF_CORE == STARTUPCORE) {
1611 // startup core can not receive profile output finish msg
1612 BAMBOO_EXIT(0xa008);
1616 BAMBOO_DEBUGPRINT(0xe885);
1620 totalexetime = msgdata[1];
1621 outputProfileData();
1623 cache_msg_2(STARTUPCORE, PROFILEFINISH, BAMBOO_NUM_OF_CORE);
1625 send_msg_2(STARTUPCORE, PROFILEFINISH, BAMBOO_NUM_OF_CORE);
1630 case PROFILEFINISH: {
1631 // receive a profile output finish msg
1632 if(BAMBOO_NUM_OF_CORE != STARTUPCORE) {
1633 // non startup core can not receive profile output finish msg
1635 BAMBOO_DEBUGPRINT_REG(msgdata[1]);
1637 BAMBOO_EXIT(0xa009);
1641 BAMBOO_DEBUGPRINT(0xe886);
1644 profilestatus[msgdata[1]] = 0;
1649 // GC version has no lock msgs
1650 #ifndef MULTICORE_GC
1651 case REDIRECTLOCK: {
1652 // receive a redirect lock request msg, handle it right now
1653 // check to see if there is a lock exist for the required obj
1654 int data1 = msgdata[1]; // lock type
1655 int data2 = msgdata[2]; // obj pointer
1656 int data3 = msgdata[3]; // redirect lock
1657 int data4 = msgdata[4]; // root request core
1658 int data5 = msgdata[5]; // request core
1659 deny = processlockrequest(msgdata[1], data3, data2, data5, data4, true);
1661 // this lock request is redirected
1664 // send response msg
1665 // for 32 bit machine, the size is always 4 words
1667 cache_msg_4(data4, deny==1?REDIRECTDENY:REDIRECTGROUNT,
1668 data1, data2, data3);
1670 send_msg_4(data4, deny==1?REDIRECTDENY:REDIRECTGROUNT,
1671 data1, data2, data3);
1677 case REDIRECTGROUNT: {
1678 // receive a lock grant msg with redirect info
1679 if(BAMBOO_NUM_OF_CORE > NUMCORES - 1) {
1681 BAMBOO_DEBUGPRINT_REG(msgdata[2]);
1683 BAMBOO_EXIT(0xa00a);
1685 if(lockobj == msgdata[2]) {
1688 BAMBOO_DEBUGPRINT(0xe891);
1693 RuntimeHashadd_I(objRedirectLockTbl, lockobj, msgdata[3]);
1698 // conflicts on lockresults
1700 BAMBOO_DEBUGPRINT_REG(msgdata[2]);
1702 BAMBOO_EXIT(0xa00b);
1707 case REDIRECTDENY: {
1708 // receive a lock deny msg with redirect info
1709 if(BAMBOO_NUM_OF_CORE > NUMCORES - 1) {
1711 BAMBOO_DEBUGPRINT_REG(msgdata[2]);
1713 BAMBOO_EXIT(0xa00c);
1715 if(lockobj == msgdata[2]) {
1718 BAMBOO_DEBUGPRINT(0xe892);
1727 // conflicts on lockresults
1729 BAMBOO_DEBUGPRINT_REG(msgdata[2]);
1731 BAMBOO_EXIT(0xa00d);
1736 case REDIRECTRELEASE: {
1737 // receive a lock release msg with redirect info
1738 processlockrelease(msgdata[1], msgdata[2], msgdata[3], true);
1743 case STATUSCONFIRM: {
1744 // receive a status confirm info
1745 if((BAMBOO_NUM_OF_CORE == STARTUPCORE)
1746 || (BAMBOO_NUM_OF_CORE > NUMCORES - 1)) {
1747 // wrong core to receive such msg
1748 BAMBOO_EXIT(0xa00e);
1750 // send response msg
1753 BAMBOO_DEBUGPRINT(0xe887);
1757 cache_msg_5(STARTUPCORE, STATUSREPORT,
1758 busystatus?1:0, BAMBOO_NUM_OF_CORE,
1759 self_numsendobjs, self_numreceiveobjs);
1761 send_msg_5(STARTUPCORE, STATUSREPORT,
1762 busystatus?1:0, BAMBOO_NUM_OF_CORE,
1763 self_numsendobjs, self_numreceiveobjs);
1769 case STATUSREPORT: {
1770 // receive a status confirm info
1771 if(BAMBOO_NUM_OF_CORE != STARTUPCORE) {
1772 // wrong core to receive such msg
1774 BAMBOO_DEBUGPRINT_REG(msgdata[2]);
1776 BAMBOO_EXIT(0xa00f);
1780 BAMBOO_DEBUGPRINT(0xe888);
1786 corestatus[msgdata[2]] = msgdata[1];
1787 numsendobjs[msgdata[2]] = msgdata[3];
1788 numreceiveobjs[msgdata[2]] = msgdata[4];
1794 // receive a terminate msg
1797 BAMBOO_DEBUGPRINT(0xe889);
1806 // receive a shared memory request msg
1807 if(BAMBOO_NUM_OF_CORE != STARTUPCORE) {
1808 // wrong core to receive such msg
1810 BAMBOO_DEBUGPRINT_REG(msgdata[2]);
1812 BAMBOO_EXIT(0xa010);
1816 BAMBOO_DEBUGPRINT(0xe88a);
1821 // is currently doing gc, dump this msg
1826 void * mem = smemalloc(msgdata[2], msgdata[1], &allocsize);
1830 // send the start_va to request core
1832 cache_msg_3(msgdata[2], MEMRESPONSE, mem, allocsize);
1834 send_msg_3( msgdata[2], MEMRESPONSE, mem, allocsize);
1841 // receive a shared memory response msg
1844 BAMBOO_DEBUGPRINT(0xe88b);
1849 // is currently doing gc, dump this msg
1853 if(msgdata[2] == 0) {
1854 bamboo_smem_size = 0;
1857 // fill header to store the size of this mem block
1858 (*((int*)msgdata[1])) = msgdata[2];
1859 bamboo_smem_size = msgdata[2] - BAMBOO_CACHE_LINE_SIZE;
1861 bamboo_cur_msp = msgdata[1] + BAMBOO_CACHE_LINE_SIZE;
1864 create_mspace_with_base((void*)(msgdata[1]+BAMBOO_CACHE_LINE_SIZE),
1865 msgdata[2]-BAMBOO_CACHE_LINE_SIZE,
1877 gcphase = INITPHASE;
1879 // is waiting for response of mem request
1880 // let it return NULL and start gc
1881 bamboo_smem_size = 0;
1882 bamboo_cur_msp = NULL;
1889 // receive a start GC msg
1892 BAMBOO_DEBUGPRINT(0xe88c);
1896 gcphase = MARKPHASE;
1900 case GCSTARTCOMPACT: {
1901 // a compact phase start msg
1902 gcblock2fill = msgdata[1];
1903 gcphase = COMPACTPHASE;
1907 case GCSTARTFLUSH: {
1908 // received a flush phase start msg
1909 gcphase = FLUSHPHASE;
1913 case GCFINISHINIT: {
1914 // received a init phase finish msg
1915 if(BAMBOO_NUM_OF_CORE != STARTUPCORE) {
1916 // non startup core can not receive this msg
1918 BAMBOO_DEBUGPRINT_REG(msgdata[1]);
1920 BAMBOO_EXIT(0xb001);
1923 BAMBOO_DEBUGPRINT(0xe88c);
1924 BAMBOO_DEBUGPRINT_REG(msgdata[1]);
1926 if(msgdata[1] < NUMCORES) {
1927 gccorestatus[msgdata[1]] = 0;
1931 case GCFINISHMARK: {
1932 // received a mark phase finish msg
1933 if(BAMBOO_NUM_OF_CORE != STARTUPCORE) {
1934 // non startup core can not receive this msg
1936 BAMBOO_DEBUGPRINT_REG(msgdata[1]);
1938 BAMBOO_EXIT(0xb002);
1940 if(msgdata[1] < NUMCORES) {
1941 gccorestatus[msgdata[1]] = 0;
1942 gcnumsendobjs[msgdata[1]] = msgdata[2];
1943 gcnumreceiveobjs[msgdata[1]] = msgdata[3];
1948 case GCFINISHCOMPACT: {
1949 // received a compact phase finish msg
1950 if(BAMBOO_NUM_OF_CORE != STARTUPCORE) {
1951 // non startup core can not receive this msg
1954 BAMBOO_DEBUGPRINT_REG(msgdata[1]);
1956 BAMBOO_EXIT(0xb003);
1958 int cnum = msgdata[1];
1959 int filledblocks = msgdata[2];
1960 int heaptop = msgdata[3];
1961 int data4 = msgdata[4];
1962 if(cnum < NUMCORES) {
1963 if(COMPACTPHASE == gcphase) {
1964 gcfilledblocks[cnum] = filledblocks;
1965 gcloads[cnum] = heaptop;
1972 if(gcfindSpareMem_I(&startaddr, &tomove, &dstcore, data4, cnum)) {
1974 cache_msg_4(cnum, GCMOVESTART, dstcore, startaddr, tomove);
1976 send_msg_4(cnum, GCMOVESTART, dstcore, startaddr, tomove);
1980 gccorestatus[cnum] = 0;
1981 // check if there is pending move request
1982 /*if(gcmovepending > 0) {
1984 for(j = 0; j < NUMCORES; j++) {
1985 if(gcrequiredmems[j]>0) {
1993 gcrequiredmems[j] = assignSpareMem_I(cnum,
1997 if(STARTUPCORE == j) {
2000 gcmovestartaddr = startaddr;
2001 gcblock2fill = tomove;
2004 cache_msg_4(j, GCMOVESTART, cnum, startaddr, tomove);
2006 send_msg_4(j, GCMOVESTART, cnum, startaddr, tomove);
2008 } // if(STARTUPCORE == j)
2009 if(gcrequiredmems[j] == 0) {
2012 } // if(j < NUMCORES)
2013 } // if(gcmovepending > 0) */
2015 } // if(cnum < NUMCORES)
2019 case GCFINISHFLUSH: {
2020 // received a flush phase finish msg
2021 if(BAMBOO_NUM_OF_CORE != STARTUPCORE) {
2022 // non startup core can not receive this msg
2025 BAMBOO_DEBUGPRINT_REG(msgdata[1]);
2027 BAMBOO_EXIT(0xb004);
2029 if(msgdata[1] < NUMCORES) {
2030 gccorestatus[msgdata[1]] = 0;
2036 // received a GC finish msg
2037 gcphase = FINISHPHASE;
2041 case GCMARKCONFIRM: {
2042 // received a marked phase finish confirm request msg
2043 if((BAMBOO_NUM_OF_CORE == STARTUPCORE)
2044 || (BAMBOO_NUM_OF_CORE > NUMCORES - 1)) {
2045 // wrong core to receive such msg
2046 BAMBOO_EXIT(0xb005);
2048 // send response msg
2050 cache_msg_5(STARTUPCORE, GCMARKREPORT, BAMBOO_NUM_OF_CORE,
2051 gcbusystatus, gcself_numsendobjs,
2052 gcself_numreceiveobjs);
2054 send_msg_5(STARTUPCORE, GCMARKREPORT, BAMBOO_NUM_OF_CORE,
2055 gcbusystatus, gcself_numsendobjs, gcself_numreceiveobjs);
2061 case GCMARKREPORT: {
2062 // received a marked phase finish confirm response msg
2063 if(BAMBOO_NUM_OF_CORE != STARTUPCORE) {
2064 // wrong core to receive such msg
2066 BAMBOO_DEBUGPRINT_REG(msgdata[2]);
2068 BAMBOO_EXIT(0xb006);
2073 gccorestatus[msgdata[1]] = msgdata[2];
2074 gcnumsendobjs[msgdata[1]] = msgdata[3];
2075 gcnumreceiveobjs[msgdata[1]] = msgdata[4];
2081 // received a markedObj msg
2082 gc_enqueue_I(msgdata[1]);
2083 gcself_numreceiveobjs++;
2084 gcbusystatus = true;
2089 // received a start moving objs msg
2091 gcdstcore = msgdata[1];
2092 gcmovestartaddr = msgdata[2];
2093 gcblock2fill = msgdata[3];
2097 case GCMAPREQUEST: {
2098 // received a mapping info request msg
2099 void * dstptr = NULL;
2100 RuntimeHashget(gcpointertbl, msgdata[1], &dstptr);
2101 if(NULL == dstptr) {
2102 // no such pointer in this core, something is wrong
2104 BAMBOO_DEBUGPRINT_REG(msgdata[1]);
2105 BAMBOO_DEBUGPRINT_REG(msgdata[2]);
2107 BAMBOO_EXIT(0xb007);
2109 // send back the mapping info
2111 cache_msg_3(msgdata[2], GCMAPINFO, msgdata[1], (int)dstptr);
2113 send_msg_3(msgdata[2], GCMAPINFO, msgdata[1], (int)dstptr);
2120 // received a mapping info response msg
2121 if(msgdata[1] != gcobj2map) {
2122 // obj not matched, something is wrong
2124 BAMBOO_DEBUGPRINT_REG(gcobj2map);
2125 BAMBOO_DEBUGPRINT_REG(msgdata[1]);
2127 BAMBOO_EXIT(0xb008);
2129 gcmappedobj = msgdata[2];
2130 RuntimeHashadd_I(gcpointertbl, gcobj2map, gcmappedobj);
2136 case GCLOBJREQUEST: {
2137 // received a large objs info request msg
2138 transferMarkResults_I();
2143 // received a large objs info response msg
2146 if(BAMBOO_NUM_OF_CORE > NUMCORES - 1) {
2148 BAMBOO_DEBUGPRINT_REG(msgdata[2]);
2150 BAMBOO_EXIT(0xb009);
2152 // store the mark result info
2153 int cnum = msgdata[2];
2154 gcloads[cnum] = msgdata[3];
2155 if(gcheaptop < msgdata[4]) {
2156 gcheaptop = msgdata[4];
2158 // large obj info here
2159 for(int k = 5; k < msgdata[1];) {
2160 int lobj = msgdata[k++];
2161 int length = msgdata[k++];
2162 gc_lobjenqueue_I(lobj, length, cnum);
2164 } // for(int k = 5; k < msgdata[1];)
2168 case GCLOBJMAPPING: {
2169 // received a large obj mapping info msg
2170 RuntimeHashadd_I(gcpointertbl, msgdata[1], msgdata[2]);
2179 for(msgdataindex--; msgdataindex > 0; --msgdataindex) {
2180 msgdata[msgdataindex] = -1;
2186 BAMBOO_DEBUGPRINT(0xe88d);
2190 if(BAMBOO_MSG_AVAIL() != 0) {
2203 BAMBOO_DEBUGPRINT(0xe88e);
2207 /* if(isInterrupt) {
// Enqueue a newly available object into every parameter combination of a task.
// Registers ptr in this parameter's object set, then walks the task's tag/object
// iterators, allocating one taskparamdescriptor per combination and inserting it
// into the global 'activetasks' table; descriptors for combinations already
// present are freed instead of inserted.
// Fix: the token '&parameter' had been corrupted to '¶meter' (HTML-entity
// mangling of '&para'); restored in all iterator calls below.
// NOTE(review): non-critical-section variant; enqueuetasks_I below is the
// twin that uses the _I allocator/hashtable entry points.
2215 int enqueuetasks(struct parameterwrapper *parameter,
2216 struct parameterwrapper *prevptr,
2217 struct ___Object___ *ptr,
2219 int numenterflags) {
2220 void * taskpointerarray[MAXTASKPARAMS];
2222 //int numparams=parameter->task->numParameters;
2223 int numiterators=parameter->task->numTotal-1;
2226 struct taskdescriptor * task=parameter->task;
2228 //this add the object to parameterwrapper
2229 ObjectHashadd(parameter->objectset, (int) ptr, 0, (int) enterflags,
2230 numenterflags, enterflags==NULL);
2232 /* Add enqueued object to parameter vector */
2233 taskpointerarray[parameter->slot]=ptr;
2235 /* Reset iterators */
2236 for(j=0; j<numiterators; j++) {
2237 toiReset(&parameter->iterators[j]);
2240 /* Find initial state */
2241 for(j=0; j<numiterators; j++) {
2243 if(toiHasNext(&parameter->iterators[j],taskpointerarray OPTARG(failed)))
2244 toiNext(&parameter->iterators[j], taskpointerarray OPTARG(failed));
2246 /* Need to backtrack */
2247 toiReset(&parameter->iterators[j]);
2251 /* Nothing to enqueue */
2257 /* Enqueue current state */
2259 struct taskparamdescriptor *tpd=
2260 RUNMALLOC(sizeof(struct taskparamdescriptor));
2262 tpd->numParameters=numiterators+1;
2263 tpd->parameterArray=RUNMALLOC(sizeof(void *)*(numiterators+1));
2265 for(j=0; j<=numiterators; j++) {
2266 //store the actual parameters
2267 tpd->parameterArray[j]=taskpointerarray[j];
2270 if ((/*!gencontains(failedtasks, tpd)&&*/
2271 !gencontains(activetasks,tpd))) {
2272 genputtable(activetasks, tpd, tpd);
2274 RUNFREE(tpd->parameterArray); // duplicate combination: discard descriptor
2278 /* This loop iterates to the next parameter combination */
2279 if (numiterators==0)
2282 for(j=numiterators-1; j<numiterators; j++) {
2284 if(toiHasNext(&parameter->iterators[j],taskpointerarray OPTARG(failed)))
2285 toiNext(&parameter->iterators[j], taskpointerarray OPTARG(failed));
2287 /* Need to backtrack */
2288 toiReset(&parameter->iterators[j]);
2292 /* Nothing more to enqueue */
// Critical-section variant of enqueuetasks(): identical enumeration of
// parameter combinations, but uses the interrupt-safe _I entry points
// (ObjectHashadd_I, RUNMALLOC_I, genputtable_I) so it may run with
// interrupts disabled.
// Fix: the token '&parameter' had been corrupted to '¶meter' (HTML-entity
// mangling of '&para'); restored in all iterator calls below.
2300 int enqueuetasks_I(struct parameterwrapper *parameter,
2301 struct parameterwrapper *prevptr,
2302 struct ___Object___ *ptr,
2304 int numenterflags) {
2305 void * taskpointerarray[MAXTASKPARAMS];
2307 //int numparams=parameter->task->numParameters;
2308 int numiterators=parameter->task->numTotal-1;
2313 struct taskdescriptor * task=parameter->task;
2315 //this add the object to parameterwrapper
2316 ObjectHashadd_I(parameter->objectset, (int) ptr, 0, (int) enterflags,
2317 numenterflags, enterflags==NULL);
2319 /* Add enqueued object to parameter vector */
2320 taskpointerarray[parameter->slot]=ptr;
2322 /* Reset iterators */
2323 for(j=0; j<numiterators; j++) {
2324 toiReset(&parameter->iterators[j]);
2327 /* Find initial state */
2328 for(j=0; j<numiterators; j++) {
2330 if(toiHasNext(&parameter->iterators[j],taskpointerarray OPTARG(failed)))
2331 toiNext(&parameter->iterators[j], taskpointerarray OPTARG(failed));
2333 /* Need to backtrack */
2334 toiReset(&parameter->iterators[j]);
2338 /* Nothing to enqueue */
2344 /* Enqueue current state */
2346 struct taskparamdescriptor *tpd=
2347 RUNMALLOC_I(sizeof(struct taskparamdescriptor));
2349 tpd->numParameters=numiterators+1;
2350 tpd->parameterArray=RUNMALLOC_I(sizeof(void *)*(numiterators+1));
2352 for(j=0; j<=numiterators; j++) {
2353 //store the actual parameters
2354 tpd->parameterArray[j]=taskpointerarray[j];
2357 if ((/*!gencontains(failedtasks, tpd)&&*/
2358 !gencontains(activetasks,tpd))) {
2359 genputtable_I(activetasks, tpd, tpd);
2361 RUNFREE(tpd->parameterArray); // duplicate combination: discard descriptor
2365 /* This loop iterates to the next parameter combination */
2366 if (numiterators==0)
2369 for(j=numiterators-1; j<numiterators; j++) {
2371 if(toiHasNext(&parameter->iterators[j], taskpointerarray OPTARG(failed)))
2372 toiNext(&parameter->iterators[j], taskpointerarray OPTARG(failed))
2374 /* Need to backtrack */
2375 toiReset(&parameter->iterators[j]);
2379 /* Nothing more to enqueue */
2393 int containstag(struct ___Object___ *ptr,
2394 struct ___TagDescriptor___ *tag);
2396 #ifndef MULTICORE_GC
// Release a write lock while installing a redirect target (non-GC builds only).
// The owning core is derived from the lock value; if it is this core the local
// lock table entry is updated inside a critical section, otherwise a
// REDIRECTRELEASE message is sent to the owner.
2397 void releasewritelock_r(void * lock, void * redirectlock) {
2399 int reallock = (int)lock;
// Owner core = hash of the lock value over all cores.
2400 targetcore = (reallock >> 5) % BAMBOO_TOTALCORE;
2403 BAMBOO_DEBUGPRINT(0xe671);
2404 BAMBOO_DEBUGPRINT_REG((int)lock);
2405 BAMBOO_DEBUGPRINT_REG(reallock);
2406 BAMBOO_DEBUGPRINT_REG(targetcore);
2409 if(targetcore == BAMBOO_NUM_OF_CORE) {
// Lock lives on this core: mutate locktbl under the lock-table critical section.
2410 BAMBOO_START_CRITICAL_SECTION_LOCK();
2412 BAMBOO_DEBUGPRINT(0xf001);
2414 // reside on this core
2415 if(!RuntimeHashcontainskey(locktbl, reallock)) {
2416 // no locks for this object, something is wrong
2417 BAMBOO_EXIT(0xa011);
2420 struct LockValue * lockvalue = NULL;
2422 BAMBOO_DEBUGPRINT(0xe672);
2424 RuntimeHashget(locktbl, reallock, &rwlock_obj);
2425 lockvalue = (struct LockValue *)rwlock_obj;
2427 BAMBOO_DEBUGPRINT_REG(lockvalue->value);
// Record where future lock requests for this object should be redirected.
2430 lockvalue->redirectlock = (int)redirectlock;
2432 BAMBOO_DEBUGPRINT_REG(lockvalue->value);
2435 BAMBOO_CLOSE_CRITICAL_SECTION_LOCK();
2437 BAMBOO_DEBUGPRINT(0xf000);
2441 // send lock release with redirect info msg
2442 // for 32 bit machine, the size is always 4 words
2443 send_msg_4(targetcore, REDIRECTRELEASE, 1, (int)lock, (int)redirectlock);
// Main task-execution loop: repeatedly pops a task parameter descriptor from
// 'activetasks', acquires write locks on all parameter objects (sorted to
// avoid deadlock), re-validates each parameter (still queued, flags still
// match, tags still attached), invokes the task body, then releases locks and
// frees the descriptor. On any validation failure the grabbed locks are
// released and the descriptor is discarded or re-queued.
2448 void executetasks() {
2449 void * taskpointerarray[MAXTASKPARAMS+OFFSET];
2452 struct ___Object___ * tmpparam = NULL;
2453 struct parameterdescriptor * pd=NULL;
2454 struct parameterwrapper *pw=NULL;
// Per-invocation table of locks to acquire, kept sorted by lock value.
2459 struct LockValue locks[MAXTASKPARAMS];
2466 while(hashsize(activetasks)>0) {
2471 BAMBOO_DEBUGPRINT(0xe990);
2474 /* See if there are any active tasks */
2475 if (hashsize(activetasks)>0) {
2478 #ifdef ACCURATEPROFILE
2479 profileTaskStart("tpd checking");
// Take ownership of one pending descriptor and remove it from the table.
2483 currtpd=(struct taskparamdescriptor *) getfirstkey(activetasks);
2484 genfreekey(activetasks, currtpd);
2486 numparams=currtpd->task->numParameters;
2487 numtotal=currtpd->task->numTotal;
2489 // clear the lockRedirectTbl
2490 // (TODO, this table should be empty after all locks are released)
2492 for(j = 0; j < MAXTASKPARAMS; j++) {
2493 locks[j].redirectlock = 0;
2496 // get all required locks
2498 // check which locks are needed
2499 for(i = 0; i < numparams; i++) {
2500 void * param = currtpd->parameterArray[i];
// Startup object needs no lock; it goes straight into the argument vector.
2504 if(((struct ___Object___ *)param)->type == STARTUPTYPE) {
2506 taskpointerarray[i+OFFSET]=param;
// Objects without an explicit lock are locked on their own address.
2509 if(((struct ___Object___ *)param)->lock == NULL) {
2510 tmplock = (int)param;
2512 tmplock = (int)(((struct ___Object___ *)param)->lock);
2514 // insert into the locks array
// Sorted insertion (dedup + ordering) so locks are always taken in a
// globally consistent order, preventing cross-core deadlock.
2515 for(j = 0; j < locklen; j++) {
2516 if(locks[j].value == tmplock) {
2519 } else if(locks[j].value > tmplock) {
2526 locks[h].redirectlock = locks[h-1].redirectlock;
2527 locks[h].value = locks[h-1].value;
2529 locks[j].value = tmplock;
2530 locks[j].redirectlock = (int)param;
2533 } // line 2713: for(i = 0; i < numparams; i++)
2534 // grab these required locks
2536 BAMBOO_DEBUGPRINT(0xe991);
2538 for(i = 0; i < locklen; i++) {
2539 int * lock = (int *)(locks[i].redirectlock);
2541 // require locks for this parameter if it is not a startup object
2543 BAMBOO_DEBUGPRINT_REG((int)lock);
2544 BAMBOO_DEBUGPRINT_REG((int)(locks[i].value));
2547 BAMBOO_START_CRITICAL_SECTION();
2549 BAMBOO_DEBUGPRINT(0xf001);
2552 //isInterrupt = false;
// Spin until the lock-request response arrives (lockresult is written by
// the message handler).
2555 BAMBOO_WAITING_FOR_LOCK();
2559 while(BAMBOO_WAITING_FOR_LOCK() != -1) {
2563 grount = lockresult;
2573 //isInterrupt = true;
2575 BAMBOO_CLOSE_CRITICAL_SECTION();
2577 BAMBOO_DEBUGPRINT(0xf000);
2583 BAMBOO_DEBUGPRINT(0xe992);
2585 // can not get the lock, try later
2586 // releas all grabbed locks for previous parameters
2587 for(j = 0; j < i; ++j) {
2588 lock = (int*)(locks[j].redirectlock);
2589 releasewritelock(lock);
// Put the descriptor back so the task is retried later.
2591 genputtable(activetasks, currtpd, currtpd);
2592 if(hashsize(activetasks) == 1) {
2593 // only one task right now, wait a little while before next try
2599 #ifdef ACCURATEPROFILE
2600 // fail, set the end of the checkTaskInfo
2605 } // line 2794: if(grount == 0)
2606 } // line 2752: for(i = 0; i < locklen; i++)
2609 BAMBOO_DEBUGPRINT(0xe993);
2611 /* Make sure that the parameters are still in the queues */
2612 for(i=0; i<numparams; i++) {
2613 void * parameter=currtpd->parameterArray[i];
// Invalidate stale cache lines before reading the shared object's fields.
2617 BAMBOO_CACHE_FLUSH_RANGE((int)parameter,
2618 classsize[((struct ___Object___ *)parameter)->type]);
2620 tmpparam = (struct ___Object___ *)parameter;
2621 pd=currtpd->task->descriptorarray[i];
2622 pw=(struct parameterwrapper *) pd->queue;
2623 /* Check that object is still in queue */
2625 if (!ObjectHashcontainskey(pw->objectset, (int) parameter)) {
2627 BAMBOO_DEBUGPRINT(0xe994);
2629 // release grabbed locks
2630 for(j = 0; j < locklen; ++j) {
2631 int * lock = (int *)(locks[j].redirectlock);
2632 releasewritelock(lock);
2634 RUNFREE(currtpd->parameterArray);
2640 /* Check if the object's flags still meets requirements */
2644 for(tmpi = 0; tmpi < pw->numberofterms; ++tmpi) {
2645 andmask=pw->intarray[tmpi*2];
2646 checkmask=pw->intarray[tmpi*2+1];
2647 if((((struct ___Object___ *)parameter)->flag&andmask)==checkmask) {
2653 // flags are never suitable
2654 // remove this obj from the queue
2656 int UNUSED, UNUSED2;
2659 BAMBOO_DEBUGPRINT(0xe995);
2661 ObjectHashget(pw->objectset, (int) parameter, (int *) &next,
2662 (int *) &enterflags, &UNUSED, &UNUSED2);
2663 ObjectHashremove(pw->objectset, (int)parameter);
2664 if (enterflags!=NULL)
2665 RUNFREE(enterflags);
2666 // release grabbed locks
2667 for(j = 0; j < locklen; ++j) {
2668 int * lock = (int *)(locks[j].redirectlock);
2669 releasewritelock(lock);
2671 RUNFREE(currtpd->parameterArray);
2675 #ifdef ACCURATEPROFILE
2676 // fail, set the end of the checkTaskInfo
2681 } // line 2878: if (!ismet)
2685 /* Check that object still has necessary tags */
2686 for(j=0; j<pd->numbertags; j++) {
2687 int slotid=pd->tagarray[2*j]+numparams;
2688 struct ___TagDescriptor___ *tagd=currtpd->parameterArray[slotid];
2689 if (!containstag(parameter, tagd)) {
2691 BAMBOO_DEBUGPRINT(0xe996);
2694 // release grabbed locks
2696 for(tmpj = 0; tmpj < locklen; ++tmpj) {
2697 int * lock = (int *)(locks[tmpj].redirectlock);
2698 releasewritelock(lock);
2701 RUNFREE(currtpd->parameterArray);
2705 } // line2911: if (!containstag(parameter, tagd))
2706 } // line 2808: for(j=0; j<pd->numbertags; j++)
2708 taskpointerarray[i+OFFSET]=parameter;
2709 } // line 2824: for(i=0; i<numparams; i++)
// Remaining slots (tags beyond the object parameters) are copied through.
2711 for(; i<numtotal; i++) {
2712 taskpointerarray[i+OFFSET]=currtpd->parameterArray[i];
2717 /* Actually call task */
2719 ((int *)taskpointerarray)[0]=currtpd->numParameters;
2720 taskpointerarray[1]=NULL;
2723 #ifdef ACCURATEPROFILE
2724 // check finish, set the end of the checkTaskInfo
2727 profileTaskStart(currtpd->task->name);
2731 BAMBOO_DEBUGPRINT(0xe997);
// Invoke the compiled task body through its function pointer.
2733 ((void(*) (void **))currtpd->task->taskptr)(taskpointerarray);
2735 #ifdef ACCURATEPROFILE
2736 // task finish, set the end of the checkTaskInfo
2738 // new a PostTaskInfo for the post-task execution
2739 profileTaskStart("post task execution");
2743 BAMBOO_DEBUGPRINT(0xe998);
2744 BAMBOO_DEBUGPRINT_REG(islock);
2749 BAMBOO_DEBUGPRINT(0xe999);
// Release every lock taken for this invocation, honoring redirects.
2751 for(i = 0; i < locklen; ++i) {
2752 void * ptr = (void *)(locks[i].redirectlock);
2753 int * lock = (int *)(locks[i].value);
2755 BAMBOO_DEBUGPRINT_REG((int)ptr);
2756 BAMBOO_DEBUGPRINT_REG((int)lock);
2758 #ifndef MULTICORE_GC
2759 if(RuntimeHashcontainskey(lockRedirectTbl, (int)lock)) {
2761 RuntimeHashget(lockRedirectTbl, (int)lock, &redirectlock);
2762 RuntimeHashremovekey(lockRedirectTbl, (int)lock);
2763 releasewritelock_r(lock, (int *)redirectlock);
2768 releasewritelock(ptr);
2771 } // line 3015: if(islock)
2774 // post task execution finish, set the end of the postTaskInfo
2778 // Free up task parameter descriptor
2779 RUNFREE(currtpd->parameterArray);
2783 BAMBOO_DEBUGPRINT(0xe99a);
2786 } // if (hashsize(activetasks)>0)
2787 } // while(hashsize(activetasks)>0)
2789 BAMBOO_DEBUGPRINT(0xe99b);
2793 /* This function processes an objects tags */
2794 void processtags(struct parameterdescriptor *pd,
2796 struct parameterwrapper *parameter,
2797 int * iteratorcount,
2802 for(i=0; i<pd->numbertags; i++) {
2803 int slotid=pd->tagarray[2*i];
2804 int tagid=pd->tagarray[2*i+1];
2806 if (statusarray[slotid+numparams]==0) {
2807 parameter->iterators[*iteratorcount].istag=1;
2808 parameter->iterators[*iteratorcount].tagid=tagid;
2809 parameter->iterators[*iteratorcount].slot=slotid+numparams;
2810 parameter->iterators[*iteratorcount].tagobjectslot=index;
2811 statusarray[slotid+numparams]=1;
// Create an object iterator for parameter slot 'index' over that parameter's
// object set, and attach any already-bound tags as search-narrowing bindings.
2818 void processobject(struct parameterwrapper *parameter,
2820 struct parameterdescriptor *pd,
2826 struct ObjectHash * objectset=
2827 ((struct parameterwrapper *)pd->queue)->objectset;
2829 parameter->iterators[*iteratorcount].istag=0;
2830 parameter->iterators[*iteratorcount].slot=index;
2831 parameter->iterators[*iteratorcount].objectset=objectset;
2832 statusarray[index]=1;
2834 for(i=0; i<pd->numbertags; i++) {
2835 int slotid=pd->tagarray[2*i];
2836 //int tagid=pd->tagarray[2*i+1];
2837 if (statusarray[slotid+numparams]!=0) {
2838 /* This tag has already been enqueued, use it to narrow search */
2839 parameter->iterators[*iteratorcount].tagbindings[tagcount]=
2844 parameter->iterators[*iteratorcount].numtags=tagcount;
2849 /* This function builds the iterators for a task & parameter */
2851 void builditerators(struct taskdescriptor * task,
2853 struct parameterwrapper * parameter) {
2854 int statusarray[MAXTASKPARAMS];
2856 int numparams=task->numParameters;
2857 int iteratorcount=0;
2858 for(i=0; i<MAXTASKPARAMS; i++) statusarray[i]=0;
2860 statusarray[index]=1; /* Initial parameter */
2861 /* Process tags for initial iterator */
2863 processtags(task->descriptorarray[index], index, parameter,
2864 &iteratorcount, statusarray, numparams);
2868 /* Check for objects with existing tags */
2869 for(i=0; i<numparams; i++) {
2870 if (statusarray[i]==0) {
2871 struct parameterdescriptor *pd=task->descriptorarray[i];
2873 for(j=0; j<pd->numbertags; j++) {
2874 int slotid=pd->tagarray[2*j];
2875 if(statusarray[slotid+numparams]!=0) {
2876 processobject(parameter, i, pd, &iteratorcount, statusarray,
2878 processtags(pd, i, parameter, &iteratorcount, statusarray, numparams);
2885 /* Next do objects w/ unbound tags*/
2887 for(i=0; i<numparams; i++) {
2888 if (statusarray[i]==0) {
2889 struct parameterdescriptor *pd=task->descriptorarray[i];
2890 if (pd->numbertags>0) {
2891 processobject(parameter, i, pd, &iteratorcount, statusarray, numparams);
2892 processtags(pd, i, parameter, &iteratorcount, statusarray, numparams);
2898 /* Nothing with a tag enqueued */
2900 for(i=0; i<numparams; i++) {
2901 if (statusarray[i]==0) {
2902 struct parameterdescriptor *pd=task->descriptorarray[i];
2903 processobject(parameter, i, pd, &iteratorcount, statusarray, numparams);
2904 processtags(pd, i, parameter, &iteratorcount, statusarray, numparams);
// Debug dump of this core's task table: for each task, print every parameter
// queue's objects, their flags, and attached tags.
// NOTE(review): the enclosing function's header line is not visible in this
// listing; presumably this is the runtime's debug-output routine — confirm.
// NOTE(review): printing pointers with %lx is a format/type mismatch on
// platforms where long and pointer widths differ — debug-only code.
2917 if(BAMBOO_NUM_OF_CORE > NUMCORES - 1) {
2920 for(i=0; i<numtasks[BAMBOO_NUM_OF_CORE]; i++) {
2921 struct taskdescriptor * task=taskarray[BAMBOO_NUM_OF_CORE][i];
2923 printf("%s\n", task->name);
2925 for(j=0; j<task->numParameters; j++) {
2926 struct parameterdescriptor *param=task->descriptorarray[j];
2927 struct parameterwrapper *parameter=param->queue;
2928 struct ObjectHash * set=parameter->objectset;
2929 struct ObjectIterator objit;
2931 printf(" Parameter %d\n", j);
2933 ObjectHashiterator(set, &objit);
2934 while(ObjhasNext(&objit)) {
2935 struct ___Object___ * obj=(struct ___Object___ *)Objkey(&objit);
2936 struct ___Object___ * tagptr=obj->___tags___;
2937 int nonfailed=Objdata4(&objit);
2938 int numflags=Objdata3(&objit);
2939 int flags=Objdata2(&objit);
2942 printf(" Contains %lx\n", obj);
2943 printf(" flag=%d\n", obj->flag);
2946 } else if (tagptr->type==TAGTYPE) {
2948 printf(" tag=%lx\n",tagptr);
2954 struct ArrayObject *ao=(struct ArrayObject *)tagptr;
2955 for(; tagindex<ao->___cachedCode___; tagindex++) {
2957 printf(" tag=%lx\n",ARRAYGET(ao, struct ___TagDescriptor___*,
/* This function processes the task information to create queues for
   each parameter type. */
// Runs once at startup on each core: allocates an object set per task
// parameter, links each wrapper back to its task, then builds the iterators.
// Iterators are built in a second pass so every wrapper's object set exists
// before any cross-parameter tag bindings are created.
2973 void processtasks() {
2975 if(BAMBOO_NUM_OF_CORE > NUMCORES - 1) {
2978 for(i=0; i<numtasks[BAMBOO_NUM_OF_CORE]; i++) {
2979 struct taskdescriptor * task=taskarray[BAMBOO_NUM_OF_CORE][i];
2982 /* Build objectsets */
2983 for(j=0; j<task->numParameters; j++) {
2984 struct parameterdescriptor *param=task->descriptorarray[j];
2985 struct parameterwrapper *parameter=param->queue;
2986 parameter->objectset=allocateObjectHash(10);
2987 parameter->task=task;
2990 /* Build iterators for parameters */
2991 for(j=0; j<task->numParameters; j++) {
2992 struct parameterdescriptor *param=task->descriptorarray[j];
2993 struct parameterwrapper *parameter=param->queue;
2994 builditerators(task, j, parameter);
// Rewind a tag/object iterator to its start state: tag iterators and
// tag-bound iterators reset their index; plain object iterators restart the
// underlying ObjectHash iteration.
2999 void toiReset(struct tagobjectiterator * it) {
3002 } else if (it->numtags>0) {
3005 ObjectHashiterator(it->objectset, &it->it);
// Return whether the iterator has another candidate for its slot.
// Three modes: (a) tag iterator — scan the tags attached to the object in
// tagobjectslot for one matching tagid; (b) tag-bound object iterator — walk
// the objects bound to the first tag, filtering by the parameter's object
// set and any additional tag bindings; (c) plain object-set iteration.
3009 int toiHasNext(struct tagobjectiterator *it,
3010 void ** objectarray OPTARG(int * failed)) {
3013 /* Get object with tags */
3014 struct ___Object___ *obj=objectarray[it->tagobjectslot];
3015 struct ___Object___ *tagptr=obj->___tags___;
// Single tag attached directly (not an array of tags).
3016 if (tagptr->type==TAGTYPE) {
3017 if ((it->tagobjindex==0)&& /* First object */
3018 (it->tagid==((struct ___TagDescriptor___ *)tagptr)->flag)) /* Right tag type */
// Multiple tags: ___cachedCode___ holds the element count of the tag array.
3023 struct ArrayObject *ao=(struct ArrayObject *) tagptr;
3024 int tagindex=it->tagobjindex;
3025 for(; tagindex<ao->___cachedCode___; tagindex++) {
3026 struct ___TagDescriptor___ *td=
3027 ARRAYGET(ao, struct ___TagDescriptor___ *, tagindex);
3028 if (td->flag==it->tagid) {
3029 it->tagobjindex=tagindex; /* Found right type of tag */
3035 } else if (it->numtags>0) {
3036 /* Use tags to locate appropriate objects */
3037 struct ___TagDescriptor___ *tag=objectarray[it->tagbindings[0]];
3038 struct ___Object___ *objptr=tag->flagptr;
3040 if (objptr->type!=OBJECTARRAYTYPE) {
3041 if (it->tagobjindex>0)
// Candidate must be in this parameter's object set...
3043 if (!ObjectHashcontainskey(it->objectset, (int) objptr))
// ...and carry every remaining bound tag.
3045 for(i=1; i<it->numtags; i++) {
3046 struct ___TagDescriptor___ *tag2=objectarray[it->tagbindings[i]];
3047 if (!containstag(objptr,tag2))
3052 struct ArrayObject *ao=(struct ArrayObject *) objptr;
3055 for(tagindex=it->tagobjindex;tagindex<ao->___cachedCode___;tagindex++) {
3056 struct ___Object___ *objptr=ARRAYGET(ao, struct ___Object___*, tagindex);
3057 if (!ObjectHashcontainskey(it->objectset, (int) objptr))
3059 for(i=1; i<it->numtags; i++) {
3060 struct ___TagDescriptor___ *tag2=objectarray[it->tagbindings[i]];
3061 if (!containstag(objptr,tag2))
3064 it->tagobjindex=tagindex;
3069 it->tagobjindex=tagindex;
// Plain object iteration: delegate to the ObjectHash iterator.
3073 return ObjhasNext(&it->it);
// Return whether object ptr is bound to the given tag: the tag's flagptr is
// either a single object or an OBJECTARRAYTYPE array of bound objects
// (length in ___cachedCode___) that is scanned for ptr.
3077 int containstag(struct ___Object___ *ptr,
3078 struct ___TagDescriptor___ *tag) {
3080 struct ___Object___ * objptr=tag->flagptr;
3081 if (objptr->type==OBJECTARRAYTYPE) {
3082 struct ArrayObject *ao=(struct ArrayObject *)objptr;
3083 for(j=0; j<ao->___cachedCode___; j++) {
3084 if (ptr==ARRAYGET(ao, struct ___Object___*, j))
// Advance the iterator and store the selected tag/object into
// objectarray[it->slot]. All filtering was done by toiHasNext (which must be
// called first); this function only materializes the current candidate and
// bumps the index.
3092 void toiNext(struct tagobjectiterator *it,
3093 void ** objectarray OPTARG(int * failed)) {
3094 /* hasNext has all of the intelligence */
3097 /* Get object with tags */
3098 struct ___Object___ *obj=objectarray[it->tagobjectslot];
3099 struct ___Object___ *tagptr=obj->___tags___;
3100 if (tagptr->type==TAGTYPE) {
// Single directly-attached tag.
3102 objectarray[it->slot]=tagptr;
3104 struct ArrayObject *ao=(struct ArrayObject *) tagptr;
3105 objectarray[it->slot]=
3106 ARRAYGET(ao, struct ___TagDescriptor___ *, it->tagobjindex++);
3108 } else if (it->numtags>0) {
3109 /* Use tags to locate appropriate objects */
3110 struct ___TagDescriptor___ *tag=objectarray[it->tagbindings[0]];
3111 struct ___Object___ *objptr=tag->flagptr;
3112 if (objptr->type!=OBJECTARRAYTYPE) {
3114 objectarray[it->slot]=objptr;
3116 struct ArrayObject *ao=(struct ArrayObject *) objptr;
3117 objectarray[it->slot]=
3118 ARRAYGET(ao, struct ___Object___ *, it->tagobjindex++);
3121 /* Iterate object */
3122 objectarray[it->slot]=(void *)Objkey(&it->it);