3 #include "multicoreruntime.h"
4 #include "runtime_arch.h"
5 #include "GenericHashtable.h"
7 // data structures for task invocation
// Hashtable of task-parameter descriptors that are currently runnable.
8 struct genhashtable * activetasks;
// Descriptor of the task currently being dispatched/executed.
9 struct taskparamdescriptor * currtpd;
// One lock record per possible task parameter slot.
10 struct LockValue runtime_locks[MAXTASKPARAMS];
13 // specific functions used inside critical sections
// Forward declarations of the "_I" variants: per the note above, these must
// only be called while the corresponding critical section is already held.
// (Both prototypes continue on lines not visible here.)
14 void enqueueObject_I(void * ptr,
15 struct parameterwrapper ** queues,
17 int enqueuetasks_I(struct parameterwrapper *parameter,
18 struct parameterwrapper *prevptr,
19 struct ___Object___ *ptr,
24 inline __attribute__((always_inline))
// Select the shared-memory allocation policy (bamboo_smem_mode) for this
// build. The consecutive assignments below are presumably guarded by
// preprocessor conditionals that are not visible here, so only one of them
// is compiled in per configuration — TODO confirm.
25 void setupsmemmode(void) {
27 bamboo_smem_mode = SMEMLOCAL;
29 bamboo_smem_mode = SMEMFIXED;
31 bamboo_smem_mode = SMEMMIXED;
33 bamboo_smem_mode = SMEMGLOBAL;
35 // by default, use local mode
36 bamboo_smem_mode = SMEMLOCAL;
38 } // void setupsmemmode(void)
41 inline __attribute__((always_inline))
// Initialize all per-core runtime bookkeeping: core-status arrays (startup
// core only), GC counters, message buffers, the lock table and redirect
// tables, the incoming-object queue, and the per-parameter runtime locks.
42 void initruntimedata() {
44 // initialize the arrays
45 if(STARTUPCORE == BAMBOO_NUM_OF_CORE) {
46 // startup core to initialize corestatus[]
47 for(i = 0; i < NUMCORESACTIVE; ++i) {
50 numreceiveobjs[i] = 0;
52 // initialize the profile data arrays
55 } // for(i = 0; i < NUMCORESACTIVE; ++i)
57 for(i = 0; i < NUMCORES; ++i) {
60 gcnumreceiveobjs[i] = 0;
62 gcrequiredmems[i] = 0;
64 gcfilledblocks[i] = 0;
65 } // for(i = 0; i < NUMCORES; ++i)
// per-core send/receive object counters
76 self_numreceiveobjs = 0;
// clear the incoming and outgoing message buffers
78 for(i = 0; i < BAMBOO_MSG_BUF_LENGTH; ++i) {
82 msglength = BAMBOO_MSG_BUF_LENGTH;
83 for(i = 0; i < BAMBOO_OUT_BUF_LENGTH; ++i) {
93 bamboo_cur_msp = NULL;
95 totransobjqueue = createQueue();
// GC bookkeeping starts in the "no collection in progress" state
100 gcphase = FINISHPHASE;
102 gcself_numsendobjs = 0;
103 gcself_numreceiveobjs = 0;
104 gcmarkedptrbound = 0;
// 20 is just an initial size hint for the runtime hash tables below
105 gcpointertbl = allocateRuntimeHash(20);
117 gcsbstarttbl = BAMBOO_BASE_VA;
118 gcsmemtbl = RUNMALLOC_I(sizeof(int)*gcnumblock);
120 // create the lock table, lockresult table and obj queue
123 (struct RuntimeNode **) RUNMALLOC_I(sizeof(struct RuntimeNode *)*20);
124 /* Set allocation blocks*/
125 locktable.listhead=NULL;
126 locktable.listtail=NULL;
128 locktable.numelements = 0;
133 lockRedirectTbl = allocateRuntimeHash(20);
134 objRedirectLockTbl = allocateRuntimeHash(20);
// queue of objects transferred in from other cores (drained by checkObjQueue)
139 objqueue.head = NULL;
140 objqueue.tail = NULL;
146 //isInterrupt = true;
149 taskInfoOverflow = false;
150 /*interruptInfoIndex = 0;
151 interruptInfoOverflow = false;*/
// every parameter lock slot starts unlocked and unredirected
154 for(i = 0; i < MAXTASKPARAMS; i++) {
155 runtime_locks[i].redirectlock = 0;
156 runtime_locks[i].value = 0;
161 inline __attribute__((always_inline))
// Tear down the structures built by initruntimedata(): free the GC pointer
// table, the lock-redirect tables, the lock-table bucket array, the
// active-task table, and the current task descriptor's parameter array
// (when one is outstanding).
162 void disruntimedata() {
164 freeRuntimeHash(gcpointertbl);
166 freeRuntimeHash(lockRedirectTbl);
167 freeRuntimeHash(objRedirectLockTbl);
168 RUNFREE(locktable.bucket);
170 genfreehashtable(activetasks);
171 if(currtpd != NULL) {
172 RUNFREE(currtpd->parameterArray);
178 inline __attribute__((always_inline))
// Drain the incoming-object queue (objqueue): for each transferred object,
// take its write lock, flush its cached contents, and enqueue it into the
// parameter queues recorded in its transObjInfo. When the lock cannot be
// taken, the entry is either dropped (a newer transfer of the same object
// is already queued) or re-appended to the queue tail for a later retry.
// Return value meaning is not visible here — presumably whether any object
// was processed; TODO confirm against the elided returns.
179 bool checkObjQueue() {
181 struct transObjInfo * objInfo = NULL;
185 #ifdef ACCURATEPROFILE
186 bool isChecking = false;
187 if(!isEmpty(&objqueue)) {
188 profileTaskStart("objqueue checking");
190 } // if(!isEmpty(&objqueue))
194 while(!isEmpty(&objqueue)) {
196 BAMBOO_START_CRITICAL_SECTION_OBJ_QUEUE();
198 BAMBOO_DEBUGPRINT(0xf001);
201 //isInterrupt = false;
204 BAMBOO_DEBUGPRINT(0xeee1);
207 objInfo = (struct transObjInfo *)getItem(&objqueue);
208 obj = objInfo->objptr;
210 BAMBOO_DEBUGPRINT_REG((int)obj);
212 // grab lock and flush the obj
216 BAMBOO_WAITING_FOR_LOCK();
217 } // while(!lockflag)
220 BAMBOO_DEBUGPRINT_REG(grount);
// flush the object's cache lines so the transferred contents are visible
// to this core (header word first, then the full object by class size)
235 BAMBOO_CACHE_FLUSH_RANGE((int)obj,sizeof(int));
236 BAMBOO_CACHE_FLUSH_RANGE((int)obj,
237 classsize[((struct ___Object___ *)obj)->type]);
239 // enqueue the object
// queues[] holds (taskindex, paramindex) pairs: two ints per target queue
240 for(k = 0; k < objInfo->length; ++k) {
241 int taskindex = objInfo->queues[2 * k];
242 int paramindex = objInfo->queues[2 * k + 1];
243 struct parameterwrapper ** queues =
244 &(paramqueues[BAMBOO_NUM_OF_CORE][taskindex][paramindex]);
246 BAMBOO_DEBUGPRINT_REG(taskindex);
247 BAMBOO_DEBUGPRINT_REG(paramindex);
248 struct ___Object___ * tmpptr = (struct ___Object___ *)obj;
249 tprintf("Process %x(%d): receive obj %x(%lld), ptrflag %x\n",
250 BAMBOO_NUM_OF_CORE, BAMBOO_NUM_OF_CORE, (int)obj,
251 (long)obj, tmpptr->flag);
253 enqueueObject_I(obj, queues, 1);
255 BAMBOO_DEBUGPRINT_REG(hashsize(activetasks));
257 } // for(k = 0; k < objInfo->length; ++k)
258 releasewritelock_I(obj);
259 RUNFREE(objInfo->queues);
263 // put it at the end of the queue if no updated version is in the queue
264 struct QueueItem * qitem = getHead(&objqueue);
265 struct QueueItem * prev = NULL;
266 while(qitem != NULL) {
267 struct transObjInfo * tmpinfo =
268 (struct transObjInfo *)(qitem->objectptr);
269 if(tmpinfo->objptr == obj) {
270 // the same object in the queue, which should be enqueued
271 // recently. Current one is outdated, do not re-enqueue it
272 RUNFREE(objInfo->queues);
277 } // if(tmpinfo->objptr == obj)
278 qitem = getNextQueueItem(prev);
279 } // while(qitem != NULL)
280 // try to execute active tasks already enqueued first
281 addNewItem_I(&objqueue, objInfo);
283 //isInterrupt = true;
286 BAMBOO_CLOSE_CRITICAL_SECTION_OBJ_QUEUE();
288 BAMBOO_DEBUGPRINT(0xf000);
292 BAMBOO_CLOSE_CRITICAL_SECTION_OBJ_QUEUE();
294 BAMBOO_DEBUGPRINT(0xf000);
296 } // while(!isEmpty(&objqueue))
299 #ifdef ACCURATEPROFILE
307 BAMBOO_DEBUGPRINT(0xee02);
312 inline __attribute__((always_inline))
// Startup-core termination detection. When every active core reports a
// stalled status and the global send/receive object counts balance, the
// first pass broadcasts STATUSCONFIRM to re-validate all counters; once the
// confirmed data still balances, it records the total execution time,
// collects profiling output from every core, and calls terminate().
313 void checkCoreStatus() {
314 bool allStall = false;
// proceed either unconditionally (first pass) or once every expected
// STATUSCONFIRM reply has arrived (numconfirm counts down elsewhere)
318 (waitconfirm && (numconfirm == 0))) {
320 BAMBOO_DEBUGPRINT(0xee04);
321 BAMBOO_DEBUGPRINT_REG(waitconfirm);
323 BAMBOO_START_CRITICAL_SECTION_STATUS();
325 BAMBOO_DEBUGPRINT(0xf001);
// record this core's own (stalled) status and counters first
327 corestatus[BAMBOO_NUM_OF_CORE] = 0;
328 numsendobjs[BAMBOO_NUM_OF_CORE] = self_numsendobjs;
329 numreceiveobjs[BAMBOO_NUM_OF_CORE] = self_numreceiveobjs;
330 // check the status of all cores
333 BAMBOO_DEBUGPRINT_REG(NUMCORESACTIVE);
335 for(i = 0; i < NUMCORESACTIVE; ++i) {
337 BAMBOO_DEBUGPRINT(0xe000 + corestatus[i]);
339 if(corestatus[i] != 0) {
343 } // for(i = 0; i < NUMCORESACTIVE; ++i)
345 // check if the sum of send objs and receive obj are the same
346 // yes->check if the info is the latest; no->go on executing
348 for(i = 0; i < NUMCORESACTIVE; ++i) {
349 sumsendobj += numsendobjs[i];
351 BAMBOO_DEBUGPRINT(0xf000 + numsendobjs[i]);
353 } // for(i = 0; i < NUMCORESACTIVE; ++i)
354 for(i = 0; i < NUMCORESACTIVE; ++i) {
355 sumsendobj -= numreceiveobjs[i];
357 BAMBOO_DEBUGPRINT(0xf000 + numreceiveobjs[i]);
359 } // for(i = 0; i < NUMCORESACTIVE; ++i)
// a zero difference means no objects are still in flight between cores
360 if(0 == sumsendobj) {
362 // the first time found all cores stall
363 // send out status confirm msg to all other cores
364 // reset the corestatus array too
366 BAMBOO_DEBUGPRINT(0xee05);
368 corestatus[BAMBOO_NUM_OF_CORE] = 1;
369 for(i = 1; i < NUMCORESACTIVE; ++i) {
371 // send status confirm msg to core i
372 send_msg_1(i, STATUSCONFIRM);
373 } // for(i = 1; i < NUMCORESACTIVE; ++i)
375 numconfirm = NUMCORESACTIVE - 1;
377 // all the core status info are the latest
378 // terminate; for profiling mode, send request to all
379 // other cores to pour out profiling data
381 BAMBOO_DEBUGPRINT(0xee06);
385 totalexetime = BAMBOO_GET_EXE_TIME();
387 BAMBOO_DEBUGPRINT(BAMBOO_GET_EXE_TIME());
388 BAMBOO_DEBUGPRINT_REG(total_num_t6); // TODO for test
389 BAMBOO_DEBUGPRINT(0xbbbbbbbb);
391 // profile mode, send msgs to other cores to request pouring
392 // out profiling data
394 BAMBOO_CLOSE_CRITICAL_SECTION_STATUS();
396 BAMBOO_DEBUGPRINT(0xf000);
398 for(i = 1; i < NUMCORESACTIVE; ++i) {
399 // send profile request msg to core i
400 send_msg_2(i, PROFILEOUTPUT, totalexetime);
401 } // for(i = 1; i < NUMCORESACTIVE; ++i)
402 // pour profiling data on startup core
405 BAMBOO_START_CRITICAL_SECTION_STATUS();
407 BAMBOO_DEBUGPRINT(0xf001);
409 profilestatus[BAMBOO_NUM_OF_CORE] = 0;
410 // check the status of all cores
413 BAMBOO_DEBUGPRINT_REG(NUMCORESACTIVE);
415 for(i = 0; i < NUMCORESACTIVE; ++i) {
417 BAMBOO_DEBUGPRINT(0xe000 + profilestatus[i]);
419 if(profilestatus[i] != 0) {
423 } // for(i = 0; i < NUMCORESACTIVE; ++i)
426 BAMBOO_CLOSE_CRITICAL_SECTION_STATUS();
428 BAMBOO_DEBUGPRINT(0xf000);
438 terminate(); // All done.
439 } // if(!waitconfirm)
441 // still some objects on the fly on the network
442 // reset the waitconfirm and numconfirm
444 BAMBOO_DEBUGPRINT(0xee07);
448 } // if(0 == sumsendobj)
450 // not all cores are stalled, keep on waiting
452 BAMBOO_DEBUGPRINT(0xee08);
457 BAMBOO_CLOSE_CRITICAL_SECTION_STATUS();
459 BAMBOO_DEBUGPRINT(0xf000);
461 } // if((!waitconfirm) ||
464 // main function for each core
// Per-core entry point. Sets up runtime data structures and the active-task
// table; the startup core additionally creates the startup object. Cores
// beyond NUMCORESACTIVE only service communication. Executing cores loop:
// drain incoming messages, enqueue pending objects (checkObjQueue), run
// tasks, and when idle either detect global termination (startup core, via
// checkCoreStatus) or send a TRANSTALL message to the startup core.
465 inline void run(void * arg) {
469 bool sendStall = false;
471 bool tocontinue = false;
473 corenum = BAMBOO_GET_NUM_OF_CORE();
475 BAMBOO_DEBUGPRINT(0xeeee);
476 BAMBOO_DEBUGPRINT_REG(corenum);
477 BAMBOO_DEBUGPRINT(STARTUPCORE);
480 // initialize runtime data structures
483 // other architecture related initialization
487 initializeexithandler();
489 // main process of the execution module
490 if(BAMBOO_NUM_OF_CORE > NUMCORESACTIVE - 1) {
491 // non-executing cores, only processing communications
494 BAMBOO_DEBUGPRINT(0xee01);
495 BAMBOO_DEBUGPRINT_REG(taskInfoIndex);
496 BAMBOO_DEBUGPRINT_REG(taskInfoOverflow);
497 profileTaskStart("msg handling");
501 //isInterrupt = false;
505 /* Create queue of active tasks */
507 genallocatehashtable((unsigned int(*) (void *)) &hashCodetpd,
508 (int(*) (void *,void *)) &comparetpd);
510 /* Process task information */
513 if(STARTUPCORE == BAMBOO_NUM_OF_CORE) {
514 /* Create startup object */
515 createstartupobject(argc, argv);
519 BAMBOO_DEBUGPRINT(0xee00);
524 // check if need to do GC
528 // check if there are new active tasks can be executed
// drain all pending inter-core messages before re-checking the queues
535 while(receiveObject() != -1) {
540 BAMBOO_DEBUGPRINT(0xee01);
543 // check if there are some pending objects,
544 // if yes, enqueue them and executetasks again
545 tocontinue = checkObjQueue();
549 if(STARTUPCORE == BAMBOO_NUM_OF_CORE) {
552 BAMBOO_DEBUGPRINT(0xee03);
560 BAMBOO_DEBUGPRINT(0xee09);
566 // wait for some time
569 BAMBOO_DEBUGPRINT(0xee0a);
575 // send StallMsg to startup core
577 BAMBOO_DEBUGPRINT(0xee0b);
// report this core as stalled, with its current send/receive counters
580 send_msg_4(STARTUPCORE, TRANSTALL, BAMBOO_NUM_OF_CORE,
581 self_numsendobjs, self_numreceiveobjs);
593 BAMBOO_DEBUGPRINT(0xee0c);
596 } // if(STARTUPCORE == BAMBOO_NUM_OF_CORE)
599 } // if(BAMBOO_NUM_OF_CORE > NUMCORESACTIVE - 1)
603 struct ___createstartupobject____I_locals {
// GC-visible locals frame for createstartupobject(); initialized there as
// {2, NULL, NULL, NULL}, where 2 presumably is the number of pointer locals
// tracked for the collector — TODO confirm against the elided leading fields.
606 struct ___StartupObject___ * ___startupobject___;
607 struct ArrayObject * ___stringarray___;
608 }; // struct ___createstartupobject____I_locals
610 void createstartupobject(int argc,
// Build the StartupObject and its String[] of command-line arguments
// (argv[1..argc-1]), set its initialized flag, and enqueue it so the first
// task can fire. The duplicated allocation statements are alternative
// builds: one threads a GC locals frame through the allocators, the other
// allocates directly (selection lines are not visible here).
614 /* Allocate startup object */
616 struct ___createstartupobject____I_locals ___locals___={2, NULL, NULL, NULL};
617 struct ___StartupObject___ *startupobject=
618 (struct ___StartupObject___*) allocate_new(&___locals___, STARTUPTYPE);
619 ___locals___.___startupobject___ = startupobject;
620 struct ArrayObject * stringarray=
621 allocate_newarray(&___locals___, STRINGARRAYTYPE, argc-1);
622 ___locals___.___stringarray___ = stringarray;
624 struct ___StartupObject___ *startupobject=
625 (struct ___StartupObject___*) allocate_new(STARTUPTYPE);
626 struct ArrayObject * stringarray=
627 allocate_newarray(STRINGARRAYTYPE, argc-1);
629 /* Build array of strings */
630 startupobject->___parameters___=stringarray;
631 for(i=1; i<argc; i++) {
632 int length=strlen(argv[i]);
634 struct ___String___ *newstring=NewString(&___locals___, argv[i],length);
636 struct ___String___ *newstring=NewString(argv[i],length);
// store into the array's element area, which starts one int past ___length___
638 ((void **)(((char *)&stringarray->___length___)+sizeof(int)))[i-1]=
642 startupobject->version = 0;
643 startupobject->lock = NULL;
645 /* Set initialized flag for startup object */
646 flagorandinit(startupobject,1,0xFFFFFFFF);
647 enqueueObject(startupobject, NULL, 0);
649 BAMBOO_CACHE_FLUSH_ALL();
// Hash of a task-parameter descriptor: the task address XOR-folded with each
// parameter object's address. Paired with comparetpd() for the activetasks
// hashtable.
653 int hashCodetpd(struct taskparamdescriptor *ftd) {
654 int hash=(int)ftd->task;
656 for(i=0; i<ftd->numParameters; i++) {
657 hash^=(int)ftd->parameterArray[i];
// Equality predicate for task-parameter descriptors: same task and identical
// parameter object pointers, position by position (return statements are on
// lines not visible here).
662 int comparetpd(struct taskparamdescriptor *ftd1,
663 struct taskparamdescriptor *ftd2) {
665 if (ftd1->task!=ftd2->task)
667 for(i=0; i<ftd1->numParameters; i++)
668 if(ftd1->parameterArray[i]!=ftd2->parameterArray[i])
673 /* This function sets a tag. */
// Attach tag descriptor `tagd` to `obj`, and record `obj` in the tag's own
// object set (tagd->flagptr). Both sides grow from a single pointer to a
// TAGARRAYTYPE/OBJECTARRAYTYPE array when a second entry is added, and a
// full array is reallocated larger by TAGARRAYINTERVAL/OBJECTARRAYINTERVAL.
// The duplicated allocation paths are alternative builds: one shields
// ptr/obj/tagd in ptrarray and re-reads them after each allocation
// (presumably because a collection may move them — TODO confirm), the other
// allocates directly. ___cachedCode___ is used as the in-use element count.
675 void tagset(void *ptr,
676 struct ___Object___ * obj,
677 struct ___TagDescriptor___ * tagd) {
679 void tagset(struct ___Object___ * obj,
680 struct ___TagDescriptor___ * tagd) {
682 struct ArrayObject * ao=NULL;
683 struct ___Object___ * tagptr=obj->___tags___;
// first tag on this object: store the descriptor directly
685 obj->___tags___=(struct ___Object___ *)tagd;
687 /* Have to check if it is already set */
688 if (tagptr->type==TAGTYPE) {
689 struct ___TagDescriptor___ * td=(struct ___TagDescriptor___ *) tagptr;
694 int ptrarray[]={2, (int) ptr, (int) obj, (int)tagd};
695 struct ArrayObject * ao=
696 allocate_newarray(&ptrarray,TAGARRAYTYPE,TAGARRAYINTERVAL);
697 obj=(struct ___Object___ *)ptrarray[2];
698 tagd=(struct ___TagDescriptor___ *)ptrarray[3];
699 td=(struct ___TagDescriptor___ *) obj->___tags___;
701 ao=allocate_newarray(TAGARRAYTYPE,TAGARRAYINTERVAL);
// second tag: promote the single descriptor to a 2-element tag array
704 ARRAYSET(ao, struct ___TagDescriptor___ *, 0, td);
705 ARRAYSET(ao, struct ___TagDescriptor___ *, 1, tagd);
706 obj->___tags___=(struct ___Object___ *) ao;
707 ao->___cachedCode___=2;
// already an array of tags: scan for duplicates, then append
711 struct ArrayObject *ao=(struct ArrayObject *) tagptr;
712 for(i=0; i<ao->___cachedCode___; i++) {
713 struct ___TagDescriptor___ * td=
714 ARRAYGET(ao, struct ___TagDescriptor___*, i);
719 if (ao->___cachedCode___<ao->___length___) {
720 ARRAYSET(ao, struct ___TagDescriptor___ *, ao->___cachedCode___, tagd);
721 ao->___cachedCode___++;
// tag array is full: allocate a bigger one and copy the entries over
724 int ptrarray[]={2,(int) ptr, (int) obj, (int) tagd};
725 struct ArrayObject * aonew=
726 allocate_newarray(&ptrarray,TAGARRAYTYPE,
727 TAGARRAYINTERVAL+ao->___length___);
728 obj=(struct ___Object___ *)ptrarray[2];
729 tagd=(struct ___TagDescriptor___ *) ptrarray[3];
730 ao=(struct ArrayObject *)obj->___tags___;
732 struct ArrayObject * aonew=
733 allocate_newarray(TAGARRAYTYPE,TAGARRAYINTERVAL+ao->___length___);
736 aonew->___cachedCode___=ao->___length___+1;
737 for(i=0; i<ao->___length___; i++) {
738 ARRAYSET(aonew, struct ___TagDescriptor___*, i,
739 ARRAYGET(ao, struct ___TagDescriptor___*, i));
741 ARRAYSET(aonew, struct ___TagDescriptor___ *, ao->___length___, tagd);
// now register obj in the tag descriptor's set of tagged objects
747 struct ___Object___ * tagset=tagd->flagptr;
750 } else if (tagset->type!=OBJECTARRAYTYPE) {
752 int ptrarray[]={2, (int) ptr, (int) obj, (int)tagd};
753 struct ArrayObject * ao=
754 allocate_newarray(&ptrarray,OBJECTARRAYTYPE,OBJECTARRAYINTERVAL);
755 obj=(struct ___Object___ *)ptrarray[2];
756 tagd=(struct ___TagDescriptor___ *)ptrarray[3];
758 struct ArrayObject * ao=
759 allocate_newarray(OBJECTARRAYTYPE,OBJECTARRAYINTERVAL);
// promote the single tagged object to a 2-element object array
761 ARRAYSET(ao, struct ___Object___ *, 0, tagd->flagptr);
762 ARRAYSET(ao, struct ___Object___ *, 1, obj);
763 ao->___cachedCode___=2;
764 tagd->flagptr=(struct ___Object___ *)ao;
766 struct ArrayObject *ao=(struct ArrayObject *) tagset;
767 if (ao->___cachedCode___<ao->___length___) {
768 ARRAYSET(ao, struct ___Object___*, ao->___cachedCode___++, obj);
// object array is full: grow it and append obj
772 int ptrarray[]={2, (int) ptr, (int) obj, (int)tagd};
773 struct ArrayObject * aonew=
774 allocate_newarray(&ptrarray,OBJECTARRAYTYPE,
775 OBJECTARRAYINTERVAL+ao->___length___);
776 obj=(struct ___Object___ *)ptrarray[2];
777 tagd=(struct ___TagDescriptor___ *)ptrarray[3];
778 ao=(struct ArrayObject *)tagd->flagptr;
780 struct ArrayObject * aonew=
781 allocate_newarray(OBJECTARRAYTYPE,OBJECTARRAYINTERVAL+ao->___length___);
783 aonew->___cachedCode___=ao->___cachedCode___+1;
784 for(i=0; i<ao->___length___; i++) {
785 ARRAYSET(aonew, struct ___Object___*, i,
786 ARRAYGET(ao, struct ___Object___*, i));
788 ARRAYSET(aonew, struct ___Object___ *, ao->___cachedCode___, obj);
789 tagd->flagptr=(struct ___Object___ *) aonew;
795 /* This function clears a tag. */
// Detach tag descriptor `tagd` from `obj` and remove `obj` from the tag's
// object set, mirroring tagset(): single entries are nulled out, while array
// entries are removed by swapping the last live element into the hole and
// shrinking ___cachedCode___ (the in-use count).
797 void tagclear(void *ptr,
798 struct ___Object___ * obj,
799 struct ___TagDescriptor___ * tagd) {
801 void tagclear(struct ___Object___ * obj,
802 struct ___TagDescriptor___ * tagd) {
804 /* We'll assume that tag is always there.
805 Need to statically check for this of course. */
806 struct ___Object___ * tagptr=obj->___tags___;
808 if (tagptr->type==TAGTYPE) {
809 if ((struct ___TagDescriptor___ *)tagptr==tagd)
810 obj->___tags___=NULL;
812 struct ArrayObject *ao=(struct ArrayObject *) tagptr;
814 for(i=0; i<ao->___cachedCode___; i++) {
815 struct ___TagDescriptor___ * td=
816 ARRAYGET(ao, struct ___TagDescriptor___ *, i);
// found the tag: swap in the last live entry and shrink the count
818 ao->___cachedCode___--;
819 if (i<ao->___cachedCode___)
820 ARRAYSET(ao, struct ___TagDescriptor___ *, i,
821 ARRAYGET(ao, struct ___TagDescriptor___ *, ao->___cachedCode___));
822 ARRAYSET(ao, struct ___TagDescriptor___ *, ao->___cachedCode___, NULL);
823 if (ao->___cachedCode___==0)
824 obj->___tags___=NULL;
// remove obj from the tag descriptor's object set as well
831 struct ___Object___ *tagset=tagd->flagptr;
832 if (tagset->type!=OBJECTARRAYTYPE) {
836 struct ArrayObject *ao=(struct ArrayObject *) tagset;
838 for(i=0; i<ao->___cachedCode___; i++) {
839 struct ___Object___ * tobj=ARRAYGET(ao, struct ___Object___ *, i);
841 ao->___cachedCode___--;
842 if (i<ao->___cachedCode___)
843 ARRAYSET(ao, struct ___Object___ *, i,
844 ARRAYGET(ao, struct ___Object___ *, ao->___cachedCode___));
845 ARRAYSET(ao, struct ___Object___ *, ao->___cachedCode___, NULL);
846 if (ao->___cachedCode___==0)
857 /* This function allocates a new tag. */
// Two alternative builds: one threads the caller's garbage list through
// FREEMALLOC so the allocation is GC-visible, the other allocates directly
// by TAGTYPE class size. The elided lines presumably set the descriptor's
// type/index fields before returning v — TODO confirm.
859 struct ___TagDescriptor___ * allocate_tag(void *ptr,
861 struct ___TagDescriptor___ * v=
862 (struct ___TagDescriptor___ *) FREEMALLOC((struct garbagelist *) ptr,
865 struct ___TagDescriptor___ * allocate_tag(int index) {
866 struct ___TagDescriptor___ * v=FREEMALLOC(classsize[TAGTYPE]);
875 /* This function updates the flag for object ptr. It or's the flag
876 with the or mask and and's it with the andmask. */
// Forward declaration of flagbody(), the shared implementation behind
// flagorand()/intflagorand()/flagorandinit() below.
878 void flagbody(struct ___Object___ *ptr,
880 struct parameterwrapper ** queues,
// Ascending comparator for ints (qsort-style contract: negative/zero/positive).
// NOTE(review): the subtraction can overflow for operands of large opposite
// magnitude; fine if flags are small non-negative values — confirm.
884 int flagcomp(const int *val1, const int *val2) {
885 return (*val1)-(*val2);
888 void flagorand(void * ptr,
// OR `ormask` into the object's flag word (stored at word offset 1 of the
// object) and run flagbody() to refresh the given parameter queues. The
// andmask application is not visible here — TODO confirm the elided lines.
891 struct parameterwrapper ** queues,
894 int oldflag=((int *)ptr)[1];
895 int flag=ormask|oldflag;
897 flagbody(ptr, flag, queues, length, false);
901 bool intflagorand(void * ptr,
// Like flagorand(), but skips the flagbody() update entirely when OR-ing
// `ormask` leaves the flag word (word offset 1 of the object) unchanged.
905 int oldflag=((int *)ptr)[1];
906 int flag=ormask|oldflag;
908 if (flag==oldflag) /* Don't do anything */
911 flagbody(ptr, flag, NULL, 0, false);
917 void flagorandinit(void * ptr,
// Flag update used at object initialization: same OR of the flag word, but
// flagbody() is called with isnew=true (see flagbody for how that differs).
920 int oldflag=((int *)ptr)[1];
921 int flag=ormask|oldflag;
923 flagbody(ptr,flag,NULL,0,true);
926 void flagbody(struct ___Object___ *ptr,
928 struct parameterwrapper ** vqueues,
// Core of the flag-update path: apply the new flag value and refresh the
// object's queue membership. When the caller supplies no queues and the
// object is not freshly created (isnew==false), default to all queues
// registered for the object's type on this core.
931 struct parameterwrapper * flagptr = NULL;
933 struct parameterwrapper ** queues = vqueues;
934 int length = vlength;
937 int * enterflags = NULL;
938 if((!isnew) && (queues == NULL)) {
939 if(BAMBOO_NUM_OF_CORE < NUMCORESACTIVE) {
940 queues = objectqueues[BAMBOO_NUM_OF_CORE][ptr->type];
941 length = numqueues[BAMBOO_NUM_OF_CORE][ptr->type];
948 /*Remove object from all queues */
949 for(i = 0; i < length; ++i) {
951 ObjectHashget(flagptr->objectset, (int) ptr, (int *) &next,
952 (int *) &enterflags, &UNUSED, &UNUSED2);
953 ObjectHashremove(flagptr->objectset, (int)ptr);
954 if (enterflags!=NULL)
959 void enqueueObject(void * vptr,
960 struct parameterwrapper ** vqueues,
// Insert the object into every parameter queue whose tag requirements and
// flag predicate it satisfies; each match triggers enqueuetasks(). With
// vqueues==NULL the queues registered for the object's type on this core
// are used instead.
962 struct ___Object___ *ptr = (struct ___Object___ *)vptr;
965 //struct QueueItem *tmpptr;
966 struct parameterwrapper * parameter=NULL;
969 struct parameterwrapper * prevptr=NULL;
970 struct ___Object___ *tagptr=NULL;
971 struct parameterwrapper ** queues = vqueues;
972 int length = vlength;
973 if(BAMBOO_NUM_OF_CORE > NUMCORESACTIVE - 1) {
977 queues = objectqueues[BAMBOO_NUM_OF_CORE][ptr->type];
978 length = numqueues[BAMBOO_NUM_OF_CORE][ptr->type];
980 tagptr=ptr->___tags___;
982 /* Outer loop iterates through all parameter queues an object of
983 this type could be in. */
984 for(j = 0; j < length; ++j) {
985 parameter = queues[j];
// first, check the parameter's tag requirements against the object's tags
987 if (parameter->numbertags>0) {
989 goto nextloop; //that means the object has no tag
990 //but that param needs tag
991 else if(tagptr->type==TAGTYPE) { //one tag
992 //struct ___TagDescriptor___ * tag=
993 //(struct ___TagDescriptor___*) tagptr;
994 for(i=0; i<parameter->numbertags; i++) {
995 //slotid is parameter->tagarray[2*i];
996 int tagid=parameter->tagarray[2*i+1];
997 if (tagid!=tagptr->flag)
998 goto nextloop; /*We don't have this tag */
1000 } else { //multiple tags
1001 struct ArrayObject * ao=(struct ArrayObject *) tagptr;
1002 for(i=0; i<parameter->numbertags; i++) {
1003 //slotid is parameter->tagarray[2*i];
1004 int tagid=parameter->tagarray[2*i+1];
// NOTE(review): this inner loop reuses the outer queue index j — verify the
// elided surrounding code restores it, otherwise the outer loop is corrupted
1006 for(j=0; j<ao->___cachedCode___; j++) {
1007 if (tagid==ARRAYGET(ao, struct ___TagDescriptor___*, j)->flag)
// then the flag predicate: any term with (flag & andmask) == checkmask
// enqueues the task for this parameter queue
1018 for(i=0; i<parameter->numberofterms; i++) {
1019 int andmask=parameter->intarray[i*2];
1020 int checkmask=parameter->intarray[i*2+1];
1021 if ((ptr->flag&andmask)==checkmask) {
1022 enqueuetasks(parameter, prevptr, ptr, NULL, 0);
1033 void enqueueObject_I(void * vptr,
1034 struct parameterwrapper ** vqueues,
// Critical-section variant of enqueueObject(): identical matching logic but
// it calls enqueuetasks_I(), so it must only run while the relevant critical
// section is held (see the forward-declaration note near the top of file).
1036 struct ___Object___ *ptr = (struct ___Object___ *)vptr;
1039 //struct QueueItem *tmpptr;
1040 struct parameterwrapper * parameter=NULL;
1043 struct parameterwrapper * prevptr=NULL;
1044 struct ___Object___ *tagptr=NULL;
1045 struct parameterwrapper ** queues = vqueues;
1046 int length = vlength;
1047 if(BAMBOO_NUM_OF_CORE > NUMCORESACTIVE - 1) {
1050 if(queues == NULL) {
1051 queues = objectqueues[BAMBOO_NUM_OF_CORE][ptr->type];
1052 length = numqueues[BAMBOO_NUM_OF_CORE][ptr->type];
1054 tagptr=ptr->___tags___;
1056 /* Outer loop iterates through all parameter queues an object of
1057 this type could be in. */
1058 for(j = 0; j < length; ++j) {
1059 parameter = queues[j];
// first, check the parameter's tag requirements against the object's tags
1061 if (parameter->numbertags>0) {
1063 goto nextloop; //that means the object has no tag
1064 //but that param needs tag
1065 else if(tagptr->type==TAGTYPE) { //one tag
1066 //struct ___TagDescriptor___ * tag=(struct ___TagDescriptor___*) tagptr;
1067 for(i=0; i<parameter->numbertags; i++) {
1068 //slotid is parameter->tagarray[2*i];
1069 int tagid=parameter->tagarray[2*i+1];
1070 if (tagid!=tagptr->flag)
1071 goto nextloop; /*We don't have this tag */
1073 } else { //multiple tags
1074 struct ArrayObject * ao=(struct ArrayObject *) tagptr;
1075 for(i=0; i<parameter->numbertags; i++) {
1076 //slotid is parameter->tagarray[2*i];
1077 int tagid=parameter->tagarray[2*i+1];
// NOTE(review): this inner loop reuses the outer queue index j — verify the
// elided surrounding code restores it, otherwise the outer loop is corrupted
1079 for(j=0; j<ao->___cachedCode___; j++) {
1080 if (tagid==ARRAYGET(ao, struct ___TagDescriptor___*, j)->flag)
// then the flag predicate: any term with (flag & andmask) == checkmask
// enqueues the task for this parameter queue
1091 for(i=0; i<parameter->numberofterms; i++) {
1092 int andmask=parameter->intarray[i*2];
1093 int checkmask=parameter->intarray[i*2+1];
1094 if ((ptr->flag&andmask)==checkmask) {
1095 enqueuetasks_I(parameter, prevptr, ptr, NULL, 0);
1107 int * getAliasLock(void ** ptrs,
// Compute one shared ("alias") lock for a set of possibly-aliasing objects.
// Existing locks are merged through the redirect table `tbl` so all members
// key to the same lock value; locks[] appears to be kept sorted along the
// way. Much of the control flow is on elided lines — treat the summary
// below as approximate and verify against the full source.
1109 struct RuntimeHash * tbl) {
// fast path (guard elided): hand back a fresh heap int as the lock
1111 return (int*)(RUNMALLOC(sizeof(int)));
1116 bool redirect = false;
1117 int redirectlock = 0;
1118 for(; i < length; i++) {
1119 struct ___Object___ * ptr = (struct ___Object___ *)(ptrs[i]);
1122 if(ptr->lock == NULL) {
1125 lock = (int)(ptr->lock);
1128 if(lock != redirectlock) {
1129 RuntimeHashadd(tbl, lock, redirectlock);
1132 if(RuntimeHashcontainskey(tbl, lock)) {
1133 // already redirected
1135 RuntimeHashget(tbl, lock, &redirectlock);
1136 for(; j < locklen; j++) {
1137 if(locks[j] != redirectlock) {
1138 RuntimeHashadd(tbl, locks[j], redirectlock);
1143 for(j = 0; j < locklen; j++) {
1144 if(locks[j] == lock) {
1147 } else if(locks[j] > lock) {
// keep locks[] sorted: shift larger entries right to open a slot
1154 locks[h] = locks[h-1];
1163 return (int *)redirectlock;
1165 return (int *)(locks[0]);
1170 void addAliasLock(void * ptr,
// Point the object's lock field at the given alias lock value; no-op when
// the object already carries that lock (or is itself the lock value).
1172 struct ___Object___ * obj = (struct ___Object___ *)ptr;
1173 if(((int)ptr != lock) && (obj->lock != (int*)lock)) {
1174 // originally no alias lock associated or have a different alias lock
1175 // flush it as the new one
1176 obj->lock = (int *)lock;
1181 inline void setTaskExitIndex(int index) {
// Record the exit point of the current task in its task-info record
// (taskInfoArray/taskInfoIndex are the profiling bookkeeping — see
// taskInfoOverflow usage elsewhere in this file).
1182 taskInfoArray[taskInfoIndex]->exitIndex = index;
1185 inline void addNewObjInfo(void * nobj) {
// Append a newly created object to the current task-info record, creating
// its newObjs queue lazily on first use.
1186 if(taskInfoArray[taskInfoIndex]->newObjs == NULL) {
1187 taskInfoArray[taskInfoIndex]->newObjs = createQueue();
1189 addNewItem(taskInfoArray[taskInfoIndex]->newObjs, nobj);
1194 struct freeMemItem * findFreeMemChunk_I(int coren,
// Walk the shared free-memory list for a chunk that can satisfy an
// allocation of `isize` bytes for core `coren`, honouring the current
// bamboo_smem_mode policy (local-first / fixed / mixed / global). Along the
// way, zero-size chunks are unlinked and parked on the backup list for
// reuse. *tofindb receives the preferred block index for the core.
1197 struct freeMemItem * freemem = bamboo_free_mem_list->head;
1198 struct freeMemItem * prev = NULL;
// preferred block: the core's own blocks, striped every NUMCORES*2 blocks
1201 *tofindb = gc_core2block[2*coren+i]+(NUMCORES*2)*j;
1202 // check available shared mem chunks
1205 switch(bamboo_smem_mode) {
1207 int startb = freemem->startblock;
1208 int endb = freemem->endblock;
// advance the preferred block until it is not before this chunk
1209 while(startb > *tofindb) {
1215 *tofindb = gc_core2block[2*coren+i]+(NUMCORES*2)*j;
1216 } // while(startb > tofindb)
1217 if(startb <= *tofindb) {
1218 if((endb >= *tofindb) && (freemem->size >= isize)) {
1220 } else if(*tofindb > gcnumblock-1) {
1221 // no more local mem
1223 } // if(endb >= tofindb)
1224 } // if(startb <= tofindb)
1229 int startb = freemem->startblock;
1230 int endb = freemem->endblock;
1231 if(startb <= *tofindb) {
1232 if((endb >= *tofindb) && (freemem->size >= isize)) {
1236 // use the global mem
1237 if(((startb > NUMCORES-1) && (freemem->size >= isize)) ||
1238 ((endb > NUMCORES-1) && ((freemem->size-
1239 (gcbaseva+BAMBOO_LARGE_SMEM_BOUND-freemem->ptr))>=isize))) {
1247 // TODO not supported yet
1248 BAMBOO_EXIT(0xe001);
// SMEMGLOBAL: any chunk big enough will do
1253 foundsmem = (freemem->size >= isize);
// foundsmem: 1 == usable chunk located, 2 == out of memory (see below)
1260 if(1 == foundsmem) {
1263 } else if (2 == foundsmem) {
1264 // terminate, no more mem
1268 if(freemem->size == 0) {
1269 // an empty item, remove it
1270 struct freeMemItem * toremove = freemem;
1271 freemem = freemem->next;
1274 bamboo_free_mem_list->head = freemem;
1276 prev->next = freemem;
1278 // put it to the tail of the list for reuse
1279 toremove->next = bamboo_free_mem_list->backuplist;
1280 bamboo_free_mem_list->backuplist = toremove;
1283 freemem = freemem->next;
1285 } while(freemem != NULL);
1288 } // struct freeMemItem * findFreeMemChunk_I(int, int, int *)
1290 void * localmalloc_I(int tofindb,
// Carve an `isize`-byte chunk for block `tofindb` out of `freemem`,
// preferring the address where that block starts (falling back to the end
// of the chunk). *allocsize receives the bytes actually granted, which may
// be rounded up to the block end when the request would cross a block
// boundary. `freemem` is trimmed in place: head cut, tail cut, or split
// into two list items when the allocation lands in the middle.
1292 struct freeMemItem * freemem,
1295 int startb = freemem->startblock;
1296 int endb = freemem->endblock;
// virtual address where block tofindb begins: the first NUMCORES blocks are
// the large "L" blocks, the rest are uniform BAMBOO_SMEM_SIZE blocks
1297 int tmpptr = gcbaseva+((tofindb<NUMCORES)?tofindb*BAMBOO_SMEM_SIZE_L
1298 :BAMBOO_LARGE_SMEM_BOUND+(tofindb-NUMCORES)*BAMBOO_SMEM_SIZE);
1299 if((freemem->size+freemem->ptr-tmpptr)>=isize) {
1300 mem = (tmpptr>freemem->ptr)?((void *)tmpptr):(freemem->ptr);
1302 mem = (void *)(freemem->size+freemem->ptr-isize);
1304 // check the remaining space in this block
1305 int remain = (int)(mem-gcbaseva);
1306 int bound = (BAMBOO_SMEM_SIZE);
1307 if(remain < BAMBOO_LARGE_SMEM_BOUND) {
1308 bound = (BAMBOO_SMEM_SIZE_L);
1310 remain = bound - remain%bound;
1311 if(remain < isize) {
1312 // this object crosses block boundaries
1315 // round the assigned chunk to the end of the current block
1316 *allocsize = remain;
1318 if(freemem->ptr == (int)mem) {
// allocation taken from the head of the free chunk: advance its start
1319 freemem->ptr = ((void*)freemem->ptr) + (*allocsize);
1320 freemem->size -= *allocsize;
1321 BLOCKINDEX(freemem->ptr, &(freemem->startblock));
1322 } else if((freemem->ptr+freemem->size) == ((int)mem+(*allocsize))) {
// allocation taken from the tail of the free chunk: pull its end back
1323 freemem->size -= *allocsize;
1324 BLOCKINDEX(((int)mem)-1, &(freemem->endblock));
1326 struct freeMemItem * tmp =
1327 (struct freeMemItem *)RUNMALLOC_I(sizeof(struct freeMemItem));
// allocation from the middle: split freemem into two items around it
1328 tmp->ptr = (int)mem+*allocsize;
1329 tmp->size = freemem->ptr+freemem->size-(int)mem-*allocsize;
1330 BLOCKINDEX(tmp->ptr, &(tmp->startblock));
1331 tmp->endblock = freemem->endblock;
1332 tmp->next = freemem->next;
1333 freemem->next = tmp;
1334 freemem->size = (int)mem - freemem->ptr;
1335 BLOCKINDEX(((int)mem-1), &(freemem->endblock));
1338 } // void * localmalloc_I(int, int, struct freeMemItem *, int *)
1340 void * globalmalloc_I(int isize,
// Allocate from the front of `freemem` without regard to core locality.
// *allocsize receives the bytes granted — rounded to the end of the current
// block when `isize` would cross a block boundary (the non-crossing branch
// is on elided lines; presumably *allocsize = isize there — TODO confirm).
// `freemem` is advanced past the granted bytes.
1341 struct freeMemItem * freemem,
1343 void * mem = (void *)(freemem->ptr);
1344 // check the remaining space in this block
1345 int remain = (int)(mem-(BAMBOO_BASE_VA));
1346 int bound = (BAMBOO_SMEM_SIZE);
1347 if(remain < BAMBOO_LARGE_SMEM_BOUND) {
1348 bound = (BAMBOO_SMEM_SIZE_L);
1350 remain = bound - remain%bound;
1351 if(remain < isize) {
1352 // this object crosses block boundaries
1355 // round the assigned chunk to the end of the current block
1356 *allocsize = remain;
1358 freemem->ptr = ((void*)freemem->ptr) + (*allocsize);
1359 freemem->size -= *allocsize;
1361 } // void * globalmalloc_I(int, struct freeMemItem *, int *)
1364 // malloc from the shared memory
// Shared-memory allocator entry point (critical-section variant). Pads the
// request by one cache line, locates a free chunk via findFreeMemChunk_I(),
// and dispatches to localmalloc_I()/globalmalloc_I() according to
// bamboo_smem_mode. A fallback path allocates from the backing mspace via
// mspace_calloc; the final failure path prints 0xa001 and exits.
1365 void * smemalloc_I(int coren,
1370 int isize = size+(BAMBOO_CACHE_LINE_SIZE);
1371 int toallocate = (isize>(BAMBOO_SMEM_SIZE)) ? (isize):(BAMBOO_SMEM_SIZE);
1372 // go through free mem list for suitable chunks
1374 struct freeMemItem * freemem = findFreeMemChunk_I(coren, isize, &tofindb);
1376 // allocate shared mem if available
1377 if(freemem != NULL) {
1378 switch(bamboo_smem_mode) {
1380 mem = localmalloc_I(tofindb, isize, freemem, allocsize);
1385 int startb = freemem->startblock;
1386 int endb = freemem->endblock;
// mixed policy: go global when the found chunk starts past the local block
1387 if(startb > tofindb) {
1388 // malloc on global mem
1389 mem = globalmalloc_I(isize, freemem, allocsize);
1391 // malloc on local mem
1392 mem = localmalloc_I(tofindb, isize, freemem, allocsize);
1398 // TODO not supported yet
1399 BAMBOO_EXIT(0xe002);
1404 mem = globalmalloc_I(isize,freemem, allocsize);
// fallback: carve the (unpadded) request out of the backing mspace
1413 int toallocate = (size>(BAMBOO_SMEM_SIZE)) ? (size):(BAMBOO_SMEM_SIZE);
1414 mem = mspace_calloc(bamboo_free_msp, 1, toallocate);
1415 *allocsize = toallocate;
1418 // not enough shared global memory
1424 BAMBOO_DEBUGPRINT(0xa001);
1425 BAMBOO_EXIT(0xa001);
1429 } // void * smemalloc_I(int, int, int)
1431 // receive object transferred from other cores
1432 // or the terminate message from other cores
1433 // Should be invoked in critical sections!!
1434 // NOTICE: following format is for threadsimulate version only
1435 // RAW version please see previous description
1436 // format: type + object
1437 // type: -1--stall msg
1439 // return value: 0--received an object
1440 // 1--received nothing
1441 // 2--received a Stall Msg
1442 // 3--received a lock Msg
1443 // RAW version: -1 -- received nothing
1444 // otherwise -- received msg type
1445 int receiveObject() {
1449 if(receiveMsg() == -1) {
1453 if(msgdataindex == msglength) {
1454 // received a whole msg
1459 // receive a object transfer msg
1460 struct transObjInfo * transObj =
1461 RUNMALLOC_I(sizeof(struct transObjInfo));
1465 BAMBOO_DEBUGPRINT(0xe880);
1468 if(BAMBOO_NUM_OF_CORE > NUMCORESACTIVE - 1) {
1470 BAMBOO_DEBUGPRINT_REG(msgdata[2]);
1472 BAMBOO_EXIT(0xa002);
1474 // store the object and its corresponding queue info, enqueue it later
1475 transObj->objptr = (void *)msgdata[2];
1476 transObj->length = (msglength - 3) / 2;
1477 transObj->queues = RUNMALLOC_I(sizeof(int)*(msglength - 3));
1478 for(k = 0; k < transObj->length; ++k) {
1479 transObj->queues[2*k] = msgdata[3+2*k];
1482 BAMBOO_DEBUGPRINT_REG(transObj->queues[2*k]);
1485 transObj->queues[2*k+1] = msgdata[3+2*k+1];
1488 BAMBOO_DEBUGPRINT_REG(transObj->queues[2*k+1]);
1492 // check if there is an existing duplicate item
1494 struct QueueItem * qitem = getHead(&objqueue);
1495 struct QueueItem * prev = NULL;
1496 while(qitem != NULL) {
1497 struct transObjInfo * tmpinfo =
1498 (struct transObjInfo *)(qitem->objectptr);
1499 if(tmpinfo->objptr == transObj->objptr) {
1500 // the same object, remove outdate one
1501 removeItem(&objqueue, qitem);
1507 qitem = getHead(&objqueue);
1509 qitem = getNextQueueItem(prev);
1512 addNewItem_I(&objqueue, (void *)transObj);
1514 ++(self_numreceiveobjs);
1519 // receive a stall msg
1520 if(BAMBOO_NUM_OF_CORE != STARTUPCORE) {
1521 // non startup core can not receive stall msg
1523 BAMBOO_DEBUGPRINT_REG(msgdata[1]);
1525 BAMBOO_EXIT(0xa003);
1527 if(msgdata[1] < NUMCORESACTIVE) {
1530 BAMBOO_DEBUGPRINT(0xe881);
1533 corestatus[msgdata[1]] = 0;
1534 numsendobjs[msgdata[1]] = msgdata[2];
1535 numreceiveobjs[msgdata[1]] = msgdata[3];
1540 // GC version have no lock msgs
1541 #ifndef MULTICORE_GC
1543 // receive lock request msg, handle it right now
1544 // check to see if there is a lock exist for the required obj
1545 // msgdata[1] -> lock type
1546 int data2 = msgdata[2]; // obj pointer
1547 int data3 = msgdata[3]; // lock
1548 int data4 = msgdata[4]; // request core
1549 // -1: redirected, 0: approved, 1: denied
1550 deny = processlockrequest(msgdata[1], data3, data2,
1551 data4, data4, true);
1553 // this lock request is redirected
1556 // send response msg
1557 // for 32 bit machine, the size is always 4 words
1558 int tmp = deny==1?LOCKDENY:LOCKGROUNT;
1560 cache_msg_4(data4, tmp, msgdata[1], data2, data3);
1562 send_msg_4(data4, tmp, msgdata[1], data2, data3);
1569 // receive lock grount msg
1570 if(BAMBOO_NUM_OF_CORE > NUMCORESACTIVE - 1) {
1572 BAMBOO_DEBUGPRINT_REG(msgdata[2]);
1574 BAMBOO_EXIT(0xa004);
1576 if((lockobj == msgdata[2]) && (lock2require == msgdata[3])) {
1579 BAMBOO_DEBUGPRINT(0xe882);
1588 // conflicts on lockresults
1590 BAMBOO_DEBUGPRINT_REG(msgdata[2]);
1592 BAMBOO_EXIT(0xa005);
1598 // receive lock deny msg
1599 if(BAMBOO_NUM_OF_CORE > NUMCORESACTIVE - 1) {
1601 BAMBOO_DEBUGPRINT_REG(msgdata[2]);
1603 BAMBOO_EXIT(0xa006);
1605 if((lockobj == msgdata[2]) && (lock2require == msgdata[3])) {
1608 BAMBOO_DEBUGPRINT(0xe883);
1617 // conflicts on lockresults
1619 BAMBOO_DEBUGPRINT_REG(msgdata[2]);
1621 BAMBOO_EXIT(0xa007);
1627 // receive lock release msg
1628 processlockrelease(msgdata[1], msgdata[2], 0, false);
1634 case PROFILEOUTPUT: {
1635 // receive an output profile data request msg
1636 if(BAMBOO_NUM_OF_CORE == STARTUPCORE) {
1637 // startup core can not receive profile output finish msg
1638 BAMBOO_EXIT(0xa008);
1642 BAMBOO_DEBUGPRINT(0xe885);
1646 totalexetime = msgdata[1];
1647 outputProfileData();
1649 cache_msg_2(STARTUPCORE, PROFILEFINISH, BAMBOO_NUM_OF_CORE);
1651 send_msg_2(STARTUPCORE, PROFILEFINISH, BAMBOO_NUM_OF_CORE);
1656 case PROFILEFINISH: {
1657 // receive a profile output finish msg
1658 if(BAMBOO_NUM_OF_CORE != STARTUPCORE) {
1659 // non startup core can not receive profile output finish msg
1661 BAMBOO_DEBUGPRINT_REG(msgdata[1]);
1663 BAMBOO_EXIT(0xa009);
1667 BAMBOO_DEBUGPRINT(0xe886);
1670 profilestatus[msgdata[1]] = 0;
1675 // GC version has no lock msgs
1676 #ifndef MULTICORE_GC
1677 case REDIRECTLOCK: {
1678 // receive a redirect lock request msg, handle it right now
1679 // check to see if there is a lock exist for the required obj
1680 int data1 = msgdata[1]; // lock type
1681 int data2 = msgdata[2]; // obj pointer
1682 int data3 = msgdata[3]; // redirect lock
1683 int data4 = msgdata[4]; // root request core
1684 int data5 = msgdata[5]; // request core
1685 deny = processlockrequest(msgdata[1], data3, data2, data5, data4, true);
1687 // this lock request is redirected
1690 // send response msg
1691 // for 32 bit machine, the size is always 4 words
1693 cache_msg_4(data4, deny==1?REDIRECTDENY:REDIRECTGROUNT,
1694 data1, data2, data3);
1696 send_msg_4(data4, deny==1?REDIRECTDENY:REDIRECTGROUNT,
1697 data1, data2, data3);
1703 case REDIRECTGROUNT: {
1704 // receive a lock grant msg with redirect info
1705 if(BAMBOO_NUM_OF_CORE > NUMCORESACTIVE - 1) {
1707 BAMBOO_DEBUGPRINT_REG(msgdata[2]);
1709 BAMBOO_EXIT(0xa00a);
1711 if(lockobj == msgdata[2]) {
1714 BAMBOO_DEBUGPRINT(0xe891);
1719 RuntimeHashadd_I(objRedirectLockTbl, lockobj, msgdata[3]);
1724 // conflicts on lockresults
1726 BAMBOO_DEBUGPRINT_REG(msgdata[2]);
1728 BAMBOO_EXIT(0xa00b);
1733 case REDIRECTDENY: {
1734 // receive a lock deny msg with redirect info
1735 if(BAMBOO_NUM_OF_CORE > NUMCORESACTIVE - 1) {
1737 BAMBOO_DEBUGPRINT_REG(msgdata[2]);
1739 BAMBOO_EXIT(0xa00c);
1741 if(lockobj == msgdata[2]) {
1744 BAMBOO_DEBUGPRINT(0xe892);
1753 // conflicts on lockresults
1755 BAMBOO_DEBUGPRINT_REG(msgdata[2]);
1757 BAMBOO_EXIT(0xa00d);
1762 case REDIRECTRELEASE: {
1763 // receive a lock release msg with redirect info
1764 processlockrelease(msgdata[1], msgdata[2], msgdata[3], true);
1769 case STATUSCONFIRM: {
1770 // receive a status confirm info
1771 if((BAMBOO_NUM_OF_CORE == STARTUPCORE)
1772 || (BAMBOO_NUM_OF_CORE > NUMCORESACTIVE - 1)) {
1773 // wrong core to receive such msg
1774 BAMBOO_EXIT(0xa00e);
1776 // send response msg
1779 BAMBOO_DEBUGPRINT(0xe887);
1783 cache_msg_5(STARTUPCORE, STATUSREPORT,
1784 busystatus?1:0, BAMBOO_NUM_OF_CORE,
1785 self_numsendobjs, self_numreceiveobjs);
1787 send_msg_5(STARTUPCORE, STATUSREPORT,
1788 busystatus?1:0, BAMBOO_NUM_OF_CORE,
1789 self_numsendobjs, self_numreceiveobjs);
1795 case STATUSREPORT: {
1796 // receive a status confirm info
1797 if(BAMBOO_NUM_OF_CORE != STARTUPCORE) {
1798 // wrong core to receive such msg
1800 BAMBOO_DEBUGPRINT_REG(msgdata[2]);
1802 BAMBOO_EXIT(0xa00f);
1806 BAMBOO_DEBUGPRINT(0xe888);
1812 corestatus[msgdata[2]] = msgdata[1];
1813 numsendobjs[msgdata[2]] = msgdata[3];
1814 numreceiveobjs[msgdata[2]] = msgdata[4];
1820 // receive a terminate msg
1823 BAMBOO_DEBUGPRINT(0xe889);
1832 // receive a shared memory request msg
1833 if(BAMBOO_NUM_OF_CORE != STARTUPCORE) {
1834 // wrong core to receive such msg
1836 BAMBOO_DEBUGPRINT_REG(msgdata[2]);
1838 BAMBOO_EXIT(0xa010);
1842 BAMBOO_DEBUGPRINT(0xe88a);
1847 // is currently doing gc, dump this msg
1852 void * mem = smemalloc_I(msgdata[2], msgdata[1], &allocsize);
1856 // send the start_va to request core
1858 cache_msg_3(msgdata[2], MEMRESPONSE, mem, allocsize);
1860 send_msg_3( msgdata[2], MEMRESPONSE, mem, allocsize);
1867 // receive a shared memory response msg
1870 BAMBOO_DEBUGPRINT(0xe88b);
1875 // is currently doing gc, dump this msg
1879 if(msgdata[2] == 0) {
1880 bamboo_smem_size = 0;
1884 // fill header to store the size of this mem block
1885 (*((int*)msgdata[1])) = msgdata[2];
1886 bamboo_smem_size = msgdata[2] - BAMBOO_CACHE_LINE_SIZE;
1887 bamboo_cur_msp = msgdata[1] + BAMBOO_CACHE_LINE_SIZE;
1889 bamboo_smem_size = msgdata[2];
1890 bamboo_cur_msp =(void*)(msgdata[1]);
1901 gcphase = INITPHASE;
1903 // is waiting for response of mem request
1904 // let it return NULL and start gc
1905 bamboo_smem_size = 0;
1906 bamboo_cur_msp = NULL;
1913 // receive a start GC msg
1916 BAMBOO_DEBUGPRINT(0xe88c);
1920 gcphase = MARKPHASE;
1924 case GCSTARTCOMPACT: {
1925 // a compact phase start msg
1926 gcblock2fill = msgdata[1];
1927 gcphase = COMPACTPHASE;
1931 case GCSTARTFLUSH: {
1932 // received a flush phase start msg
1933 gcphase = FLUSHPHASE;
1937 case GCFINISHINIT: {
1938 // received a init phase finish msg
1939 if(BAMBOO_NUM_OF_CORE != STARTUPCORE) {
1940 // non startup core can not receive this msg
1942 BAMBOO_DEBUGPRINT_REG(msgdata[1]);
1944 BAMBOO_EXIT(0xb001);
1947 BAMBOO_DEBUGPRINT(0xe88c);
1948 BAMBOO_DEBUGPRINT_REG(msgdata[1]);
1950 if(msgdata[1] < NUMCORES) {
1951 gccorestatus[msgdata[1]] = 0;
1955 case GCFINISHMARK: {
1956 // received a mark phase finish msg
1957 if(BAMBOO_NUM_OF_CORE != STARTUPCORE) {
1958 // non startup core can not receive this msg
1960 BAMBOO_DEBUGPRINT_REG(msgdata[1]);
1962 BAMBOO_EXIT(0xb002);
1964 if(msgdata[1] < NUMCORES) {
1965 gccorestatus[msgdata[1]] = 0;
1966 gcnumsendobjs[msgdata[1]] = msgdata[2];
1967 gcnumreceiveobjs[msgdata[1]] = msgdata[3];
1972 case GCFINISHCOMPACT: {
1973 // received a compact phase finish msg
1974 if(BAMBOO_NUM_OF_CORE != STARTUPCORE) {
1975 // non startup core can not receive this msg
1978 BAMBOO_DEBUGPRINT_REG(msgdata[1]);
1980 BAMBOO_EXIT(0xb003);
1982 int cnum = msgdata[1];
1983 int filledblocks = msgdata[2];
1984 int heaptop = msgdata[3];
1985 int data4 = msgdata[4];
1986 if(cnum < NUMCORES) {
1987 if(COMPACTPHASE == gcphase) {
1988 gcfilledblocks[cnum] = filledblocks;
1989 gcloads[cnum] = heaptop;
1996 if(gcfindSpareMem_I(&startaddr, &tomove, &dstcore, data4, cnum)) {
1998 cache_msg_4(cnum, GCMOVESTART, dstcore, startaddr, tomove);
2000 send_msg_4(cnum, GCMOVESTART, dstcore, startaddr, tomove);
2004 gccorestatus[cnum] = 0;
2005 // check if there is pending move request
2006 /*if(gcmovepending > 0) {
2008 for(j = 0; j < NUMCORES; j++) {
2009 if(gcrequiredmems[j]>0) {
2017 gcrequiredmems[j] = assignSpareMem_I(cnum,
2021 if(STARTUPCORE == j) {
2024 gcmovestartaddr = startaddr;
2025 gcblock2fill = tomove;
2028 cache_msg_4(j, GCMOVESTART, cnum, startaddr, tomove);
2030 send_msg_4(j, GCMOVESTART, cnum, startaddr, tomove);
2032 } // if(STARTUPCORE == j)
2033 if(gcrequiredmems[j] == 0) {
2036 } // if(j < NUMCORES)
2037 } // if(gcmovepending > 0) */
2039 } // if(cnum < NUMCORES)
2043 case GCFINISHFLUSH: {
2044 // received a flush phase finish msg
2045 if(BAMBOO_NUM_OF_CORE != STARTUPCORE) {
2046 // non startup core can not receive this msg
2049 BAMBOO_DEBUGPRINT_REG(msgdata[1]);
2051 BAMBOO_EXIT(0xb004);
2053 if(msgdata[1] < NUMCORES) {
2054 gccorestatus[msgdata[1]] = 0;
2060 // received a GC finish msg
2061 gcphase = FINISHPHASE;
2065 case GCMARKCONFIRM: {
2066 // received a marked phase finish confirm request msg
2067 if((BAMBOO_NUM_OF_CORE == STARTUPCORE)
2068 || (BAMBOO_NUM_OF_CORE > NUMCORES - 1)) {
2069 // wrong core to receive such msg
2070 BAMBOO_EXIT(0xb005);
2072 // send response msg
2074 cache_msg_5(STARTUPCORE, GCMARKREPORT, BAMBOO_NUM_OF_CORE,
2075 gcbusystatus, gcself_numsendobjs,
2076 gcself_numreceiveobjs);
2078 send_msg_5(STARTUPCORE, GCMARKREPORT, BAMBOO_NUM_OF_CORE,
2079 gcbusystatus, gcself_numsendobjs, gcself_numreceiveobjs);
2085 case GCMARKREPORT: {
2086 // received a marked phase finish confirm response msg
2087 if(BAMBOO_NUM_OF_CORE != STARTUPCORE) {
2088 // wrong core to receive such msg
2090 BAMBOO_DEBUGPRINT_REG(msgdata[2]);
2092 BAMBOO_EXIT(0xb006);
2097 gccorestatus[msgdata[1]] = msgdata[2];
2098 gcnumsendobjs[msgdata[1]] = msgdata[3];
2099 gcnumreceiveobjs[msgdata[1]] = msgdata[4];
2105 // received a markedObj msg
2106 gc_enqueue_I(msgdata[1]);
2107 gcself_numreceiveobjs++;
2108 gcbusystatus = true;
2113 // received a start moving objs msg
2115 gcdstcore = msgdata[1];
2116 gcmovestartaddr = msgdata[2];
2117 gcblock2fill = msgdata[3];
2121 case GCMAPREQUEST: {
2122 // received a mapping info request msg
2123 void * dstptr = NULL;
2124 RuntimeHashget(gcpointertbl, msgdata[1], &dstptr);
2125 if(NULL == dstptr) {
2126 // no such pointer in this core, something is wrong
2128 BAMBOO_DEBUGPRINT_REG(msgdata[1]);
2129 BAMBOO_DEBUGPRINT_REG(msgdata[2]);
2131 BAMBOO_EXIT(0xb007);
2133 // send back the mapping info
2135 cache_msg_3(msgdata[2], GCMAPINFO, msgdata[1], (int)dstptr);
2137 send_msg_3(msgdata[2], GCMAPINFO, msgdata[1], (int)dstptr);
2144 // received a mapping info response msg
2145 if(msgdata[1] != gcobj2map) {
2146 // obj not matched, something is wrong
2148 BAMBOO_DEBUGPRINT_REG(gcobj2map);
2149 BAMBOO_DEBUGPRINT_REG(msgdata[1]);
2151 BAMBOO_EXIT(0xb008);
2153 gcmappedobj = msgdata[2];
2154 RuntimeHashadd_I(gcpointertbl, gcobj2map, gcmappedobj);
2160 case GCLOBJREQUEST: {
2161 // received a large objs info request msg
2162 transferMarkResults_I();
2167 // received a large objs info response msg
2170 if(BAMBOO_NUM_OF_CORE > NUMCORES - 1) {
2172 BAMBOO_DEBUGPRINT_REG(msgdata[2]);
2174 BAMBOO_EXIT(0xb009);
2176 // store the mark result info
2177 int cnum = msgdata[2];
2178 gcloads[cnum] = msgdata[3];
2179 if(gcheaptop < msgdata[4]) {
2180 gcheaptop = msgdata[4];
2182 // large obj info here
2183 for(int k = 5; k < msgdata[1];) {
2184 int lobj = msgdata[k++];
2185 int length = msgdata[k++];
2186 gc_lobjenqueue_I(lobj, length, cnum);
2188 } // for(int k = 5; k < msgdata[1];)
2192 case GCLOBJMAPPING: {
2193 // received a large obj mapping info msg
2194 RuntimeHashadd_I(gcpointertbl, msgdata[1], msgdata[2]);
2203 for(; msgdataindex > 0; --msgdataindex) {
2204 msgdata[msgdataindex-1] = -1;
2206 msglength = BAMBOO_MSG_BUF_LENGTH;
2209 BAMBOO_DEBUGPRINT(0xe88d);
2213 if(BAMBOO_MSG_AVAIL() != 0) {
2226 BAMBOO_DEBUGPRINT(0xe88e);
2230 /* if(isInterrupt) {
/* Enqueue a newly flagged object into a task's parameter queue and insert a
 * task-parameter descriptor into 'activetasks' for each iterator combination
 * that becomes runnable.
 * NOTE(review): this excerpt has elided lines (closing braces, returns,
 * declarations of j/enterflags/failed are not visible); code below is kept
 * byte-identical to the visible text.  The '¶meter' tokens look like
 * entity-mangled '&parameter' -- confirm against the pristine file. */
2238 int enqueuetasks(struct parameterwrapper *parameter,
2239 struct parameterwrapper *prevptr,
2240 struct ___Object___ *ptr,
2242 int numenterflags) {
2243 void * taskpointerarray[MAXTASKPARAMS];
2245 //int numparams=parameter->task->numParameters;
2246 int numiterators=parameter->task->numTotal-1;
2249 struct taskdescriptor * task=parameter->task;
/* Register the object in this parameter's object set. */
2251 //this add the object to parameterwrapper
2252 ObjectHashadd(parameter->objectset, (int) ptr, 0, (int) enterflags,
2253 numenterflags, enterflags==NULL);
2255 /* Add enqueued object to parameter vector */
2256 taskpointerarray[parameter->slot]=ptr;
2258 /* Reset iterators */
2259 for(j=0; j<numiterators; j++) {
2260 toiReset(¶meter->iterators[j]);
2263 /* Find initial state */
2264 for(j=0; j<numiterators; j++) {
2266 if(toiHasNext(¶meter->iterators[j],taskpointerarray OPTARG(failed)))
2267 toiNext(¶meter->iterators[j], taskpointerarray OPTARG(failed));
2269 /* Need to backtrack */
2270 toiReset(¶meter->iterators[j]);
2274 /* Nothing to enqueue */
/* Build a descriptor for the current parameter combination. */
2280 /* Enqueue current state */
2282 struct taskparamdescriptor *tpd=
2283 RUNMALLOC(sizeof(struct taskparamdescriptor));
2285 tpd->numParameters=numiterators+1;
2286 tpd->parameterArray=RUNMALLOC(sizeof(void *)*(numiterators+1));
2288 for(j=0; j<=numiterators; j++) {
2289 //store the actual parameters
2290 tpd->parameterArray[j]=taskpointerarray[j];
/* Insert only if not already queued; otherwise the fresh descriptor's
 * parameter array is released (RUNFREE on the duplicate path). */
2293 if ((/*!gencontains(failedtasks, tpd)&&*/
2294 !gencontains(activetasks,tpd))) {
2295 genputtable(activetasks, tpd, tpd);
2297 RUNFREE(tpd->parameterArray);
2301 /* This loop iterates to the next parameter combination */
2302 if (numiterators==0)
2305 for(j=numiterators-1; j<numiterators; j++) {
2307 if(toiHasNext(¶meter->iterators[j],taskpointerarray OPTARG(failed)))
2308 toiNext(¶meter->iterators[j], taskpointerarray OPTARG(failed));
2310 /* Need to backtrack */
2311 toiReset(¶meter->iterators[j]);
2315 /* Nothing more to enqueue */
/* Variant of enqueuetasks that uses the *_I helpers (ObjectHashadd_I,
 * RUNMALLOC_I, genputtable_I) -- presumably the critical-section /
 * interrupts-disabled versions, matching the "_I" naming used elsewhere in
 * this file (TODO confirm).  Logic mirrors enqueuetasks().
 * NOTE(review): lines are elided in this excerpt; visible code kept
 * byte-identical, including the '¶meter' mangling. */
2323 int enqueuetasks_I(struct parameterwrapper *parameter,
2324 struct parameterwrapper *prevptr,
2325 struct ___Object___ *ptr,
2327 int numenterflags) {
2328 void * taskpointerarray[MAXTASKPARAMS];
2330 //int numparams=parameter->task->numParameters;
2331 int numiterators=parameter->task->numTotal-1;
2336 struct taskdescriptor * task=parameter->task;
/* Register the object in this parameter's object set (lock-free variant). */
2338 //this add the object to parameterwrapper
2339 ObjectHashadd_I(parameter->objectset, (int) ptr, 0, (int) enterflags,
2340 numenterflags, enterflags==NULL);
2342 /* Add enqueued object to parameter vector */
2343 taskpointerarray[parameter->slot]=ptr;
2345 /* Reset iterators */
2346 for(j=0; j<numiterators; j++) {
2347 toiReset(¶meter->iterators[j]);
2350 /* Find initial state */
2351 for(j=0; j<numiterators; j++) {
2353 if(toiHasNext(¶meter->iterators[j],taskpointerarray OPTARG(failed)))
2354 toiNext(¶meter->iterators[j], taskpointerarray OPTARG(failed));
2356 /* Need to backtrack */
2357 toiReset(¶meter->iterators[j]);
2361 /* Nothing to enqueue */
/* Build a descriptor for the current parameter combination. */
2367 /* Enqueue current state */
2369 struct taskparamdescriptor *tpd=
2370 RUNMALLOC_I(sizeof(struct taskparamdescriptor));
2372 tpd->numParameters=numiterators+1;
2373 tpd->parameterArray=RUNMALLOC_I(sizeof(void *)*(numiterators+1));
2375 for(j=0; j<=numiterators; j++) {
2376 //store the actual parameters
2377 tpd->parameterArray[j]=taskpointerarray[j];
/* Insert only if not already queued; duplicate path frees the array. */
2380 if ((/*!gencontains(failedtasks, tpd)&&*/
2381 !gencontains(activetasks,tpd))) {
2382 genputtable_I(activetasks, tpd, tpd);
2384 RUNFREE(tpd->parameterArray);
2388 /* This loop iterates to the next parameter combination */
2389 if (numiterators==0)
2392 for(j=numiterators-1; j<numiterators; j++) {
2394 if(toiHasNext(¶meter->iterators[j], taskpointerarray OPTARG(failed)))
2395 toiNext(¶meter->iterators[j], taskpointerarray OPTARG(failed));
2397 /* Need to backtrack */
2398 toiReset(¶meter->iterators[j]);
2402 /* Nothing more to enqueue */
2416 int containstag(struct ___Object___ *ptr,
2417 struct ___TagDescriptor___ *tag);
2419 #ifndef MULTICORE_GC
/* Release a write lock while recording a redirect target.
 * The owning core is derived from the lock value ((reallock >> 5) %
 * BAMBOO_TOTALCORE): if it is this core, update the local lock table inside
 * a critical section; otherwise send a REDIRECTRELEASE message to the owner.
 * Non-GC build only (enclosing #ifndef MULTICORE_GC).
 * NOTE(review): lines are elided in this excerpt (e.g. the decrement of
 * lockvalue->value between the two DEBUGPRINT_REGs is not visible); code
 * kept byte-identical to the visible text. */
2420 void releasewritelock_r(void * lock, void * redirectlock) {
2422 int reallock = (int)lock;
/* Map the lock onto its owning core. */
2423 targetcore = (reallock >> 5) % BAMBOO_TOTALCORE;
2426 BAMBOO_DEBUGPRINT(0xe671);
2427 BAMBOO_DEBUGPRINT_REG((int)lock);
2428 BAMBOO_DEBUGPRINT_REG(reallock);
2429 BAMBOO_DEBUGPRINT_REG(targetcore);
2432 if(targetcore == BAMBOO_NUM_OF_CORE) {
2433 BAMBOO_START_CRITICAL_SECTION_LOCK();
2435 BAMBOO_DEBUGPRINT(0xf001);
2437 // reside on this core
2438 if(!RuntimeHashcontainskey(locktbl, reallock)) {
2439 // no locks for this object, something is wrong
2440 BAMBOO_EXIT(0xa011);
2443 struct LockValue * lockvalue = NULL;
2445 BAMBOO_DEBUGPRINT(0xe672);
2447 RuntimeHashget(locktbl, reallock, &rwlock_obj);
2448 lockvalue = (struct LockValue *)rwlock_obj;
2450 BAMBOO_DEBUGPRINT_REG(lockvalue->value);
/* Record where subsequent lock requests should be redirected. */
2453 lockvalue->redirectlock = (int)redirectlock;
2455 BAMBOO_DEBUGPRINT_REG(lockvalue->value);
2458 BAMBOO_CLOSE_CRITICAL_SECTION_LOCK();
2460 BAMBOO_DEBUGPRINT(0xf000);
/* Remote owner: notify it with a 4-word release-with-redirect message. */
2464 // send lock release with redirect info msg
2465 // for 32 bit machine, the size is always 4 words
2466 send_msg_4(targetcore, REDIRECTRELEASE, 1, (int)lock, (int)redirectlock);
/* Main task-execution loop.  Repeatedly pops a task-parameter descriptor
 * from 'activetasks', acquires the write locks of all parameter objects
 * (in sorted lock order), re-validates that each parameter is still queued,
 * still satisfies its flag predicate, and still carries its required tags,
 * then invokes the task body and releases all locks.  Any validation
 * failure releases the grabbed locks and abandons (or re-queues) the
 * descriptor.
 * NOTE(review): many lines are elided in this excerpt (variable
 * declarations, 'break'/'goto' statements, several closing braces); code
 * below is kept byte-identical to the visible text. */
2471 void executetasks() {
2472 void * taskpointerarray[MAXTASKPARAMS+OFFSET];
2475 struct ___Object___ * tmpparam = NULL;
2476 struct parameterdescriptor * pd=NULL;
2477 struct parameterwrapper *pw=NULL;
2487 while(hashsize(activetasks)>0) {
2492 BAMBOO_DEBUGPRINT(0xe990);
2495 /* See if there are any active tasks */
2496 if (hashsize(activetasks)>0) {
2499 #ifdef ACCURATEPROFILE
2500 profileTaskStart("tpd checking");
/* Pop the next descriptor to try. */
2504 currtpd=(struct taskparamdescriptor *) getfirstkey(activetasks);
2505 genfreekey(activetasks, currtpd);
2507 numparams=currtpd->task->numParameters;
2508 numtotal=currtpd->task->numTotal;
2510 // clear the lockRedirectTbl
2511 // (TODO, this table should be empty after all locks are released)
2513 for(j = 0; j < MAXTASKPARAMS; j++) {
2514 runtime_locks[j].redirectlock = 0;
2515 runtime_locks[j].value = 0;
2517 // get all required locks
2518 runtime_locklen = 0;
2519 // check which locks are needed
2520 for(i = 0; i < numparams; i++) {
2521 void * param = currtpd->parameterArray[i];
/* Startup objects take no lock; they go straight into the call vector. */
2525 if(((struct ___Object___ *)param)->type == STARTUPTYPE) {
2527 taskpointerarray[i+OFFSET]=param;
2530 if(((struct ___Object___ *)param)->lock == NULL) {
2531 tmplock = (int)param;
2533 tmplock = (int)(((struct ___Object___ *)param)->lock);
/* Keep runtime_locks sorted by lock value (dedup + insertion shift);
 * a global lock order presumably avoids deadlock -- confirm. */
2535 // insert into the locks array
2536 for(j = 0; j < runtime_locklen; j++) {
2537 if(runtime_locks[j].value == tmplock) {
2540 } else if(runtime_locks[j].value > tmplock) {
2545 int h = runtime_locklen;
2547 runtime_locks[h].redirectlock = runtime_locks[h-1].redirectlock;
2548 runtime_locks[h].value = runtime_locks[h-1].value;
2550 runtime_locks[j].value = tmplock;
2551 runtime_locks[j].redirectlock = (int)param;
2554 } // line 2713: for(i = 0; i < numparams; i++)
2555 // grab these required locks
2557 BAMBOO_DEBUGPRINT(0xe991);
2559 for(i = 0; i < runtime_locklen; i++) {
2560 int * lock = (int *)(runtime_locks[i].redirectlock);
2562 // require locks for this parameter if it is not a startup object
2564 BAMBOO_DEBUGPRINT_REG((int)lock);
2565 BAMBOO_DEBUGPRINT_REG((int)(runtime_locks[i].value));
2568 BAMBOO_START_CRITICAL_SECTION();
2570 BAMBOO_DEBUGPRINT(0xf001);
2573 //isInterrupt = false;
/* Spin until the lock request is answered; result lands in 'lockresult'. */
2576 BAMBOO_WAITING_FOR_LOCK();
2580 while(BAMBOO_WAITING_FOR_LOCK() != -1) {
2584 grount = lockresult;
2594 //isInterrupt = true;
2596 BAMBOO_CLOSE_CRITICAL_SECTION();
2598 BAMBOO_DEBUGPRINT(0xf000);
2604 BAMBOO_DEBUGPRINT(0xe992);
2605 BAMBOO_DEBUGPRINT_REG(lock);
/* Lock denied: roll back the locks already held and re-queue the task. */
2607 // can not get the lock, try later
2608 // releas all grabbed locks for previous parameters
2609 for(j = 0; j < i; ++j) {
2610 lock = (int*)(runtime_locks[j].redirectlock);
2611 releasewritelock(lock);
2613 genputtable(activetasks, currtpd, currtpd);
2614 if(hashsize(activetasks) == 1) {
2615 // only one task right now, wait a little while before next try
2621 #ifdef ACCURATEPROFILE
2622 // fail, set the end of the checkTaskInfo
2627 } // line 2794: if(grount == 0)
2628 } // line 2752: for(i = 0; i < runtime_locklen; i++)
2631 BAMBOO_DEBUGPRINT(0xe993);
2633 /* Make sure that the parameters are still in the queues */
2634 for(i=0; i<numparams; i++) {
2635 void * parameter=currtpd->parameterArray[i];
/* Invalidate stale cached copies before inspecting the object. */
2639 BAMBOO_CACHE_FLUSH_RANGE((int)parameter,
2640 classsize[((struct ___Object___ *)parameter)->type]);
2642 tmpparam = (struct ___Object___ *)parameter;
2643 pd=currtpd->task->descriptorarray[i];
2644 pw=(struct parameterwrapper *) pd->queue;
2645 /* Check that object is still in queue */
2647 if (!ObjectHashcontainskey(pw->objectset, (int) parameter)) {
2649 BAMBOO_DEBUGPRINT(0xe994);
2650 BAMBOO_DEBUGPRINT_REG(parameter);
2652 // release grabbed locks
2653 for(j = 0; j < runtime_locklen; ++j) {
2654 int * lock = (int *)(runtime_locks[j].redirectlock);
2655 releasewritelock(lock);
2657 RUNFREE(currtpd->parameterArray);
2663 /* Check if the object's flags still meets requirements */
2667 for(tmpi = 0; tmpi < pw->numberofterms; ++tmpi) {
2668 andmask=pw->intarray[tmpi*2];
2669 checkmask=pw->intarray[tmpi*2+1];
2670 if((((struct ___Object___ *)parameter)->flag&andmask)==checkmask) {
/* No flag term matched: drop the object from the queue entirely. */
2676 // flags are never suitable
2677 // remove this obj from the queue
2679 int UNUSED, UNUSED2;
2682 BAMBOO_DEBUGPRINT(0xe995);
2683 BAMBOO_DEBUGPRINT_REG(parameter);
2685 ObjectHashget(pw->objectset, (int) parameter, (int *) &next,
2686 (int *) &enterflags, &UNUSED, &UNUSED2);
2687 ObjectHashremove(pw->objectset, (int)parameter);
2688 if (enterflags!=NULL)
2689 RUNFREE(enterflags);
2690 // release grabbed locks
2691 for(j = 0; j < runtime_locklen; ++j) {
2692 int * lock = (int *)(runtime_locks[j].redirectlock);
2693 releasewritelock(lock);
2695 RUNFREE(currtpd->parameterArray);
2699 #ifdef ACCURATEPROFILE
2700 // fail, set the end of the checkTaskInfo
2705 } // line 2878: if (!ismet)
2709 /* Check that object still has necessary tags */
2710 for(j=0; j<pd->numbertags; j++) {
2711 int slotid=pd->tagarray[2*j]+numparams;
2712 struct ___TagDescriptor___ *tagd=currtpd->parameterArray[slotid];
2713 if (!containstag(parameter, tagd)) {
2715 BAMBOO_DEBUGPRINT(0xe996);
2718 // release grabbed locks
2720 for(tmpj = 0; tmpj < runtime_locklen; ++tmpj) {
2721 int * lock = (int *)(runtime_locks[tmpj].redirectlock);
2722 releasewritelock(lock);
2725 RUNFREE(currtpd->parameterArray);
2729 } // line2911: if (!containstag(parameter, tagd))
2730 } // line 2808: for(j=0; j<pd->numbertags; j++)
2732 taskpointerarray[i+OFFSET]=parameter;
2733 } // line 2824: for(i=0; i<numparams; i++)
/* Copy remaining (tag) slots beyond the object parameters. */
2735 for(; i<numtotal; i++) {
2736 taskpointerarray[i+OFFSET]=currtpd->parameterArray[i];
2741 /* Actually call task */
2743 ((int *)taskpointerarray)[0]=currtpd->numParameters;
2744 taskpointerarray[1]=NULL;
2747 #ifdef ACCURATEPROFILE
2748 // check finish, set the end of the checkTaskInfo
2751 profileTaskStart(currtpd->task->name);
2755 BAMBOO_DEBUGPRINT(0xe997);
/* Invoke the task body through its function pointer. */
2757 ((void(*) (void **))currtpd->task->taskptr)(taskpointerarray);
2759 #ifdef ACCURATEPROFILE
2760 // task finish, set the end of the checkTaskInfo
2762 // new a PostTaskInfo for the post-task execution
2763 profileTaskStart("post task execution");
2767 BAMBOO_DEBUGPRINT(0xe998);
2768 BAMBOO_DEBUGPRINT_REG(islock);
2773 BAMBOO_DEBUGPRINT(0xe999);
/* Post-task: release every lock, honoring any recorded redirects. */
2775 for(i = 0; i < runtime_locklen; ++i) {
2776 void * ptr = (void *)(runtime_locks[i].redirectlock);
2777 int * lock = (int *)(runtime_locks[i].value);
2779 BAMBOO_DEBUGPRINT_REG((int)ptr);
2780 BAMBOO_DEBUGPRINT_REG((int)lock);
2781 BAMBOO_DEBUGPRINT_REG(*((int*)lock+5));
2783 #ifndef MULTICORE_GC
2784 if(RuntimeHashcontainskey(lockRedirectTbl, (int)lock)) {
2786 RuntimeHashget(lockRedirectTbl, (int)lock, &redirectlock);
2787 RuntimeHashremovekey(lockRedirectTbl, (int)lock);
2788 releasewritelock_r(lock, (int *)redirectlock);
2793 releasewritelock(ptr);
2796 } // line 3015: if(islock)
2799 // post task execution finish, set the end of the postTaskInfo
2803 // Free up task parameter descriptor
2804 RUNFREE(currtpd->parameterArray);
2808 BAMBOO_DEBUGPRINT(0xe99a);
2811 } // if (hashsize(activetasks)>0)
2812 } // while(hashsize(activetasks)>0)
2814 BAMBOO_DEBUGPRINT(0xe99b);
/* Create a tag iterator for every tag requirement of parameter descriptor
 * 'pd' that has not yet been bound (statusarray slot still 0), advancing
 * *iteratorcount for each one and marking the slot as bound.
 * NOTE(review): lines are elided (closing braces, the *iteratorcount
 * increment, and some parameters of the signature are not visible); code
 * kept byte-identical to the visible text. */
2818 /* This function processes an objects tags */
2819 void processtags(struct parameterdescriptor *pd,
2821 struct parameterwrapper *parameter,
2822 int * iteratorcount,
2827 for(i=0; i<pd->numbertags; i++) {
2828 int slotid=pd->tagarray[2*i];
2829 int tagid=pd->tagarray[2*i+1];
/* Only build an iterator for tags not already handled. */
2831 if (statusarray[slotid+numparams]==0) {
2832 parameter->iterators[*iteratorcount].istag=1;
2833 parameter->iterators[*iteratorcount].tagid=tagid;
2834 parameter->iterators[*iteratorcount].slot=slotid+numparams;
2835 parameter->iterators[*iteratorcount].tagobjectslot=index;
2836 statusarray[slotid+numparams]=1;
/* Create an object iterator for parameter slot 'index', backed by that
 * parameter's object set, and bind any already-enqueued tags to it so the
 * search space is narrowed by tag membership.
 * NOTE(review): lines are elided (parts of the signature, tagcount
 * initialization/increment, closing braces); code kept byte-identical to
 * the visible text. */
2843 void processobject(struct parameterwrapper *parameter,
2845 struct parameterdescriptor *pd,
2851 struct ObjectHash * objectset=
2852 ((struct parameterwrapper *)pd->queue)->objectset;
2854 parameter->iterators[*iteratorcount].istag=0;
2855 parameter->iterators[*iteratorcount].slot=index;
2856 parameter->iterators[*iteratorcount].objectset=objectset;
2857 statusarray[index]=1;
/* Attach previously bound tags as constraints on this iterator. */
2859 for(i=0; i<pd->numbertags; i++) {
2860 int slotid=pd->tagarray[2*i];
2861 //int tagid=pd->tagarray[2*i+1];
2862 if (statusarray[slotid+numparams]!=0) {
2863 /* This tag has already been enqueued, use it to narrow search */
2864 parameter->iterators[*iteratorcount].tagbindings[tagcount]=
2869 parameter->iterators[*iteratorcount].numtags=tagcount;
/* Build the iterator chain for one (task, parameter) pair in three passes:
 * (1) the initial parameter's tags, (2) other parameters whose tags are
 * already bound, (3) remaining tagged then untagged parameters -- so that
 * tag-constrained iterators come first and narrow the search.
 * NOTE(review): lines are elided ('index' parameter of the signature and
 * several closing braces are not visible); code kept byte-identical to the
 * visible text. */
2876 void builditerators(struct taskdescriptor * task,
2878 struct parameterwrapper * parameter) {
2879 int statusarray[MAXTASKPARAMS];
2881 int numparams=task->numParameters;
2882 int iteratorcount=0;
2883 for(i=0; i<MAXTASKPARAMS; i++) statusarray[i]=0;
2885 statusarray[index]=1; /* Initial parameter */
2886 /* Process tags for initial iterator */
2888 processtags(task->descriptorarray[index], index, parameter,
2889 &iteratorcount, statusarray, numparams);
2893 /* Check for objects with existing tags */
2894 for(i=0; i<numparams; i++) {
2895 if (statusarray[i]==0) {
2896 struct parameterdescriptor *pd=task->descriptorarray[i];
2898 for(j=0; j<pd->numbertags; j++) {
2899 int slotid=pd->tagarray[2*j];
2900 if(statusarray[slotid+numparams]!=0) {
2901 processobject(parameter, i, pd, &iteratorcount, statusarray,
2903 processtags(pd, i, parameter, &iteratorcount, statusarray, numparams);
2910 /* Next do objects w/ unbound tags*/
2912 for(i=0; i<numparams; i++) {
2913 if (statusarray[i]==0) {
2914 struct parameterdescriptor *pd=task->descriptorarray[i];
2915 if (pd->numbertags>0) {
2916 processobject(parameter, i, pd, &iteratorcount, statusarray, numparams);
2917 processtags(pd, i, parameter, &iteratorcount, statusarray, numparams);
2923 /* Nothing with a tag enqueued */
2925 for(i=0; i<numparams; i++) {
2926 if (statusarray[i]==0) {
2927 struct parameterdescriptor *pd=task->descriptorarray[i];
2928 processobject(parameter, i, pd, &iteratorcount, statusarray, numparams);
2929 processtags(pd, i, parameter, &iteratorcount, statusarray, numparams);
/* NOTE(review): fragment of a debug-dump routine whose function header is
 * not visible in this excerpt (presumably a printdbg-style helper -- TODO
 * confirm).  It walks this core's task array and, for each parameter's
 * object set, prints every queued object, its flag, and its tag(s).  Code
 * kept byte-identical to the visible text. */
2942 if(BAMBOO_NUM_OF_CORE > NUMCORESACTIVE - 1) {
2945 for(i=0; i<numtasks[BAMBOO_NUM_OF_CORE]; i++) {
2946 struct taskdescriptor * task=taskarray[BAMBOO_NUM_OF_CORE][i];
2948 printf("%s\n", task->name);
2950 for(j=0; j<task->numParameters; j++) {
2951 struct parameterdescriptor *param=task->descriptorarray[j];
2952 struct parameterwrapper *parameter=param->queue;
2953 struct ObjectHash * set=parameter->objectset;
2954 struct ObjectIterator objit;
2956 printf("  Parameter %d\n", j);
2958 ObjectHashiterator(set, &objit);
2959 while(ObjhasNext(&objit)) {
2960 struct ___Object___ * obj=(struct ___Object___ *)Objkey(&objit);
2961 struct ___Object___ * tagptr=obj->___tags___;
2962 int nonfailed=Objdata4(&objit);
2963 int numflags=Objdata3(&objit);
2964 int flags=Objdata2(&objit);
2967 printf("    Contains %lx\n", obj);
2968 printf("      flag=%d\n", obj->flag);
/* Single tag vs. array of tags: ___cachedCode___ holds the tag count. */
2971 } else if (tagptr->type==TAGTYPE) {
2973 printf("      tag=%lx\n",tagptr);
2979 struct ArrayObject *ao=(struct ArrayObject *)tagptr;
2980 for(; tagindex<ao->___cachedCode___; tagindex++) {
2982 printf("      tag=%lx\n",ARRAYGET(ao, struct ___TagDescriptor___*,
/* Initialize per-parameter queues for every task registered on this core:
 * allocate each parameter's object set, link it back to its task, then
 * build the iterator chains.  Cores beyond NUMCORESACTIVE-1 presumably
 * return early at the guard -- the elided body of that branch is not
 * visible; code kept byte-identical to the visible text. */
2995 /* This function processes the task information to create queues for
2996    each parameter type. */
2998 void processtasks() {
3000 if(BAMBOO_NUM_OF_CORE > NUMCORESACTIVE - 1) {
3003 for(i=0; i<numtasks[BAMBOO_NUM_OF_CORE]; i++) {
3004 struct taskdescriptor * task=taskarray[BAMBOO_NUM_OF_CORE][i];
3007 /* Build objectsets */
3008 for(j=0; j<task->numParameters; j++) {
3009 struct parameterdescriptor *param=task->descriptorarray[j];
3010 struct parameterwrapper *parameter=param->queue;
3011 parameter->objectset=allocateObjectHash(10);
3012 parameter->task=task;
3015 /* Build iterators for parameters */
3016 for(j=0; j<task->numParameters; j++) {
3017 struct parameterdescriptor *param=task->descriptorarray[j];
3018 struct parameterwrapper *parameter=param->queue;
3019 builditerators(task, j, parameter);
/* Reset a tag-object iterator to its initial position.  Three cases (two
 * are elided in this excerpt): tag-slot iterators and tag-constrained
 * iterators reset an index, plain object iterators restart the ObjectHash
 * iteration.  Code kept byte-identical to the visible text. */
3024 void toiReset(struct tagobjectiterator * it) {
3027 } else if (it->numtags>0) {
3030 ObjectHashiterator(it->objectset, &it->it);
/* Return whether the iterator can produce another binding for its slot.
 * Three cases: (a) iterating the tags attached to an already-bound object
 * (single tag vs. tag array), (b) iterating objects located via a bound
 * tag and filtered by the remaining tag constraints and set membership,
 * (c) plain iteration over the parameter's object set.
 * NOTE(review): lines are elided (the istag branch guard, several
 * return statements, closing braces); code kept byte-identical to the
 * visible text. */
3034 int toiHasNext(struct tagobjectiterator *it,
3035 void ** objectarray OPTARG(int * failed)) {
3038 /* Get object with tags */
3039 struct ___Object___ *obj=objectarray[it->tagobjectslot];
3040 struct ___Object___ *tagptr=obj->___tags___;
3041 if (tagptr->type==TAGTYPE) {
3042 if ((it->tagobjindex==0)&& /* First object */
3043 (it->tagid==((struct ___TagDescriptor___ *)tagptr)->flag)) /* Right tag type */
3048 struct ArrayObject *ao=(struct ArrayObject *) tagptr;
3049 int tagindex=it->tagobjindex;
/* Scan the tag array for the next tag of the requested type. */
3050 for(; tagindex<ao->___cachedCode___; tagindex++) {
3051 struct ___TagDescriptor___ *td=
3052 ARRAYGET(ao, struct ___TagDescriptor___ *, tagindex);
3053 if (td->flag==it->tagid) {
3054 it->tagobjindex=tagindex; /* Found right type of tag */
3060 } else if (it->numtags>0) {
3061 /* Use tags to locate appropriate objects */
3062 struct ___TagDescriptor___ *tag=objectarray[it->tagbindings[0]];
3063 struct ___Object___ *objptr=tag->flagptr;
3065 if (objptr->type!=OBJECTARRAYTYPE) {
3066 if (it->tagobjindex>0)
3068 if (!ObjectHashcontainskey(it->objectset, (int) objptr))
/* Candidate must also satisfy every remaining tag binding. */
3070 for(i=1; i<it->numtags; i++) {
3071 struct ___TagDescriptor___ *tag2=objectarray[it->tagbindings[i]];
3072 if (!containstag(objptr,tag2))
3077 struct ArrayObject *ao=(struct ArrayObject *) objptr;
3080 for(tagindex=it->tagobjindex;tagindex<ao->___cachedCode___;tagindex++) {
3081 struct ___Object___ *objptr=ARRAYGET(ao, struct ___Object___*, tagindex);
3082 if (!ObjectHashcontainskey(it->objectset, (int) objptr))
3084 for(i=1; i<it->numtags; i++) {
3085 struct ___TagDescriptor___ *tag2=objectarray[it->tagbindings[i]];
3086 if (!containstag(objptr,tag2))
3089 it->tagobjindex=tagindex;
3094 it->tagobjindex=tagindex;
/* Fallback: plain object-set iteration. */
3098 return ObjhasNext(&it->it);
/* Return whether object 'ptr' is among the objects bound to 'tag'.
 * tag->flagptr is either a single object or an OBJECTARRAYTYPE array
 * (element count in ___cachedCode___).
 * NOTE(review): the return statements and the single-object comparison
 * branch are elided in this excerpt; code kept byte-identical to the
 * visible text. */
3102 int containstag(struct ___Object___ *ptr,
3103 struct ___TagDescriptor___ *tag) {
3105 struct ___Object___ * objptr=tag->flagptr;
3106 if (objptr->type==OBJECTARRAYTYPE) {
3107 struct ArrayObject *ao=(struct ArrayObject *)objptr;
3108 for(j=0; j<ao->___cachedCode___; j++) {
3109 if (ptr==ARRAYGET(ao, struct ___Object___*, j)) {
/* Advance the iterator and store the next binding into
 * objectarray[it->slot].  Mirrors the three cases of toiHasNext (which is
 * expected to have been called first -- "hasNext has all of the
 * intelligence").
 * NOTE(review): this excerpt ends mid-function (the closing lines after
 * the plain-iteration case are not visible) and some branch guards are
 * elided; code kept byte-identical to the visible text. */
3119 void toiNext(struct tagobjectiterator *it,
3120 void ** objectarray OPTARG(int * failed)) {
3121 /* hasNext has all of the intelligence */
3124 /* Get object with tags */
3125 struct ___Object___ *obj=objectarray[it->tagobjectslot];
3126 struct ___Object___ *tagptr=obj->___tags___;
3127 if (tagptr->type==TAGTYPE) {
3129 objectarray[it->slot]=tagptr;
3131 struct ArrayObject *ao=(struct ArrayObject *) tagptr;
3132 objectarray[it->slot]=
3133 ARRAYGET(ao, struct ___TagDescriptor___ *, it->tagobjindex++);
3135 } else if (it->numtags>0) {
3136 /* Use tags to locate appropriate objects */
3137 struct ___TagDescriptor___ *tag=objectarray[it->tagbindings[0]];
3138 struct ___Object___ *objptr=tag->flagptr;
3139 if (objptr->type!=OBJECTARRAYTYPE) {
3141 objectarray[it->slot]=objptr;
3143 struct ArrayObject *ao=(struct ArrayObject *) objptr;
3144 objectarray[it->slot]=
3145 ARRAYGET(ao, struct ___Object___ *, it->tagobjindex++);
3148 /* Iterate object */
3149 objectarray[it->slot]=(void *)Objkey(&it->it);