8 #include "mlp_runtime.h"
9 #include "workschedule.h"
10 #include "methodheaders.h"
// Per-thread runtime state.  Each worker thread tracks the SESE task
// record it is executing, a child-task counter, and a semaphore used
// when this thread must stall (see psem_give_tag uses below).
13 __thread SESEcommon* runningSESE;
14 __thread int childSESE=0;
16 __thread psemaphore runningSESEstallSem;
19 // this is for using a memPool to allocate task records,
20 // pass this into the poolcreate so it will run your
21 // custom init code ONLY for fresh records, reused records
22 // can be returned as is
// Pool-init hook: set up the pthread condition variable and mutex of a
// brand-new SESEcommon record exactly once; recycled records keep the
// already-initialized primitives.
23 void freshTaskRecordInitializer(void* seseRecord) {
24 SESEcommon* c = (SESEcommon*) seseRecord;
25 pthread_cond_init(&(c->runningChildrenCond), NULL);
26 pthread_mutex_init(&(c->lock), NULL);
// Allocate a raw task record of 'size' bytes via RUNMALLOC.  The printf
// presumably sits under an allocation-failure check whose guard line is
// elided from this listing -- TODO confirm against full source.
34 void* mlpAllocSESErecord(int size) {
35 void* newrec = RUNMALLOC(size);
37 printf("mlpAllocSESErecord did not obtain memory!\n");
// Release a task record previously obtained from mlpAllocSESErecord.
43 void mlpFreeSESErecord(SESEcommon* seseRecord) {
// Allocate an array of 'numMemoryQueue' memory queues, each freshly
// created via createMemoryQueue().  Caller owns the returned array.
47 MemoryQueue** mlpCreateMemoryQueueArray(int numMemoryQueue) {
49 MemoryQueue** newMemoryQueue=(MemoryQueue**)RUNMALLOC(sizeof( MemoryQueue* ) * numMemoryQueue);
50 for(i=0; i<numMemoryQueue; i++) {
51 newMemoryQueue[i]=createMemoryQueue();
53 return newMemoryQueue;
// Build a fine-grained (read/write) rentry for task 'seseToIssue' whose
// tracked object is reached through the dynamic pointer 'dynID'.  The
// record comes from the heap when the task mempool is disabled,
// otherwise from the queue's per-queue rentry pool.
56 REntry* mlpCreateFineREntry(MemoryQueue* q, int type, SESEcommon* seseToIssue, void* dynID) {
57 #ifdef OOO_DISABLE_TASKMEMPOOL
58 REntry* newREntry=(REntry*)RUNMALLOC(sizeof(REntry));
60 REntry* newREntry=poolalloc(q->rentrypool);
63 newREntry->seseRec=seseToIssue;
64 newREntry->pointer=dynID;
// Build a coarse/SCC-style rentry for 'seseToIssue'.  Two alternate
// signatures appear here (with and without an RCR conflict 'mask');
// they are presumably selected by an #ifdef elided from this listing --
// TODO confirm the guarding macro in the full source.
69 REntry* mlpCreateREntry(MemoryQueue* q, int type, SESEcommon* seseToIssue, INTPTR mask) {
71 REntry* mlpCreateREntry(MemoryQueue* q, int type, SESEcommon* seseToIssue) {
73 #ifdef OOO_DISABLE_TASKMEMPOOL
74 REntry* newREntry=(REntry*)RUNMALLOC(sizeof(REntry));
76 REntry* newREntry=poolalloc(q->rentrypool);
79 newREntry->seseRec=seseToIssue;
// Predicate: entry was issued at a parent stall site (any PARENT* type).
86 int isParent(REntry *r) {
87 if (r->type==PARENTREAD || r->type==PARENTWRITE || r->type==PARENTCOARSE) {
// Predicate: entry is a parent-issued coarse entry.
94 int isParentCoarse(REntry *r) {
95 if (r->type==PARENTCOARSE) {
// Predicate: fine-grained read entry (child READ or parent PARENTREAD).
102 int isFineRead(REntry *r) {
103 if (r->type==READ || r->type==PARENTREAD) {
// Predicate: fine-grained write entry (child WRITE or parent PARENTWRITE).
110 int isFineWrite(REntry *r) {
111 if (r->type==WRITE || r->type==PARENTWRITE) {
// Predicate: coarse entry (child COARSE or parent PARENTCOARSE).
118 int isCoarse(REntry *r) {
119 if(r->type==COARSE || r->type==PARENTCOARSE) {
// Predicate: sequentially-consistent-commit (SCCITEM) entry.
126 int isSCC(REntry *r) {
127 if(r->type==SCCITEM) {
// Predicate: queue stage is a single-item (SCC) stage.
134 int isSingleItem(MemoryQueueItem *qItem) {
135 if(qItem->type==SINGLEITEM) {
// Predicate: queue stage is a hashtable (fine-grained) stage.
142 int isHashtable(MemoryQueueItem *qItem) {
143 if(qItem->type==HASHTABLE) {
// Predicate: queue stage is a vector (coarse) stage.
150 int isVector(MemoryQueueItem *qItem) {
151 if(qItem->type==VECTOR) {
// Predicate: hashtable bin item groups readers (READBIN).
158 int isReadBinItem(BinItem* b) {
159 if(b->type==READBIN) {
// Predicate: hashtable bin item is a single writer (WRITEBIN).
166 int isWriteBinItem(BinItem* b) {
167 if(b->type==WRITEBIN) {
// Map an object id to a hashtable bin index by masking with H_MASK.
174 int generateKey(unsigned int data) {
175 return (data&H_MASK);
// Allocate a hashtable stage: NUMBINS empty bins (head/tail NULL) and
// no unresolved-pointer queue.  unresolvedQueue doubles as a spinlock
// word elsewhere (see ADDTABLE / resolvePointer).
178 Hashtable* createHashtable() {
180 Hashtable* newTable=(Hashtable*)RUNMALLOC(sizeof(Hashtable));
181 newTable->item.type=HASHTABLE;
182 for(i=0; i<NUMBINS; i++) {
183 newTable->array[i]=(BinElement*)RUNMALLOC(sizeof(BinElement));
184 newTable->array[i]->head=NULL;
185 newTable->array[i]->tail=NULL;
187 newTable->unresolvedQueue=NULL;
// Allocate a writer bin item (holds exactly one write rentry).
191 WriteBinItem* createWriteBinItem() {
192 WriteBinItem* binitem=(WriteBinItem*)RUNMALLOC(sizeof(WriteBinItem));
193 binitem->item.type=WRITEBIN;
// Allocate a reader bin item (groups up to NUMREAD read rentries).
197 ReadBinItem* createReadBinItem() {
198 ReadBinItem* binitem=(ReadBinItem*)RUNMALLOC(sizeof(ReadBinItem));
200 binitem->item.type=READBIN;
// Allocate a vector stage for coarse entries (up to NUMITEMS slots).
204 Vector* createVector() {
205 Vector* vector=(Vector*)RUNMALLOC(sizeof(Vector));
207 vector->item.type=VECTOR;
// Body of the SCC-stage constructor (its signature line is elided from
// this listing); allocates a single-item stage for SCC entries.
212 SCC* scc=(SCC*)RUNMALLOC(sizeof(SCC));
213 scc->item.type=SINGLEITEM;
// Build a memory queue seeded with a dummy stage (type==3) so head and
// tail are always non-NULL; later code tests type==3 to detect and
// replace the dummy.  A per-queue rentry pool is created unless task
// mempools are disabled.
217 MemoryQueue* createMemoryQueue() {
218 MemoryQueue* queue = (MemoryQueue*)RUNMALLOC(sizeof(MemoryQueue));
219 MemoryQueueItem* dummy=(MemoryQueueItem*)RUNMALLOC(sizeof(MemoryQueueItem));
220 dummy->type=3; // dummy type
225 #ifndef OOO_DISABLE_TASKMEMPOOL
226 queue->rentrypool = poolcreate(sizeof(REntry), NULL);
// Dispatch an rentry to the stage type matching its kind:
// fine read/write -> hashtable, coarse -> vector, SCC -> SCC stage.
// Returns the stage's READY/NOTREADY verdict.
231 int ADDRENTRY(MemoryQueue * q, REntry * r) {
232 if (isFineRead(r) || isFineWrite(r)) {
233 return ADDTABLE(q, r);
234 } else if (isCoarse(r)) {
235 return ADDVECTOR(q, r);
236 } else if (isSCC(r)) {
// Insert a fine-grained entry into the queue's hashtable stage,
// appending a new hashtable to the queue if the current tail is not
// one.  Entries whose dynamic pointer (*r->pointer) is still 0, or
// that arrive while the table's unresolvedQueue exists, are deferred
// into that queue instead of being hashed immediately.
// Locking convention: unresolvedQueue and each bin head are used as
// spinlock words -- LOCKXCHG in a loop swaps in the sentinel 0x1 and
// the old value is written back to release.
// NOTE(review): several source lines are elided from this listing, so
// the control flow shown here is incomplete.
241 int ADDTABLE(MemoryQueue *q, REntry *r) {
242 if(!isHashtable(q->tail)) {
244 MemoryQueueItem* tail=q->tail;
245 //optimization only on next line....DO NOT TRUST THIS TO RETIRE PARENTS!!!!
246 if (isParent(r) && tail->total==0 && q->tail==q->head) {
251 Hashtable* h=createHashtable();
252 tail->next=(MemoryQueueItem*)h;
253 //************NEED memory barrier here to ensure compiler does not cache Q.tail.status********
255 if (tail->status==READY && tail->total==0 && q->tail==q->head) {
256 //previous Q item is finished
257 h->item.status=READY;
259 q->tail=(MemoryQueueItem*)h;
260 // handle the dummy queue-item case (head->type==3 marks the dummy)
261 if(q->head->type==3) {
262 q->head=(MemoryQueueItem*)h;
266 //at this point, have table
267 Hashtable* table=(Hashtable*)q->tail;
268 r->qitem=(MemoryQueueItem *) table; // set rentry's hashtable
// Defer into the unresolved queue when the pointer is unresolved or a
// queue already exists (preserves FIFO order of deferred entries).
269 if( *(r->pointer)==0 ||
270 ( *(r->pointer)!=0 &&
272 table->unresolvedQueue!=NULL
276 // grab lock on the queue
278 val=(struct Queue*)0x1;
279 val=(struct Queue*)LOCKXCHG((unsigned INTPTR*)&(table->unresolvedQueue), (unsigned INTPTR)val);
280 } while(val==(struct Queue*)0x1);
282 //queue is null, first case
283 if(*(r->pointer)!=0) {
284 // check whether pointer is already resolved, or not.
285 table->unresolvedQueue=NULL; //released lock;
286 return ADDTABLEITEM(table,r,TRUE);
288 struct Queue* queue=createQueue();
289 addNewItemBack(queue,r);
290 atomic_inc(&table->item.total);
291 table->unresolvedQueue=queue; // expose new queue
293 // add unresolved rentry at the end of the queue.
294 addNewItemBack(val,r);
295 atomic_inc(&table->item.total);
296 table->unresolvedQueue=val; // released lock
302 // leave this--its a helpful test when things are going bonkers
303 //if( OBJPTRPTR_2_OBJOID( r->pointer ) == 0 ) {
304 // // we started numbering object ID's at 1, if we try to
305 // // hash a zero oid, something BAD is about to happen!
306 // printf( "Tried to insert invalid object type=%d into mem Q hashtable!\n",
307 // OBJPTRPTR_2_OBJTYPE( r->pointer ) );
// Resolved path: hash the object id, lock the bin, and insert.
310 int key=generateKey(OBJPTRPTR_2_OBJOID(r->pointer) );
313 BinElement* bin=table->array[key];
314 val=(BinItem*)LOCKXCHG((unsigned INTPTR*)&(bin->head), (unsigned INTPTR)val); //note...talk to me about optimizations here.
315 } while(val==(BinItem*)0x1);
316 //at this point have locked bin
318 return EMPTYBINCASE(table, table->array[key], r, TRUE);
320 if (isFineWrite(r)) {
321 return WRITEBINCASE(table, r, val, key, TRUE);
322 } else if (isFineRead(r)) {
323 return READBINCASE(table, r, val, key, TRUE);
// Insert an rentry directly into 'table' (pointer already resolved):
// hash, spin-lock the bin via LOCKXCHG on bin->head, then dispatch to
// the empty-bin / write / read cases.  'inc' is forwarded so callers
// that pre-counted the entry (e.g. deferred-queue paths) can pass FALSE
// to avoid double-incrementing the table total.
328 int ADDTABLEITEM(Hashtable* table, REntry* r, int inc) {
331 int key=generateKey(OBJPTRPTR_2_OBJOID(r->pointer) );
334 BinElement* bin=table->array[key];
335 val=(BinItem*)LOCKXCHG((unsigned INTPTR*)&(bin->head), (unsigned INTPTR)val);
336 } while(val==(BinItem*)0x1);
337 //at this point have locked bin
339 return EMPTYBINCASE(table, table->array[key], r, inc);
341 if (isFineWrite(r)) {
342 return WRITEBINCASE(table, r, val, key, inc);
343 } else if (isFineRead(r)) {
344 return READBINCASE(table, r, val, key, inc);
// Insert into an empty bin: wrap the rentry in a fresh write or read
// bin item; if the table stage is READY the new item can run at once.
// Writing be->head releases the bin spinlock taken by the caller.
349 int EMPTYBINCASE(Hashtable *T, BinElement* be, REntry *r, int inc) {
352 if (isFineWrite(r)) {
353 b=(BinItem*)createWriteBinItem();
354 ((WriteBinItem*)b)->val=r; //<-only different statement
355 } else if (isFineRead(r)) {
356 b=(BinItem*)createReadBinItem();
357 ReadBinItem* readbin=(ReadBinItem*)b;
358 readbin->array[readbin->index++]=r;
362 if (T->item.status==READY) {
363 //current entry is ready
367 be->head=NULL; // released lock
376 atomic_inc(&T->item.total);
378 r->qitem=(MemoryQueueItem *)T;
381 be->head=b; //released lock
// Append a write entry behind the existing bin chain.  A writer is
// READY only if everything ahead of it in the bin has drained; the
// walk over 'val' checks whether anything with outstanding work sits
// in front of the new item.  be->head writes release the bin lock.
385 int WRITEBINCASE(Hashtable *T, REntry *r, BinItem *val, int key, int inc) {
386 //chain of bins exists => tail is valid
387 //if there is something in front of us, then we are not ready
390 BinElement* be=T->array[key];
392 BinItem *bintail=be->tail;
394 WriteBinItem *b=createWriteBinItem();
398 // note: If current table clears all dependencies, then write bin is ready
402 atomic_inc(&T->item.total);
405 r->qitem=(MemoryQueueItem *)T;
406 r->binitem=(BinItem*)b;
408 be->tail->next=(BinItem*)b;
409 //need to check if we can go...
411 if (T->item.status==READY) {
412 for(; val!=NULL; val=val->next) {
413 if (val==((BinItem *)b)) {
417 b->item.status=retval; //unsure if really needed at this point..
418 be->head=NULL; // released lock
422 } else if (val->total!=0) {
428 b->item.status=retval;
429 be->tail=(BinItem*)b;
// Append a read entry: if the bin tail is already a read group, try to
// join it (TAILREADCASE); otherwise a writer is in the way and a new
// read group must be chained behind it (TAILWRITECASE).
434 int READBINCASE(Hashtable *T, REntry *r, BinItem *val, int key, int inc) {
435 BinItem * bintail=T->array[key]->tail;
436 if (isReadBinItem(bintail)) {
437 return TAILREADCASE(T, r, val, bintail, key, inc);
438 } else if (!isReadBinItem(bintail)) {
439 TAILWRITECASE(T, r, val, bintail, key, inc);
// Join (or extend) the read group at the bin tail.  If the group is
// full (index==NUMREAD) a new read group is chained after it and
// inherits the computed status; otherwise the entry is folded into the
// existing tail group.  Writing array[key]->head releases the bin lock.
444 int TAILREADCASE(Hashtable *T, REntry *r, BinItem *val, BinItem *bintail, int key, int inc) {
445 ReadBinItem * readbintail=(ReadBinItem*)T->array[key]->tail;
447 if (readbintail->item.status==READY) {
451 T->array[key]->head=val; //released lock
459 if (readbintail->index==NUMREAD) { // create new read group
460 ReadBinItem* rb=createReadBinItem();
461 rb->array[rb->index++]=r;
462 rb->item.total=1; //safe only because item could not have started
463 rb->item.status=status;
464 T->array[key]->tail->next=(BinItem*)rb;
465 T->array[key]->tail=(BinItem*)rb;
466 r->binitem=(BinItem*)rb;
467 } else { // group into old tail
468 readbintail->array[readbintail->index++]=r;
469 atomic_inc(&readbintail->item.total);
470 r->binitem=(BinItem*)readbintail;
473 atomic_inc(&T->item.total);
475 r->qitem=(MemoryQueueItem *)T;
476 T->array[key]->head=val; //released lock
// A writer sits at the bin tail, so start a fresh NOTREADY read group
// behind it for this reader.  Restoring array[key]->head releases the
// bin lock taken by the caller.
480 void TAILWRITECASE(Hashtable *T, REntry *r, BinItem *val, BinItem *bintail, int key, int inc) {
481 // WriteBinItem* wb=createWriteBinItem();
483 //wb->item.total=1;//safe because item could not have started
484 //wb->item.status=NOTREADY;
485 ReadBinItem* rb=createReadBinItem();
486 rb->array[rb->index++]=r;
487 rb->item.total=1; //safe because item could not have started
488 rb->item.status=NOTREADY;
490 atomic_inc(&T->item.total);
492 r->qitem=(MemoryQueueItem *)T;
493 r->binitem=(BinItem*)rb;
494 T->array[key]->tail->next=(BinItem*)rb;
495 T->array[key]->tail=(BinItem*)rb;
496 T->array[key]->head=val; //released lock
// Insert a coarse entry into the queue's vector stage, appending a new
// vector when the tail is not one or the current vector is full
// (index==NUMITEMS).  The slot is claimed via LOCKXCHG so that exactly
// one of inserter/retirer handles a slot that becomes READY; a NULL
// exchange result here means a dispatcher already consumed the slot.
// Parent coarse entries retire immediately (total decremented on the
// spot).  NOTE(review): several lines are elided from this listing.
499 int ADDVECTOR(MemoryQueue *Q, REntry *r) {
500 if(!isVector(Q->tail)) {
502 if (isParentCoarse(r) && Q->tail->total==0 && Q->tail==Q->head) {
507 Vector* V=createVector();
508 V->item.status=NOTREADY;
510 Q->tail->next=(MemoryQueueItem*)V;
511 //************NEED memory barrier here to ensure compiler does not cache Q.tail.status******
513 if (Q->tail->status==READY&&Q->tail->total==0&&Q->head==Q->tail) {
514 //previous Q item is finished
515 V->item.status=READY;
516 //Get rid of item in front of us...
517 CAS((unsigned INTPTR*)&(Q->head), (unsigned INTPTR)Q->tail, (unsigned INTPTR)V);
519 Q->tail=(MemoryQueueItem*)V;
520 // handle the dummy queue-item case (head->type==3 marks the dummy)
521 if(Q->head->type==3) {
522 Q->head=(MemoryQueueItem*)V;
525 //at this point, have vector
526 Vector* V=(Vector*)Q->tail;
527 if (V->index==NUMITEMS) {
// Current vector is full: chain a new one behind it.
531 V->item.status=NOTREADY;
532 Q->tail->next=(MemoryQueueItem*)V;
533 //***NEED memory barrier here to ensure compiler does not cache Q.tail.status******
535 if (Q->tail->status==READY) {
536 V->item.status=READY;
538 if (Q->tail->total==0&&Q->head==Q->tail) {
539 //may need to remove things
540 CAS((unsigned INTPTR*)&(Q->head), (unsigned INTPTR)Q->tail, (unsigned INTPTR)V);
542 Q->tail=(MemoryQueueItem*)V;
545 atomic_inc(&V->item.total);
552 //*****NEED memory barrier here to ensure compiler does not reorder writes to V.array and V.index
555 //*****NEED memory barrier here to ensure compiler does not cache V.status*********
556 r->qitem=(MemoryQueueItem *)V;
558 if (V->item.status==READY) {
560 flag=(void*)LOCKXCHG((unsigned INTPTR*)&(V->array[index]), (unsigned INTPTR)flag);
562 if (isParentCoarse(r)) { //parent's retire immediately
563 atomic_dec(&V->item.total);
566 #if defined(RCR)&&!defined(OOO_DISABLE_TASKMEMPOOL)
567 if (atomic_sub_and_test(1, &r->count))
568 poolfreeinto(Q->rentrypool, r);
573 return NOTREADY; //<- means that some other dispatcher got this one...so need to do accounting correctly
581 //SCC's don't come in parent variety
// Insert an SCC entry as its own single-item stage appended to the
// queue.  If the previous stage has drained the new stage is READY and
// S->val is claimed via LOCKXCHG -- a NULL result means another
// dispatcher already took it, hence the NOTREADY return for accounting.
// NOTE(review): several lines are elided from this listing.
582 int ADDSCC(MemoryQueue *Q, REntry *r) {
587 S->item.status=NOTREADY;
588 r->qitem=(MemoryQueueItem *)S;
589 //*** NEED BARRIER HERE -- data structure needs to be complete before exposing
591 Q->tail->next=(MemoryQueueItem*)S;
592 //*** NEED BARRIER HERE
594 if (Q->tail->status==READY && Q->tail->total==0 && Q->tail==Q->head) {
595 //previous Q item is finished
596 S->item.status=READY;
597 Q->tail=(MemoryQueueItem*)S;
598 // handle the dummy queue-item case (head->type==3 marks the dummy)
599 if(Q->head->type==3) {
600 Q->head=(MemoryQueueItem*)S;
603 flag=(void*)LOCKXCHG((unsigned INTPTR*)&(S->val), (unsigned INTPTR)flag);
605 #if defined(RCR)&&!defined(OOO_DISABLE_TASKMEMPOOL)
606 if (atomic_sub_and_test(1, &r->count))
607 poolfreeinto(Q->rentrypool, r);
611 return NOTREADY; //<- means that some other dispatcher got this one...so need to do accounting correctly
614 Q->tail=(MemoryQueueItem*)S;
// Retire a finished rentry by dispatching to the matching stage's
// retire routine, then (when mempools are enabled) drop this side's
// reference count and return the record to the queue's pool once the
// count reaches zero.
620 void RETIRERENTRY(MemoryQueue* Q, REntry * r) {
621 if (isFineWrite(r)||isFineRead(r)) {
622 RETIREHASHTABLE(Q, r);
623 } else if (isCoarse(r)) {
625 } else if (isSCC(r)) {
628 #ifndef OOO_DISABLE_TASKMEMPOOL
630 if (atomic_sub_and_test(1, &r->count))
632 poolfreeinto(Q->rentrypool, r);
// Retire an SCC entry: zero the stage total (single occupant, so no
// atomic decrement needed) and swap S->val out via LOCKXCHG to learn
// whether a successor entry is waiting there.  Reference-count cleanup
// mirrors RETIRERENTRY.
636 void RETIRESCC(MemoryQueue *Q, REntry *r) {
637 SCC* s=(SCC *)r->qitem;
638 s->item.total=0; //don't need atomicdec
641 flag=(void*)LOCKXCHG((unsigned INTPTR*)&(s->val), (unsigned INTPTR)flag);
643 #ifndef OOO_DISABLE_TASKMEMPOOL
644 RELEASE_REFERENCE_TO(((REntry*)flag)->seseRec);
646 //only release reference if we haven't cleared it before
647 #if !defined(OOO_DISABLE_TASKMEMPOOL)&&defined(RCR)
648 if (atomic_sub_and_test(1, &r->count))
649 poolfreeinto(Q->rentrypool, r);
// Retire a fine-grained entry: retire its bin item, decrement the
// table total, and if a successor stage exists and this table drained
// (next!=NULL && total==0) advance the queue chain.
657 void RETIREHASHTABLE(MemoryQueue *q, REntry *r) {
658 Hashtable *T=(Hashtable *)r->qitem;
659 BinItem *b=r->binitem;
661 atomic_dec(&T->item.total);
663 if (T->item.next!=NULL && T->item.total==0) {
// Retire an entry's bin item and wake whatever becomes runnable next
// in the same bin: for a write, or for the last read of a drained read
// group, lock the bin (LOCKXCHG on head, sentinel 0x1) and walk forward
// resolving successor read groups / write items.  Parent entries found
// during the walk retire immediately (totals decremented here).
// Writing array[key]->head back releases the bin lock.
// NOTE(review): several lines are elided from this listing.
668 void RETIREBIN(Hashtable *T, REntry *r, BinItem *b) {
669 int key=generateKey(OBJPTRPTR_2_OBJOID(r->pointer) );
671 atomic_dec(&b->total);
673 if (isFineWrite(r) || (isFineRead(r) && b->next!=NULL && b->total==0)) {
674 // CHECK FIRST IF next is nonnull to guarantee that b.total cannot change
678 val=(BinItem*)LOCKXCHG((unsigned INTPTR*)&(T->array[key]->head), (unsigned INTPTR)val);
679 } while(val==(BinItem*)0x1);
680 // at this point have locked bin
685 if (isReadBinItem(ptr)) {
686 ReadBinItem* rptr=(ReadBinItem*)ptr;
687 if (rptr->item.status==NOTREADY) {
688 for (i=0; i<rptr->index; i++) {
689 resolveDependencies(rptr->array[i]);
690 if (isParent(rptr->array[i])) {
691 //parents go immediately
692 atomic_dec(&rptr->item.total);
693 atomic_dec(&T->item.total);
697 rptr->item.status=READY;
698 if (rptr->item.next==NULL) {
701 if (rptr->item.total!=0) {
703 } else if ((BinItem*)rptr==val) {
706 } else if(isWriteBinItem(ptr)) {
709 if(ptr->status==NOTREADY) {
710 resolveDependencies(((WriteBinItem*)ptr)->val);
712 if(isParent(((WriteBinItem*)ptr)->val)) {
713 atomic_dec(&T->item.total);
717 } else { // write bin is already resolved
721 if(ptr->status==NOTREADY) {
722 resolveDependencies(((WriteBinItem*)ptr)->val);
725 if (isParent(((WriteBinItem*)ptr)->val)) {
726 atomic_dec(&T->item.total);
736 T->array[key]->head=val; // release lock
// Retire a coarse entry: decrement the vector total and swap this
// entry's slot out via LOCKXCHG; a non-NULL exchange result means the
// slot still held the record and this side must release the reference.
// If a successor stage exists and the vector drained, advance the
// chain (ordering of the next!=NULL / total==0 checks is crucial).
741 void RETIREVECTOR(MemoryQueue *Q, REntry *r) {
742 Vector* V=(Vector *)r->qitem;
743 atomic_dec(&V->item.total);
746 val=(REntry*)LOCKXCHG((unsigned INTPTR*)&(V->array[r->index]), (unsigned INTPTR)val);
748 //release reference if we haven't cleared this one
749 #if !defined(OOO_DISABLE_TASKMEMPOOL)&&defined(RCR)
750 if (atomic_sub_and_test(1, &r->count))
751 poolfreeinto(Q->rentrypool, r);
753 RELEASE_REFERENCE_TO( ((REntry*)val)->seseRec);
756 if (V->item.next!=NULL && V->item.total==0) { //NOTE: ORDERING CRUCIAL HERE
// Walk the queue's stage chain from the head, resolving each drained
// stage (dispatch by stage type) and advancing Q->head past finished
// stages with CAS; a failed CAS simply retries on the next pass.
// Stops at a stage that still has outstanding work (next==NULL or
// total!=0), marking it READY if needed.
761 void RESOLVECHAIN(MemoryQueue *Q) {
763 MemoryQueueItem* head=Q->head;
764 if (head->next==NULL||head->total!=0) {
765 //item is not finished
766 if (head->status!=READY) {
767 //need to update status
769 if (isHashtable(head)) {
770 RESOLVEHASHTABLE(Q, (Hashtable *) head);
771 } else if (isVector(head)) {
772 RESOLVEVECTOR(Q, (Vector *) head);
773 } else if (isSingleItem(head)) {
774 RESOLVESCC(Q, (SCC *)head);
776 if (head->next==NULL)
783 MemoryQueueItem* nextitem=head->next;
784 CAS((unsigned INTPTR*)&(Q->head), (unsigned INTPTR)head, (unsigned INTPTR)nextitem);
785 //oldvalue not needed... if we fail we just repeat
// A hashtable stage has become current: sweep every bin, lock it
// (LOCKXCHG on head), and resolve the leading NOTREADY item -- a write
// bin's single entry, or every entry in a read group.  Parent entries
// retire immediately (totals decremented).  Writing bin->head back
// releases the lock.  NOTE(review): lines are elided from this listing.
790 void RESOLVEHASHTABLE(MemoryQueue *Q, Hashtable *T) {
792 for (binidx=0; binidx<NUMBINS; binidx++) {
793 BinElement* bin=T->array[binidx];
797 val=(BinItem*)LOCKXCHG((unsigned INTPTR*)&(bin->head), (unsigned INTPTR)val);
798 } while (val==(BinItem*)1);
799 //at this point have locked bin
802 if(ptr!=NULL&&ptr->status==NOTREADY) {
804 if (isWriteBinItem(ptr)) {
807 resolveDependencies(((WriteBinItem*)ptr)->val);
809 if (isParent(((WriteBinItem*)ptr)->val)) {
810 atomic_dec(&T->item.total);
814 } else if (isReadBinItem(ptr)) {
816 ReadBinItem* rptr=(ReadBinItem*)ptr;
817 for(i=0; i<rptr->index; i++) {
818 resolveDependencies(rptr->array[i]);
819 if (isParent(rptr->array[i])) {
820 atomic_dec(&rptr->item.total);
821 atomic_dec(&T->item.total);
824 if (rptr->item.next==NULL||rptr->item.total!=0) {
826 } else if((BinItem*)rptr==val) {
829 rptr->item.status=READY;
834 bin->head=val; // released lock;
// A vector stage has become current: claim each occupied slot via
// LOCKXCHG, resolve the entry's dependencies, decrement the stage
// total, and release/pool the record per the RCR/mempool configuration.
// Continues into a chained successor vector when present.
838 void RESOLVEVECTOR(MemoryQueue *q, Vector *V) {
844 for (i=0; i<NUMITEMS; i++) {
846 val=(REntry*)LOCKXCHG((unsigned INTPTR*)&(tmp->array[i]), (unsigned INTPTR)val);
848 SESEcommon *seseCommon=val->seseRec;
849 resolveDependencies(val);
851 atomic_dec(&tmp->item.total);
853 poolfreeinto(q->rentrypool,val);
856 #if defined(RCR)&&!defined(OOO_DISABLE_TASKMEMPOOL)
857 else if (atomic_sub_and_test(1, &((REntry *)val)->count))
858 poolfreeinto(q->rentrypool,val);
859 RELEASE_REFERENCE_TO(seseCommon);
863 if (tmp->item.next!=NULL&&isVector(tmp->item.next)) {
864 tmp=(Vector*)tmp->item.next;
// An SCC stage has become current (precondition: status READY): claim
// its single occupant via LOCKXCHG on S->val, resolve the entry's
// dependencies, then drop the reference / return the record to the
// pool under the RCR+mempool configuration.
871 void RESOLVESCC(MemoryQueue *q, SCC *S) {
872 //precondition: SCC's state is READY
874 flag=(void*)LOCKXCHG((unsigned INTPTR*)&(S->val), (unsigned INTPTR)flag);
876 SESEcommon *seseCommon=((REntry *)flag)->seseRec;
877 resolveDependencies(flag);
878 #if defined(RCR)&&!defined(OOO_DISABLE_TASKMEMPOOL)
879 if (atomic_sub_and_test(1, &((REntry *)flag)->count))
880 poolfreeinto(q->rentrypool, flag);
881 RELEASE_REFERENCE_TO(seseCommon);
// Mark one of a task's memory dependencies satisfied; when the task's
// unresolvedDependencies counter hits zero, either submit the task to
// the work scheduler (child entries) or release the parent's stall
// semaphore (PARENT* entries).  Two variants appear here -- one using
// per-parameter rcrRecord flags and __builtin_ctzll over a conflict
// mask, one plain -- presumably selected by an #ifdef RCR elided from
// this listing; TODO confirm against full source.
887 void resolveDependencies(REntry* rentry) {
888 SESEcommon* seseCommon=(SESEcommon*)rentry->seseRec;
889 int type=rentry->type;
891 if (type==COARSE||type==SCCITEM) {
892 struct rcrRecord * array=(struct rcrRecord *)(((char *)seseCommon)+seseCommon->offsetToParamRecords);
893 INTPTR mask=rentry->mask;
896 int shift=__builtin_ctzll(mask)+1;
899 if(atomic_sub_and_test(1, &array[index].flag)) {
900 if(atomic_sub_and_test(1, &(seseCommon->unresolvedDependencies)))
901 workScheduleSubmit((void *)seseCommon);
904 } else if (type==PARENTCOARSE) {
905 if (atomic_sub_and_test(1, &(seseCommon->unresolvedDependencies))) {
906 psem_give_tag(seseCommon->parentsStallSem, ((SESEstall *) seseCommon)->tag);
907 //release our reference to stallrecord
910 printf("ERROR: REntry type %d should never be generated in RCR..\n", rentry->type);
913 if(type==READ || type==WRITE || type==COARSE || type==SCCITEM) {
914 if( atomic_sub_and_test(1, &(seseCommon->unresolvedDependencies)) ) {
915 workScheduleSubmit(seseCommon);
917 } else if(type==PARENTREAD || type==PARENTWRITE || type==PARENTCOARSE) {
918 psem_give_tag(rentry->parentStallSem, rentry->tag);
// Reset the queue's per-bin staging buffer before a buffered insert
// pass (see RESOLVEBUF / RESOLVEBUFFORHASHTABLE).
923 void INITIALIZEBUF(MemoryQueue * q) {
925 for(i=0; i<NUMBINS; i++) {
// Append an rentry to the queue's staging buffer for later batch
// processing by RESOLVEBUF / RESOLVEBUFFORHASHTABLE.
931 void ADDRENTRYTOBUF(MemoryQueue * q, REntry * r) {
932 q->buf[q->bufcount]=r;
// Flush the staging buffer directly into 'table' in dependence order:
// phase 1 records the first write per bin in binbuf, phase 2 enqueues
// reads whose bin has no buffered write, phase 3 enqueues the one
// buffered write per bin.  Entries that come back READY from
// ADDTABLEITEM (inc=FALSE: totals were pre-counted) are resolved
// immediately.  NOTE(review): lines are elided from this listing.
936 int RESOLVEBUFFORHASHTABLE(MemoryQueue * q, Hashtable* table, SESEcommon *seseCommon) {
938 // first phase: only consider write rentry
939 for(i=0; i<q->bufcount; i++) {
942 int key=generateKey(OBJPTRPTR_2_OBJOID(r->pointer) );
943 if(q->binbuf[key]==NULL) {
944 // for multiple writes, add only the first write that hashes to the same bin
951 // second phase: enqueue read items if it is eligible
952 for(i=0; i<q->bufcount; i++) {
954 if(r!=NULL && r->type==READ) {
955 int key=generateKey(OBJPTRPTR_2_OBJOID(r->pointer) );
956 if(q->binbuf[key]==NULL) {
957 // read item that hashes to the bin which doesn't contain any write
958 seseCommon->rentryArray[seseCommon->rentryIdx++]=r;
959 if(ADDTABLEITEM(table, r, FALSE)==READY) {
960 resolveDependencies(r);
967 // then, add only one of write items that hashes to the same bin
968 for(i=0; i<q->bufcount; i++) {
971 seseCommon->rentryArray[seseCommon->rentryIdx++]=r;
972 if(ADDTABLEITEM(table, r, FALSE)==READY) {
973 resolveDependencies(r);
// Flush the staging buffer through the normal ADDRENTRY path.  If any
// buffered entry's dynamic pointer is still unresolved (*r->pointer==0)
// every entry is submitted as-is (deferral handled inside ADDRENTRY);
// otherwise the same three-phase write/read ordering as
// RESOLVEBUFFORHASHTABLE is applied.  NOTREADY results accumulate into
// the caller's dependency accounting (elided lines).
980 int RESOLVEBUF(MemoryQueue * q, SESEcommon *seseCommon) {
983 // check if every waiting entry is resolved
984 // if not, defer every items for hashtable until it is resolved.
985 int unresolved=FALSE;
986 for(i=0; i<q->bufcount; i++) {
988 if(*(r->pointer)==0) {
992 if(unresolved==TRUE) {
993 for(i=0; i<q->bufcount; i++) {
997 if(ADDRENTRY(q,r)==NOTREADY) {
1004 // first phase: only consider write rentry
1005 for(i=0; i<q->bufcount; i++) {
1006 REntry *r=q->buf[i];
1007 if(r->type==WRITE) {
1008 int key=generateKey(OBJPTRPTR_2_OBJOID(r->pointer) );
1009 if(q->binbuf[key]==NULL) {
1010 // for multiple writes, add only the first write that hashes to the same bin
1017 // second phase: enqueue read items if it is eligible
1018 for(i=0; i<q->bufcount; i++) {
1019 REntry *r=q->buf[i];
1020 if(r!=NULL && r->type==READ) {
1021 int key=generateKey(OBJPTRPTR_2_OBJOID(r->pointer) );
1022 if(q->binbuf[key]==NULL) {
1023 // read item that hashes to the bin which doesn't contain any write
1024 seseCommon->rentryArray[seseCommon->rentryIdx++]=r;
1025 if(ADDRENTRY(q,r)==NOTREADY) {
1033 // then, add only one of write items that hashes to the same bin
1034 for(i=0; i<q->bufcount; i++) {
1035 REntry *r=q->buf[i];
1037 seseCommon->rentryArray[seseCommon->rentryIdx++]=r;
1038 if(ADDRENTRY(q,r)==NOTREADY) {
// Called when a dynamic pointer becomes resolved.  Coarse/SCC entries,
// and tables with no unresolvedQueue, need no work.  Otherwise lock the
// table's unresolvedQueue (LOCKXCHG, sentinel 0x1); if this rentry is
// at the queue's head, drain entries in order until an unresolved
// pointer or the end of the queue is reached, batching consecutive
// entries of the same buffered-mode SESE through INITIALIZEBUF /
// ADDRENTRYTOBUF / RESOLVEBUFFORHASHTABLE and sending the rest through
// ADDTABLEITEM (inc=FALSE; resolving READY results immediately).
// Writing unresolvedQueue (to val or NULL) releases the lock; NULL also
// returns the table to normal mode.  If the rentry is not the head,
// just unlock and leave it queued.
// NOTE(review): several lines are elided from this listing.
1047 void resolvePointer(REntry* rentry) {
1048 Hashtable* table=(Hashtable *)rentry->qitem;
1050 // we don't need to consider unresolved cases for coarse rentries.
1051 // or if resolved already before related rentry is enqueued to the waiting queue
1052 if(rentry->type==COARSE ||
1053 rentry->type==PARENTCOARSE ||
1054 rentry->type==SCCITEM ||
1056 table->unresolvedQueue==NULL) {
1061 val=(struct Queue*)0x1;
1062 val=(struct Queue*)LOCKXCHG((unsigned INTPTR*)&(table->unresolvedQueue), (unsigned INTPTR)val);
1063 } while(val==(struct Queue*)0x1);
1065 getHead(val)!=NULL &&
1066 getHead(val)->objectptr==rentry) {
1067 // handling pointer is the first item of the queue
1068 // start to resolve until it reaches unresolved pointer or end of queue
1069 INTPTR currentSESE=0;
1071 struct QueueItem* head=getHead(val);
1073 REntry* rentry=(REntry*)head->objectptr;
1074 if(*(rentry->pointer)==0) {
1075 // encounters following unresolved pointer
1076 table->unresolvedQueue=val; //released lock
1079 removeItem(val,head);
1081 //now, address is resolved
1083 //check if rentry is buffer mode
1084 if(rentry->isBufMode==TRUE) {
1085 if(currentSESE==0) {
1086 queue=rentry->queue;
1087 INITIALIZEBUF(queue);
1088 currentSESE=(INTPTR)rentry;
1089 ADDRENTRYTOBUF(queue,rentry);
1090 } else if(currentSESE==(INTPTR)rentry) {
1091 ADDRENTRYTOBUF(queue,rentry);
1092 } else if(currentSESE!=(INTPTR)rentry) {
1093 RESOLVEBUFFORHASHTABLE(queue,table,(SESEcommon*)rentry->seseRec);
1094 currentSESE=(INTPTR)rentry;
1095 INITIALIZEBUF(queue);
1096 ADDRENTRYTOBUF(rentry->queue,rentry);
1099 if(currentSESE!=0) {
1100 //previous SESE has buf mode, need to invoke resolve buffer
1101 RESOLVEBUFFORHASHTABLE(queue,table,(SESEcommon*)rentry->seseRec);
1105 if(ADDTABLEITEM(table, rentry, FALSE)==READY) {
1106 resolveDependencies(rentry);
1110 table->unresolvedQueue=NULL; // set hashtable as normal-mode.
1115 // resolved rentry is not head of queue
1116 table->unresolvedQueue=val; //released lock;