From 401d66c2002cc5972e752a71b590e3fbea192d23 Mon Sep 17 00:00:00 2001 From: yeom Date: Mon, 1 Feb 2010 21:29:49 +0000 Subject: [PATCH] keep last snapshot for benchmark before moving to new queue impl. --- Robust/src/Analysis/MLP/ConflictGraph.java | 27 ++++++++ Robust/src/Analysis/MLP/WaitingElement.java | 21 +++++- Robust/src/IR/Flat/BuildCode.java | 74 +++++++++++++-------- Robust/src/Runtime/mlp_runtime.c | 3 + Robust/src/Runtime/mlp_runtime.h | 4 ++ 5 files changed, 99 insertions(+), 30 deletions(-) diff --git a/Robust/src/Analysis/MLP/ConflictGraph.java b/Robust/src/Analysis/MLP/ConflictGraph.java index 59b263d2..04288209 100644 --- a/Robust/src/Analysis/MLP/ConflictGraph.java +++ b/Robust/src/Analysis/MLP/ConflictGraph.java @@ -1045,6 +1045,8 @@ public class ConflictGraph { .next(); int type = -1; HashSet allocSet = new HashSet(); + HashSet connectedSet=new HashSet(); + String dynID=""; if (conflictEdge.getType() == ConflictEdge.COARSE_GRAIN_EDGE) { if (isReadOnly(node)) { @@ -1063,12 +1065,22 @@ public class ConflictGraph { // fine-grain // edge allocSet.addAll(getAllocSet(node)); + if(conflictEdge.getVertexU() instanceof LiveInNode ){ + connectedSet.add(new Integer(((LiveInNode)conflictEdge.getVertexU()).getSESEIdentifier())); + } + if(conflictEdge.getVertexV() instanceof LiveInNode ){ + connectedSet.add(new Integer(((LiveInNode)conflictEdge.getVertexV()).getSESEIdentifier())); + } + + if (isReadOnly(node)) { // fine-grain read type = 0; + dynID=node.getTempDescriptor().toString(); } else { // fine-grain write type = 1; + dynID=node.getTempDescriptor().toString(); } } @@ -1082,9 +1094,24 @@ public class ConflictGraph { newElement.setAllocList(allocSet); newElement.setWaitingID(seseLock.getID()); newElement.setStatus(type); + newElement.setDynID(dynID); + newElement.setConnectedSet(connectedSet); +// System.out.println(seseID+"connectedSet="+connectedSet); if(!waitingElementSet.contains(newElement)){ waitingElementSet.add(newElement); }else{ + for (Iterator iterator2 = waitingElementSet + .iterator(); iterator2 + .hasNext();) { + WaitingElement e = (WaitingElement) iterator2 + .next(); + if(e.equals(newElement)){ + e.getConnectedSet().addAll(connectedSet); +// System.out.println(seseID+"!!!connectedSet="+e.getConnectedSet()); + } + } + + } diff --git a/Robust/src/Analysis/MLP/WaitingElement.java b/Robust/src/Analysis/MLP/WaitingElement.java index 33606cf5..7524285a 100644 --- a/Robust/src/Analysis/MLP/WaitingElement.java +++ b/Robust/src/Analysis/MLP/WaitingElement.java @@ -8,15 +8,34 @@ public class WaitingElement { private int waitingID; private int status; private HashSet allocList; + private String dynID; + private HashSet connectedSet; public WaitingElement() { this.allocList = new HashSet(); + this.connectedSet = new HashSet(); } - + public void setWaitingID(int waitingID) { this.waitingID = waitingID; } + + public HashSet getConnectedSet() { + return connectedSet; + } + + public void setConnectedSet(HashSet connectedSet) { + this.connectedSet.addAll(connectedSet); + } + public String getDynID(){ + return dynID; + } + + public void setDynID(String dynID){ + this.dynID=dynID; + } + public int getWaitingID() { return waitingID; } diff --git a/Robust/src/IR/Flat/BuildCode.java b/Robust/src/IR/Flat/BuildCode.java index b82448ed..1724ea38 100644 --- a/Robust/src/IR/Flat/BuildCode.java +++ b/Robust/src/IR/Flat/BuildCode.java @@ -2110,13 +2110,16 @@ public class BuildCode { // can't grab something from this source until it is done output.println(" {"); - output.println(" SESEcommon* 
com = (SESEcommon*)"+paramsprefix+"->"+srcPair+";" ); - output.println(" pthread_mutex_lock( &(com->lock) );"); - output.println(" while( com->doneExecuting == FALSE ) {"); - output.println(" pthread_cond_wait( &(com->doneCond), &(com->lock) );"); - output.println(" }"); - output.println(" pthread_mutex_unlock( &(com->lock) );"); + /* + If we are running, everything is done. This check is redundant. + output.println(" SESEcommon* com = (SESEcommon*)"+paramsprefix+"->"+srcPair+";" ); + output.println(" pthread_mutex_lock( &(com->lock) );"); + output.println(" while( com->doneExecuting == FALSE ) {"); + output.println(" pthread_cond_wait( &(com->doneCond), &(com->lock) );"); + output.println(" }"); + output.println(" pthread_mutex_unlock( &(com->lock) );"); + */ output.println(" "+generateTemp( fsen.getfmBogus(), temp, null )+ " = "+paramsprefix+"->"+srcPair+"->"+vst.getAddrVar()+";"); @@ -2134,11 +2137,14 @@ public class BuildCode { // gotta wait until the source is done output.println(" SESEcommon* com = (SESEcommon*)"+paramsprefix+"->"+temp+"_srcSESE;" ); - output.println(" pthread_mutex_lock( &(com->lock) );"); - output.println(" while( com->doneExecuting == FALSE ) {"); - output.println(" pthread_cond_wait( &(com->doneCond), &(com->lock) );"); - output.println(" }"); - output.println(" pthread_mutex_unlock( &(com->lock) );"); + /* + If we are running, everything is done! + output.println(" pthread_mutex_lock( &(com->lock) );"); + output.println(" while( com->doneExecuting == FALSE ) {"); + output.println(" pthread_cond_wait( &(com->doneCond), &(com->lock) );"); + output.println(" }"); + output.println(" pthread_mutex_unlock( &(com->lock) );"); + */ String typeStr; if( type.isNull() ) { @@ -2734,7 +2740,9 @@ public class BuildCode { output.println(" WaitingElement* newElement=NULL;"); output.println(" struct QueueItem* newQItem=NULL;"); output.println(" waitingQueueItemID++;"); - output.println(" psem_init( &(seseCaller->memoryStallSiteSem) );"); +// output.println(" psem_init( &(seseCaller->memoryStallSiteSem) );"); + output.println(" pthread_cond_init( &(seseCaller->stallDone), NULL );"); +// output.println(" psem_init( &(seseCaller->memoryStallSiteSem) );"); output.println(" int qIdx;"); output.println(" int takeCount=0;"); for (Iterator iterator = waitingElementSet.iterator(); iterator.hasNext();) { @@ -2754,13 +2762,13 @@ public class BuildCode { output.println(" addNewItemBack(seseCaller->allocSiteArray[qIdx].waitingQueue,newElement);"); output.println(" takeCount++;"); output.println(" }"); - } - - output.println(" pthread_mutex_unlock( &(seseCaller->lock) );"); output.println(" if( takeCount>0 ){"); - output.println(" psem_take( &(seseCaller->memoryStallSiteSem) );"); +// output.println(" psem_take( &(seseCaller->memoryStallSiteSem) );"); + output.println(" pthread_cond_wait( &(seseCaller->stallDone), &(seseCaller->lock) );"); output.println(" }"); + + output.println(" pthread_mutex_unlock( &(seseCaller->lock) );"); output.println(" }"); } @@ -3310,9 +3318,7 @@ public class BuildCode { // before doing anything, lock your own record and increment the running children if( fsen != mlpa.getMainSESE() ) { - output.println(" pthread_mutex_lock( &(parentCommon->lock) );"); - output.println(" ++(parentCommon->numRunningChildren);"); - output.println(" pthread_mutex_unlock( &(parentCommon->lock) );"); + output.println(" atomic_inc(&parentCommon->numRunningChildren);"); } // just allocate the space for this record @@ -3667,6 +3673,8 @@ public class BuildCode { output.println(" 
while(nextQItem!=NULL){"); output.println(" WaitingElement* nextItem=nextQItem->objectptr;"); output.println(" SESEcommon* seseNextItem=(SESEcommon*)nextItem->seseRec;"); + output.println(" if(nextItem->resolved==0){"); + output.println(" int isResolved=isRunnable(___params___->common.parent->allocSiteArray[idx].waitingQueue,nextQItem);"); output.println(" if(seseNextItem->classID==___params___->common.parent->classID){"); // stall site output.println(" if(isResolved){"); @@ -3681,9 +3689,10 @@ public class BuildCode { output.println(" }"); output.println(" }else{"); output.println(" if(isResolved){"); - output.println(" struct QueueItem* currentItem=findItem(___params___->common.parent->allocSiteArray[idx].waitingQueue,nextItem);"); - output.println(" nextQItem=getNextQueueItem(currentItem);"); - output.println(" removeItem(___params___->common.parent->allocSiteArray[idx].waitingQueue,currentItem);"); + //output.println(" struct QueueItem* currentItem=findItem(___params___->common.parent->allocSiteArray[idx].waitingQueue,nextItem);"); + //output.println(" nextQItem=getNextQueueItem(currentItem);"); + //output.println(" removeItem(___params___->common.parent->allocSiteArray[idx].waitingQueue,currentItem);"); + output.println(" nextItem->resolved=1;"); output.println(" if( atomic_sub_and_test(1, &(seseNextItem->unresolvedDependencies)) ){"); output.println(" addNewItem(launchQueue,(void*)seseNextItem);"); output.println(" }"); @@ -3691,6 +3700,11 @@ public class BuildCode { output.println(" nextQItem=getNextQueueItem(nextQItem);"); output.println(" }"); output.println(" }"); + + output.println(" }else{"); + output.println(" nextQItem=getNextQueueItem(nextQItem);"); + output.println(" }"); + output.println(" } "); // end of while(nextQItem!=NULL) output.println(" }"); output.println(" }"); @@ -3705,7 +3719,8 @@ public class BuildCode { output.println(" }"); output.println(" }"); output.println(" if(giveCount>0){"); - output.println(" psem_give(&(___params___->common.parent->memoryStallSiteSem));"); +// output.println(" psem_give(&(___params___->common.parent->memoryStallSiteSem));"); + output.println(" pthread_cond_signal(&(___params___->common.parent->stallDone));"); output.println(" }"); output.println(" }"); @@ -3718,11 +3733,12 @@ public class BuildCode { // last of all, decrement your parent's number of running children output.println(" if( "+paramsprefix+"->common.parent != NULL ) {"); - output.println(" pthread_mutex_lock( &("+paramsprefix+"->common.parent->lock) );"); - output.println(" --("+paramsprefix+"->common.parent->numRunningChildren);"); - output.println(" pthread_cond_signal( &("+paramsprefix+"->common.parent->runningChildrenCond) );"); - output.println(" pthread_mutex_unlock( &("+paramsprefix+"->common.parent->lock) );"); - output.println(" }"); + output.println(" if (atomic_sub_and_test(1, &"+paramsprefix+"->common.parent->numRunningChildren)) {"); + output.println(" pthread_mutex_lock( &("+paramsprefix+"->common.parent->lock) );"); + output.println(" pthread_cond_signal( &("+paramsprefix+"->common.parent->runningChildrenCond) );"); + output.println(" pthread_mutex_unlock( &("+paramsprefix+"->common.parent->lock) );"); + output.println(" }"); + output.println(" }"); // this is a thread-only variable that can be handled when critical sese-to-sese // data has been taken care of--set sese pointer to remember self over method @@ -3730,7 +3746,7 @@ public class BuildCode { output.println(" seseCaller = (SESEcommon*) 0x1;"); } - + public void generateFlatWriteDynamicVarNode( 
FlatMethod fm, LocalityBinding lb, FlatWriteDynamicVarNode fwdvn, diff --git a/Robust/src/Runtime/mlp_runtime.c b/Robust/src/Runtime/mlp_runtime.c index 1fa12b1d..fb52b534 100644 --- a/Robust/src/Runtime/mlp_runtime.c +++ b/Robust/src/Runtime/mlp_runtime.c @@ -81,8 +81,11 @@ int addWaitingQueueElement(AllocSite* allocSiteArray, int numAllocSites, long al if(allocSiteArray[i].id==allocID){ if(isRunnableNewElement(allocSiteArray[i].waitingQueue,wElement)){ + wElement->resolved=1; + addNewItemBack(allocSiteArray[i].waitingQueue,wElement); return 0; }else{ + wElement->resolved=0; addNewItemBack(allocSiteArray[i].waitingQueue,wElement); return 1; } diff --git a/Robust/src/Runtime/mlp_runtime.h b/Robust/src/Runtime/mlp_runtime.h index cbf8858c..9e58418d 100644 --- a/Robust/src/Runtime/mlp_runtime.h +++ b/Robust/src/Runtime/mlp_runtime.h @@ -66,6 +66,8 @@ typedef struct SESEcommon_t { int numRelatedWaitingQueue; int waitingQueueItemID; + pthread_cond_t stallDone; + } SESEcommon; @@ -73,6 +75,7 @@ typedef struct WaitingElement_t{ void* seseRec; int status; int id; + int resolved; struct Queue* list; } WaitingElement; @@ -97,4 +100,5 @@ int addWaitingQueueElement(AllocSite* allocSiteArray, int numAllocSites, long al WaitingElement* mlpCreateWaitingElement(int status, void* seseToIssue, struct Queue* queue, int id); void* mlpAllocSESErecord( int size ); + #endif /* __MLP_RUNTIME__ */ -- 2.34.1
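
Editor's note (not part of the patch): the generated code above replaces the psem-based
memoryStallSiteSem handshake with a pthread condition variable (the new
SESEcommon.stallDone field): the stalling caller now does pthread_cond_wait under
seseCaller->lock when takeCount>0, and a finishing child does pthread_cond_signal on
the parent's stallDone when giveCount>0. Below is a minimal, self-contained sketch of
that pattern only, assuming nothing beyond what the diff shows; StallSite, pendingStalls,
and resolveDependences are hypothetical stand-ins for illustration and do not exist in
the runtime. The sketch re-checks its predicate in a while loop to guard against spurious
wakeups, whereas the generated code uses a single if and relies on the waiting-queue state.

    /* Sketch of the stallDone condvar handshake the patch adopts.
     * All names here are illustrative, not runtime identifiers. */
    #include <pthread.h>
    #include <stdio.h>

    typedef struct {
      pthread_mutex_t lock;          /* plays the role of seseCaller->lock   */
      pthread_cond_t  stallDone;     /* mirrors SESEcommon.stallDone         */
      int             pendingStalls; /* hypothetical stand-in for takeCount  */
    } StallSite;

    static StallSite site = {
      PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 3
    };

    /* Child side: after resolving a waiting-queue entry, wake the stalled
     * parent once nothing blocks it any more (the giveCount>0 path). */
    static void* resolveDependences(void* arg) {
      int i;
      for (i = 0; i < 3; i++) {
        pthread_mutex_lock(&site.lock);
        site.pendingStalls--;
        if (site.pendingStalls == 0)
          pthread_cond_signal(&site.stallDone);
        pthread_mutex_unlock(&site.lock);
      }
      return NULL;
    }

    int main(void) {
      pthread_t child;
      pthread_create(&child, NULL, resolveDependences, NULL);

      /* Parent side: stall until every enqueued entry is resolved,
       * waiting on the condvar while holding the lock. */
      pthread_mutex_lock(&site.lock);
      while (site.pendingStalls > 0)
        pthread_cond_wait(&site.stallDone, &site.lock);
      pthread_mutex_unlock(&site.lock);

      pthread_join(child, NULL);
      printf("stall released\n");
      return 0;
    }

The same patch also swaps the lock-protected ++/-- of numRunningChildren for
atomic_inc / atomic_sub_and_test, taking the parent's lock only on the final
decrement to signal runningChildrenCond; the condvar sketch above applies to that
wakeup path in the same way.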