PrefetchAnalysis pa;
MLPAnalysis mlpa;
OoOJavaAnalysis oooa;
+ String maxTaskRecSizeStr="__maxTaskRecSize___";
String mlperrstr = "if(status != 0) { "+
"sprintf(errmsg, \"MLP error at %s:%d\", __FILE__, __LINE__); "+
"perror(errmsg); exit(-1); }";
outmethodheader.println("#include <string.h>");
outmethodheader.println("#include \"mlp_runtime.h\"");
outmethodheader.println("#include \"psemaphore.h\"");
+ outmethodheader.println("#include \"memPool.h\"");
+
+  // spit out a global to inform all worker threads of
+  // the maximum size of any task record
+ outmethodheader.println("extern int "+maxTaskRecSizeStr+";");
}
/* Output Structures */
outputTaskTypes(outtask);
}
- if( state.MLP || state.OOOJAVA) {
+ if( state.MLP || state.OOOJAVA) {
// have to initialize some SESE compiler data before
// analyzing normal methods, which must happen before
// generating SESE internal code
//get effect set
Hashtable<Taint, Set<Effect>> effects=oooa.getDisjointAnalysis().getEffectsAnalysis().get(fsen);
- rcr.traverseSESEBlock(fsen, effects, conflicts, rg);
+ //rcr.traverseSESEBlock(fsen, effects, conflicts, rg);
}
}
}
// Output function prototypes and structures for SESE's and code
if( state.MLP || state.OOOJAVA ) {
+      // spit out a global to inform all worker threads of
+      // the maximum size of any task record
+ outmethod.println("int "+maxTaskRecSizeStr+" = 0;");
+
// used to differentiate, during code generation, whether we are
// passing over SESE body code, or non-SESE code
nonSESEpass = false;
outmethod.println(" int i;");
if (state.MLP || state.OOOJAVA) {
- //outmethod.println(" pthread_once( &mlpOnceObj, mlpInitOncePerThread );");
+
+ // do a calculation to determine which task record
+ // is the largest, store that as a global value for
+ // allocating records
+ Iterator<FlatSESEEnterNode> seseit;
+ if(state.MLP){
+ seseit=mlpa.getAllSESEs().iterator();
+ }else{
+ seseit=oooa.getAllSESEs().iterator();
+ }
+ while(seseit.hasNext()){
+ FlatSESEEnterNode fsen = seseit.next();
+ outmethod.println("if( sizeof( "+fsen.getSESErecordName()+
+ " ) > "+maxTaskRecSizeStr+
+ " ) { "+maxTaskRecSizeStr+
+ " = sizeof( "+fsen.getSESErecordName()+
+ " ); }" );
+ }
+
outmethod.println(" workScheduleInit( "+state.MLP_NUMCORES+", invokeSESEmethod );");
outmethod.println("#include \"methodheaders.h\"");
outmethod.println("#include \"virtualtable.h\"");
outmethod.println("#include \"runtime.h\"");
+
+ // always include: compiler directives will leave out
+ // instrumentation when option is not set
+ outmethod.println("#include \"coreprof/coreprof.h\"");
+
if (state.SANDBOX) {
outmethod.println("#include \"sandboxdefs.c\"");
}
outmethod.println("#include \"RuntimeConflictResolver.h\"");
}
}
- if (state.COREPROF) {
- outmethod.println("#include \"coreprof.h\"");
- }
//Store the sizes of classes & array elements
generateSizeArray(outmethod);
// initialize thread-local var to a non-zero, invalid address
output.println(" seseCaller = (SESEcommon*) 0x2;");
-
- output.println(" CP_LOGEVENT( CP_EVENTID_TASKEXECUTE, CP_EVENTTYPE_BEGIN );");
+ if( state.COREPROF ) {
+ output.println(" CP_LOGEVENT( CP_EVENTID_TASKEXECUTE, CP_EVENTTYPE_BEGIN );");
+ }
HashSet<FlatNode> exitset=new HashSet<FlatNode>();
exitset.add(seseExit);
(state.OOOJAVA && fsen.equals( oooa.getMainSESE() ))
) {
outmethod.println( " /* work scheduler works forever, explicitly exit */");
- if (state.COREPROF) {
- outmethod.println(" CP_EXIT();");
- outmethod.println(" CP_DUMP();");
- }
+ outmethod.println( " CP_EXIT();");
+ outmethod.println( " CP_DUMP();");
outmethod.println( " workScheduleExit();");
outmethod.println( " exit( 0 );");
}
output.println(" {");
output.println(" SESEcommon* common = (SESEcommon*) "+pair+";");
- //output.println(" CP_LOGEVENT( CP_EVENTID_TASKSTALLVAR, CP_EVENTTYPE_BEGIN );");
+ if( state.COREPROF ) {
+ //output.println(" CP_LOGEVENT( CP_EVENTID_TASKSTALLVAR, CP_EVENTTYPE_BEGIN );");
+ }
output.println(" pthread_mutex_lock( &(common->lock) );");
output.println(" while( common->doneExecuting == FALSE ) {");
output.println(" pthread_cond_wait( &(common->doneCond), &(common->lock) );");
output.println(" "+generateTemp( fmContext, td, null )+
" = child->"+vst.getAddrVar().getSafeSymbol()+";");
}
-
- //output.println(" CP_LOGEVENT( CP_EVENTID_TASKSTALLVAR, CP_EVENTTYPE_END );");
+ if( state.COREPROF ) {
+ //output.println(" CP_LOGEVENT( CP_EVENTID_TASKSTALLVAR, CP_EVENTTYPE_END );");
+ }
output.println(" }");
}
// otherwise the dynamic write nodes will have the local var up-to-date
output.println(" {");
output.println(" if( "+dynVar+"_srcSESE != NULL ) {");
- //output.println(" CP_LOGEVENT( CP_EVENTID_TASKSTALLVAR, CP_EVENTTYPE_BEGIN );");
+ if( state.COREPROF ) {
+ //output.println(" CP_LOGEVENT( CP_EVENTID_TASKSTALLVAR, CP_EVENTTYPE_BEGIN );");
+ }
output.println(" SESEcommon* common = (SESEcommon*) "+dynVar+"_srcSESE;");
output.println(" psem_take( &(common->stallSem) );");
output.println(" "+generateTemp( fmContext, dynVar, null )+
" = *(("+typeStr+"*) ("+
dynVar+"_srcSESE + "+dynVar+"_srcOffset));");
- //output.println(" CP_LOGEVENT( CP_EVENTID_TASKSTALLVAR, CP_EVENTTYPE_END );");
+ if( state.COREPROF ) {
+ //output.println(" CP_LOGEVENT( CP_EVENTID_TASKSTALLVAR, CP_EVENTTYPE_END );");
+ }
output.println(" }");
output.println(" }");
}
output.println(" rentry->queue=seseCaller->memoryQueueArray["+ waitingElement.getQueueID()+ "];");
output.println(" if(ADDRENTRY(seseCaller->memoryQueueArray["+ waitingElement.getQueueID()
+ "],rentry)==NOTREADY){");
- //output.println(" CP_LOGEVENT( CP_EVENTID_TASKSTALLMEM, CP_EVENTTYPE_BEGIN );");
+ if( state.COREPROF ) {
+ //output.println(" CP_LOGEVENT( CP_EVENTID_TASKSTALLMEM, CP_EVENTTYPE_BEGIN );");
+ }
output.println(" psem_take( &(rentry->parentStallSem) );");
- //output.println(" CP_LOGEVENT( CP_EVENTID_TASKSTALLMEM, CP_EVENTTYPE_END );");
+ if( state.COREPROF ) {
+ //output.println(" CP_LOGEVENT( CP_EVENTID_TASKSTALLMEM, CP_EVENTTYPE_END );");
+ }
output.println(" } ");
if(state.RCR && rcr != null) {
output
.println(" if(ADDRENTRY(seseCaller->memoryQueueArray["+ waitingElement.getQueueID()
+ "],rentry)==NOTREADY){");
- //output.println(" CP_LOGEVENT( CP_EVENTID_TASKSTALLMEM, CP_EVENTTYPE_BEGIN );");
+ if( state.COREPROF ) {
+ //output.println(" CP_LOGEVENT( CP_EVENTID_TASKSTALLMEM, CP_EVENTTYPE_BEGIN );");
+ }
output.println(" psem_take( &(rentry->parentStallSem) );");
- //output.println(" CP_LOGEVENT( CP_EVENTID_TASKSTALLMEM, CP_EVENTTYPE_END );");
+ if( state.COREPROF ) {
+ //output.println(" CP_LOGEVENT( CP_EVENTID_TASKSTALLMEM, CP_EVENTTYPE_END );");
+ }
output.println(" } ");
}
output.println(" }");
output.println(" {");
-
- output.println("CP_LOGEVENT( CP_EVENTID_TASKDISPATCH, CP_EVENTTYPE_BEGIN );");
+ if( state.COREPROF ) {
+ output.println("CP_LOGEVENT( CP_EVENTID_TASKDISPATCH, CP_EVENTTYPE_BEGIN );");
+ }
// set up the parent
if( (state.MLP && fsen == mlpa.getMainSESE()) ||
// eventually, for it to mark itself finished
// output.println(" pthread_mutex_unlock( &(seseToIssue->common.lock) );");
-
- output.println("CP_LOGEVENT( CP_EVENTID_TASKDISPATCH, CP_EVENTTYPE_END );");
+ if( state.COREPROF ) {
+ output.println("CP_LOGEVENT( CP_EVENTID_TASKDISPATCH, CP_EVENTTYPE_END );");
+ }
output.println(" }");
if( fsen.getIsCallerSESEplaceholder() ) {
return;
}
-
- output.println(" CP_LOGEVENT( CP_EVENTID_TASKEXECUTE, CP_EVENTTYPE_END );");
+
+ if( state.COREPROF ) {
+ output.println(" CP_LOGEVENT( CP_EVENTID_TASKEXECUTE, CP_EVENTTYPE_END );");
+ }
output.println(" /* SESE exiting */");
- output.println(" CP_LOGEVENT( CP_EVENTID_TASKRETIRE, CP_EVENTTYPE_BEGIN );");
+
+ if( state.COREPROF ) {
+ output.println(" CP_LOGEVENT( CP_EVENTID_TASKRETIRE, CP_EVENTTYPE_BEGIN );");
+ }
String com = paramsprefix+"->common";
// calls to a non-zero, invalid address
output.println(" seseCaller = (SESEcommon*) 0x1;");
-
- output.println(" CP_LOGEVENT( CP_EVENTID_TASKRETIRE, CP_EVENTTYPE_END );");
+ if( state.COREPROF ) {
+ output.println(" CP_LOGEVENT( CP_EVENTID_TASKRETIRE, CP_EVENTTYPE_END );");
+ }
}
public void generateFlatWriteDynamicVarNode( FlatMethod fm,
--- /dev/null
+#ifndef ___MEMPOOL_H__
+#define ___MEMPOOL_H__
+
+//////////////////////////////////////////////////////////
+//
+// A memory pool implements POOLCREATE, POOLALLOC and
+// POOLFREE to improve memory allocation by reusing records.
+//
+// This implementation uses a lock-free singly-linked list
+// to store reusable records. The list is initialized with
+// one valid record, and the list is considered empty when
+// it has only one record; this allows the enqueue operation's
+// CAS to assume tail can always be dereferenced.
+//
+// poolfree adds newly freed records to the list BACK
+//
+// poolalloc either takes records from FRONT or mallocs
+//
+//////////////////////////////////////////////////////////
+
+#include <stdlib.h>
+#include "mlp_lock.h"
+
+
+// The cache line size is set for the AMD Opteron 6168 (dc-10)
+// that has L1 and L2 cache line sizes of 64 bytes. Source:
+// http://www.cs.virginia.edu/~skadron/cs451/opteron/opteron.ppt
+#define CACHELINESIZE 64
+
+
+// A freed record is reinterpreted as a free-list node: its first
+// sizeof(void*) bytes are used as the next pointer. This implies
+// every pooled record must be at least sizeof(void*) bytes
+// (presumably guaranteed by callers — TODO confirm).
+typedef struct MemPoolItem_t {
+  void* next;
+} MemPoolItem;
+
+
+// One pool per record size. poolalloc (the single consumer) reads
+// and writes head; poolfree (possibly many producers) CASes tail.
+typedef struct MemPool_t {
+  int itemSize;          // size in bytes of every record in this pool
+  MemPoolItem* head;     // front of free list; owned by the consumer thread
+
+  // avoid cache line contention between producer/consumer...
+  // NOTE(review): the pad only subtracts sizeof(void*), not
+  // sizeof(int) + sizeof(void*) for the fields above, so head and
+  // tail may not land on distinct cache lines — confirm intent.
+  char buffer[CACHELINESIZE - sizeof(void*)];
+
+  MemPoolItem* tail;     // back of free list; updated via CAS in poolfree
+} MemPool;
+
+
+// the memory pool must always have at least one
+// item in it
+// Create a pool whose records are itemSize bytes each. The free
+// list always holds at least one sentinel record so that poolfree's
+// CAS can dereference tail unconditionally (see file header).
+// Returns the new pool, or NULL if allocation fails.
+static MemPool* poolcreate( int itemSize ) {
+  MemPool* p = malloc( sizeof( MemPool ) );
+  if( p == NULL ) {
+    return NULL;
+  }
+  p->itemSize = itemSize;
+  p->head = malloc( itemSize );
+  if( p->head == NULL ) {
+    free( p );
+    return NULL;
+  }
+  p->head->next = NULL;
+  p->tail = p->head;
+  // BUG FIX: original fell off the end of a non-void function with
+  // no return statement — undefined behavior for any caller that
+  // uses the result.
+  return p;
+}
+
+
+// CAS
+// in: a ptr, expected old, desired new
+// return: actual old
+//
+// Pass in a ptr, what you expect the old value is and
+// what you want the new value to be.
+// The CAS returns what the value is actually: if it matches
+// your proposed old value then you assume the update was successful,
+// otherwise someone did CAS before you, so try again (the return
+// value is the old value you will pass next time.)
+
+// Return a record to the pool by appending it to the BACK of the
+// lock-free free list. May be called concurrently; retries its CAS
+// until this thread's record becomes the tail.
+static inline void poolfree( MemPool* p, void* ptr ) {
+
+  MemPoolItem* tailCurrent;
+  MemPoolItem* tailActual;
+
+  // set up the now unneeded record as the tail of the
+  // free list by treating its first bytes as next pointer,
+  MemPoolItem* tailNew = (MemPoolItem*) ptr;
+  tailNew->next = NULL;
+
+  while( 1 ) {
+    // make sure the null happens before the insertion,
+    // also makes sure that we reload tailCurrent, etc..
+    BARRIER();
+
+    tailCurrent = p->tail;
+    tailActual = (MemPoolItem*)
+      CAS( &(p->tail),         // ptr to set
+           (long) tailCurrent, // expected current tail
+           (long) tailNew      // try set to our new tail
+           );
+    if( tailActual == tailCurrent ) {
+      // success: we own the old tail, link it to the new one.
+      // NOTE(review): the link is written AFTER the tail swing, so
+      // there is a window where the consumer sees next == NULL and
+      // falls back to calloc in poolalloc — appears benign by
+      // design, but confirm no path relies on the link being set.
+      tailCurrent->next = tailNew;
+      return;
+    }
+
+    // if CAS failed, someone else swung the tail first — retry
+  }
+}
+
+
+// Take a record from the FRONT of the free list, or heap-allocate
+// when the pool is (effectively) empty. Single-consumer only: must
+// be called by the one thread that owns this pool.
+static inline void* poolalloc( MemPool* p ) {
+
+  // to protect CAS in poolfree from dereferencing
+  // null, treat the queue as empty when there is
+  // only one item. The dequeue operation is only
+  // executed by the thread that owns the pool, so
+  // it doesn't require an atomic op
+  MemPoolItem* headCurrent = p->head;
+
+  if( headCurrent->next == NULL ) {
+    // only one item (the sentinel), so don't take from pool.
+    // NOTE(review): this path returns zeroed memory while the
+    // pooled path below returns a record with stale contents —
+    // callers must fully initialize records; confirm intended.
+    return calloc( 1, p->itemSize );
+  }
+
+  // pop the old sentinel; its successor becomes the new sentinel
+  p->head = headCurrent->next;
+  return headCurrent;
+}
+
+
+#endif // ___MEMPOOL_H__
+
+
+
+
+
+
+
+
+
+