// NOTE(review): the matching "#ifndef __MLP_RUNTIME__" guard line precedes
// this chunk (the "#endif /* __MLP_RUNTIME__ */" at the bottom closes it).
#define __MLP_RUNTIME__

#include <stdlib.h>
#include <stdio.h>
#include <pthread.h>

#include "runtime.h"
#include "mem.h"
#include "mlp_lock.h"
#include "memPool.h"

#ifndef FALSE
#define FALSE 0
#endif
// These are useful for interpreting an INTPTR to an Object at runtime
// to retrieve the object's type or object id (OID), 64-bit safe.
// 'opp' is a pointer to an object pointer; the object's header begins
// with two ints: [0] = type, [1] = OID.  Note the extra dereference
// (*(opp)) before the cast — casting 'opp' itself would read the
// pointer slot's own bytes instead of the object header.
#define OBJPTRPTR_2_OBJTYPE( opp ) ((int*)*(opp))[0]
#define OBJPTRPTR_2_OBJOID( opp ) ((int*)*(opp))[1]
// forwarding list elements is a linked
// structure of arrays, should help task
struct REntry_t* rentryArray[NUMRENTRY];
struct REntry_t* unresolvedRentryArray[NUMRENTRY];
-
#ifdef RCR
int offsetToParamRecords;
volatile int rcrstatus;
// fine read:0, fine write:1, parent read:2,
// parent write:3 coarse: 4, parent coarse:5, scc: 6
int type;
- struct Hashtable_t* hashtable;
+ int tag;
+ MemoryQueueItem *qitem;
struct BinItem_t* binitem;
- struct Vector_t* vector;
- struct SCC_t* scc;
struct MemoryQueue_t* queue;
- psemaphore * parentStallSem;
- int tag;
SESEcommon* seseRec;
INTPTR* pointer;
+ psemaphore * parentStallSem;
+#ifdef RCR
+ INTPTR mask;
+#endif
int isBufMode;
} REntry;
//atomic_inc( &(s->refCount) );
}
-
-
-
// simple mechanical allocation and
// deallocation of SESE records
void* mlpAllocSESErecord( int size );
MemoryQueue** mlpCreateMemoryQueueArray( int numMemoryQueue );
REntry* mlpCreateFineREntry( MemoryQueue* q, int type, SESEcommon* seseToIssue, void* dynID );
#ifdef RCR
// RCR builds carry an extra effect mask in each rentry
REntry* mlpCreateREntry( MemoryQueue* q, int type, SESEcommon* seseToIssue, INTPTR mask );
#else
REntry* mlpCreateREntry( MemoryQueue* q, int type, SESEcommon* seseToIssue );
#endif
MemoryQueue* createMemoryQueue();
void rehashMemoryQueue( SESEcommon* seseParent );
-
// Atomically take one reference on a task record; pair each call with a
// later RELEASE_REFERENCE_TO on the same record.
static inline void ADD_REFERENCE_TO( SESEcommon* rec ) {
  atomic_inc( &(rec->refCount) );
}
-static inline void RELEASE_REFERENCE_TO( SESEcommon* seseRec ) {
+static inline int RELEASE_REFERENCE_TO( SESEcommon* seseRec ) {
if( atomic_sub_and_test( 1, &(seseRec->refCount) ) ) {
poolfreeinto( seseRec->parent->taskRecordMemPool, seseRec );
+ return 1;
}
+ return 0;
}
// Production build: record checking compiles away to nothing.
#define CHECK_RECORD(x) ;

// NOTE(review): the taskpoolcreate/taskpoolalloc helpers that used to live
// here were removed by this change (presumably superseded by the memPool.h
// include above — confirm against memPool.h).

////////////////////////////////////////////////
//
// Some available debug versions of the above
// pool allocation-related helpers.  The lower
// 'x' appended to names means they are not hooked
// up, but check em in so we can switch names and
// use them for debugging
//
////////////////////////////////////////////////
#define ADD_REFERENCE_TOx(x) atomic_inc( &((x)->refCount) ); printf("0x%x ADD 0x%x on %d\n",(INTPTR)runningSESE,(INTPTR)(x),__LINE__);

#define RELEASE_REFERENCE_TOx(x) if (atomic_sub_and_test(1, &((x)->refCount))) {poolfreeinto(x->parent->taskRecordMemPool, x);printf("0x%x REL 0x%x on %d\n",(INTPTR)runningSESE,(INTPTR)(x),__LINE__);}

#define CHECK_RECORDx(x) { \
    if( ((SESEcommon*)(x))->refCount != 0 ) { \
      printf( "Acquired 0x%x from poolalloc, with refCount=%d\n", (INTPTR)(x), ((SESEcommon*)(x))->refCount ); } \
    if( ((SESEcommon*)(x))->fresh != 1 ) { \
      printf("0x%x reclaimed 0x%x on %d\n",(INTPTR)runningSESE,(INTPTR)(x),__LINE__); } \
    ((SESEcommon*)(x))->fresh = 0; \
  }


// this is for using a memPool to allocate task records,
// pass this into the poolcreate so it will run your
// custom init code ONLY for fresh records, reused records
// can be returned as is
void freshTaskRecordInitializer( void* seseRecord );
#endif /* __MLP_RUNTIME__ */