From ef22dbd9f1f5dcd4923f6b5ac191d72a8174c669 Mon Sep 17 00:00:00 2001
From: jjenista
Date: Fri, 29 Oct 2010 18:55:48 +0000
Subject: [PATCH] commit some useful debug stuff all turned off until needed,
 trying to get workspace synched to prevent constant conflicts

---
 Robust/src/IR/Flat/BuildCode.java |  1 +
 Robust/src/Runtime/memPool.h      |  9 +++++---
 Robust/src/Runtime/mlp_runtime.c  |  5 ++--
 Robust/src/Runtime/mlp_runtime.h  | 38 +++++++++++++++++++++++++++++--
 4 files changed, 45 insertions(+), 8 deletions(-)

diff --git a/Robust/src/IR/Flat/BuildCode.java b/Robust/src/IR/Flat/BuildCode.java
index 19e884d6..a4644c6c 100644
--- a/Robust/src/IR/Flat/BuildCode.java
+++ b/Robust/src/IR/Flat/BuildCode.java
@@ -3934,6 +3934,7 @@ public class BuildCode {
       output.println(" "+
                      fsen.getSESErecordName()+"* seseToIssue = ("+
                      fsen.getSESErecordName()+"*) poolalloc( runningSESE->taskRecordMemPool );");
+      output.println(" CHECK_RECORD( seseToIssue );");
     } else {
       output.println(" "+
                      fsen.getSESErecordName()+"* seseToIssue = ("+
diff --git a/Robust/src/Runtime/memPool.h b/Robust/src/Runtime/memPool.h
index 0662e5fc..6c75f647 100644
--- a/Robust/src/Runtime/memPool.h
+++ b/Robust/src/Runtime/memPool.h
@@ -133,6 +133,9 @@ static inline void poolfreeinto( MemPool* p, void* ptr ) {
 
       printf( "mprotect failed, errno=%d.\n", errno );
     }
+    printf( "itemSize is 0x%x, allocSize is 0x%x, protectSize is 0x%x.\n", (INTPTR)p->itemSize, (INTPTR)p->allocSize, (INTPTR)p->protectSize );
+    printf( "Intended to protect 0x%x to 0x%x,\n\n", (INTPTR)ptr, (INTPTR)ptr + (INTPTR)(p->protectSize) );
+    exit( -1 );
   }
 }
 
@@ -148,6 +151,7 @@ static inline void poolfreeinto( MemPool* p, void* ptr ) {
   // set up the now unneeded record to as the tail of the
   // free list by treating its first bytes as next pointer,
   MemPoolItem* tailNew = (MemPoolItem*) ptr;
+  tailNew->next = NULL;
 
   while( 1 ) {
 
@@ -180,12 +184,13 @@ static inline void* poolalloc( MemPool* p ) {
   // put the memory we intend to expose to client
   // on a page-aligned boundary, always return
   // new memory
+
   INTPTR nonAligned = (INTPTR) RUNMALLOC( p->allocSize );
   void* newRec = (void*)((nonAligned + pageSize-1) & ~(pageSize-1));
 
   //printf( "PageSize is %d or 0x%x.\n", (INTPTR)pageSize, (INTPTR)pageSize );
-  //printf( "itemSize is 0x%x and allocSize is 0x%x.\n", (INTPTR)p->itemSize, (INTPTR)p->allocSize );
+  //printf( "itemSize is 0x%x, allocSize is 0x%x, protectSize is 0x%x.\n", (INTPTR)p->itemSize, (INTPTR)p->allocSize, (INTPTR)p->protectSize );
   //printf( "Allocation returned 0x%x to 0x%x,\n", (INTPTR)nonAligned, (INTPTR)nonAligned + (INTPTR)(p->allocSize) );
   //printf( "Intend to use 0x%x to 0x%x,\n\n", (INTPTR)newRec, (INTPTR)newRec + (INTPTR)(p->itemSize) );
 
 
@@ -195,8 +200,6 @@ static inline void* poolalloc( MemPool* p ) {
   topOfRec += p->protectSize - 1;
   ((char*)topOfRec)[0] = 0x1;
 
-
-
   if( p->initFreshlyAllocated != NULL ) {
     p->initFreshlyAllocated( newRec );
   }
diff --git a/Robust/src/Runtime/mlp_runtime.c b/Robust/src/Runtime/mlp_runtime.c
index 79be46fb..9c29d0ce 100644
--- a/Robust/src/Runtime/mlp_runtime.c
+++ b/Robust/src/Runtime/mlp_runtime.c
@@ -25,9 +25,8 @@ void freshTaskRecordInitializer( void* seseRecord ) {
   SESEcommon* c = (SESEcommon*) seseRecord;
   pthread_cond_init( &(c->runningChildrenCond), NULL );
   pthread_mutex_init( &(c->lock), NULL );
-
-  // no need to use return value yet, future maybe
-  //return NULL;
+  c->refCount = 0;
+  //c->fresh = 1;
 }
 
 
diff --git a/Robust/src/Runtime/mlp_runtime.h b/Robust/src/Runtime/mlp_runtime.h
index 8af7cfbe..9e9bc988 100644
--- a/Robust/src/Runtime/mlp_runtime.h
+++ b/Robust/src/Runtime/mlp_runtime.h
@@ -2,6 +2,10 @@
 #define __MLP_RUNTIME__
 
 
+#include <stdio.h>
+#include <stdlib.h>
+
+
 #include <pthread.h>
 #include "runtime.h"
 #include "mem.h"
@@ -10,6 +14,8 @@
 #include "mlp_lock.h"
 #include "memPool.h"
 
+
+
 #ifndef FALSE
 #define FALSE 0
 #endif
@@ -113,7 +119,6 @@ typedef struct SESEcommon_t {
   struct REntry_t* rentryArray[NUMRENTRY];
   struct REntry_t* unresolvedRentryArray[NUMRENTRY];
 
-
 #ifdef RCR
   int offsetToParamRecords;
   volatile int rcrstatus;
@@ -262,16 +267,45 @@ MemoryQueue* createMemoryQueue();
 void rehashMemoryQueue(SESEcommon* seseParent);
 
 
+
+
 static inline void ADD_REFERENCE_TO( SESEcommon* seseRec ) {
   atomic_inc( &(seseRec->refCount) );
 }
 
-static inline void RELEASE_REFERENCE_TO( SESEcommon* seseRec ) {
+static inline int RELEASE_REFERENCE_TO( SESEcommon* seseRec ) {
   if( atomic_sub_and_test( 1, &(seseRec->refCount) ) ) {
     poolfreeinto( seseRec->parent->taskRecordMemPool, seseRec );
+    return 1;
   }
+  return 0;
 }
 
+#define CHECK_RECORD(x) ;
+
+
+////////////////////////////////////////////////
+//
+// Some available debug versions of the above
+// pool allocation-related helpers.  The lower
+// 'x' appended to names means they are not hooked
+// up, but check em in so we can switch names and
+// use them for debugging
+//
+////////////////////////////////////////////////
+#define ADD_REFERENCE_TOx(x) atomic_inc( &((x)->refCount) ); printf("0x%x ADD 0x%x on %d\n",(INTPTR)runningSESE,(INTPTR)(x),__LINE__);
+
+#define RELEASE_REFERENCE_TOx(x) if (atomic_sub_and_test(1, &((x)->refCount))) {poolfreeinto(x->parent->taskRecordMemPool, x);printf("0x%x REL 0x%x on %d\n",(INTPTR)runningSESE,(INTPTR)(x),__LINE__);}
+
+#define CHECK_RECORDx(x) { \
+  if( ((SESEcommon*)(x))->refCount != 0 ) { \
+    printf( "Acquired 0x%x from poolalloc, with refCount=%d\n", (INTPTR)(x), ((SESEcommon*)(x))->refCount ); } \
+  if( ((SESEcommon*)(x))->fresh != 1 ) { \
+    printf("0x%x reclaimed 0x%x on %d\n",(INTPTR)runningSESE,(INTPTR)(x),__LINE__); } \
+  ((SESEcommon*)(x))->fresh = 0; \
+}
+
+
 // this is for using a memPool to allocate task records,
 // pass this into the poolcreate so it will run your
 
-- 
2.34.1
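
Usage note on the debug helpers added in mlp_runtime.h: CHECK_RECORD ships as a no-op,
and the 'x'-suffixed variants (ADD_REFERENCE_TOx, RELEASE_REFERENCE_TOx, CHECK_RECORDx)
are deliberately left unhooked, so enabling them means swapping names by hand. Note also
that CHECK_RECORDx reads a 'fresh' field on SESEcommon that this commit only hints at via
the commented-out c->fresh = 1 line. The standalone sketch below merely mirrors the same
no-op-versus-printf-checking pattern on a toy record type; DEBUG_RECORDS, DebugRec, and
main() are illustrative names and are not part of the runtime patched above.

  /* Standalone sketch, not from the patch: the same "no-op macro vs.
   * printf-based debug macro" pattern, selected at compile time.
   * DebugRec, DEBUG_RECORDS and main() are illustrative names only. */
  #include <stdio.h>

  typedef struct DebugRec_t {
    int refCount;   /* mirrors SESEcommon's refCount                   */
    int fresh;      /* mirrors the 'fresh' flag CHECK_RECORDx expects  */
  } DebugRec;

  #ifdef DEBUG_RECORDS
    /* debug flavor: complain when a record leaves the pool with a stale
     * reference count or without its fresh flag set, then clear the flag */
    #define CHECK_RECORD(r)                                                   \
      do {                                                                    \
        if( (r)->refCount != 0 )                                              \
          printf( "record %p has refCount=%d\n", (void*)(r), (r)->refCount ); \
        if( (r)->fresh != 1 )                                                 \
          printf( "record %p was reclaimed, not fresh\n", (void*)(r) );       \
        (r)->fresh = 0;                                                       \
      } while( 0 )
  #else
    /* production flavor: compiles away, like the patch's CHECK_RECORD */
    #define CHECK_RECORD(r) ;
  #endif

  int main( void ) {
    DebugRec r = { 0, 1 };
    CHECK_RECORD( &r );  /* silent: record looks freshly initialized        */
    r.refCount = 2;
    CHECK_RECORD( &r );  /* with -DDEBUG_RECORDS this prints both warnings  */
    return 0;
  }

Building it twice, with and without -DDEBUG_RECORDS, reproduces the "all turned off
until needed" behavior the commit message describes.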