From: jjenista
Date: Wed, 13 Oct 2010 23:15:00 +0000 (+0000)
Subject: added prefetch when grabbing a record for next one, and fixed a malloc to RUNMALLOC
X-Git-Url: http://demsky.eecs.uci.edu/git/?a=commitdiff_plain;h=a1d3d1248b18563a6f84963ca4f888189465281e;p=IRC.git

added prefetch when grabbing a record for next one, and fixed a malloc to RUNMALLOC
---

diff --git a/Robust/src/Runtime/memPool.h b/Robust/src/Runtime/memPool.h
index b9b55cf6..90102e4e 100644
--- a/Robust/src/Runtime/memPool.h
+++ b/Robust/src/Runtime/memPool.h
@@ -19,6 +19,7 @@
 //////////////////////////////////////////////////////////
 
 #include
+#include "mem.h"
 
 #include "mlp_lock.h"
 
@@ -99,6 +100,7 @@ static inline void poolfreeinto( MemPool* p, void* ptr ) {
 }
 
 
+
 static inline void* poolalloc( MemPool* p ) {
 
   // to protect CAS in poolfree from dereferencing
@@ -110,13 +112,27 @@ static inline void* poolalloc( MemPool* p ) {
 
   if( headCurrent->next == NULL ) {
     // only one item, so don't take from pool
-    return malloc( p->itemSize );
+    return RUNMALLOC( p->itemSize );
   }
 
   p->head = headCurrent->next;
 
-  // just until uninitialized mem bug found
-  //memset( headCurrent, 0, p->itemSize );
+
+  //////////////////////////////////////////////////////////
+  //
+  //  a prefetch statement from the Linux kernel,
+  //  which the little "m" depends on architecture:
+  //
+  //  static inline void prefetch(void *x)
+  //  {
+  //    asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x));
+  //  }
+  //
+  //
+  //  but this built-in gcc one seems the most portable:
+  //////////////////////////////////////////////////////////
+  __builtin_prefetch( &(p->head->next) );
+
 
   return headCurrent;
 }
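
The hunk above issues __builtin_prefetch( &(p->head->next) ) right after the free-list head is advanced, so the link field of the record that the next poolalloc() call will hand out is already being pulled into cache. The following standalone C program is a minimal sketch of that same pop-and-prefetch pattern; the Node, FreeList, and list_pop names are illustrative stand-ins rather than the actual MemPool definitions from memPool.h, and plain malloc() stands in for RUNMALLOC().

// Sketch only: Node, FreeList, and list_pop are illustrative names,
// not the real MemPool types from Robust/src/Runtime/memPool.h.
#include <stdio.h>
#include <stdlib.h>

typedef struct Node {
  struct Node* next;
  // payload fields would follow in a real pool record
} Node;

typedef struct FreeList {
  Node* head;
} FreeList;

static inline void* list_pop( FreeList* list ) {
  Node* headCurrent = list->head;

  if( headCurrent == NULL || headCurrent->next == NULL ) {
    // keep the last record in the list; fall back to the general
    // allocator instead, as poolalloc() does with RUNMALLOC()
    return malloc( sizeof( Node ) );
  }

  list->head = headCurrent->next;

  // GCC/Clang builtin: hint that the new head's link field will be
  // read soon, so the next list_pop() is less likely to miss in cache
  __builtin_prefetch( &(list->head->next) );

  return headCurrent;
}

int main( void ) {
  // three statically linked records, just to exercise list_pop()
  Node nodes[3] = { { &nodes[1] }, { &nodes[2] }, { NULL } };
  FreeList list = { &nodes[0] };

  void* first  = list_pop( &list );  // returns &nodes[0]; prefetches nodes[1].next
  void* second = list_pop( &list );  // returns &nodes[1]; prefetches nodes[2].next

  printf( "popped %p then %p\n", first, second );
  return 0;
}

The prefetch is only a hint: on targets with no data-prefetch instruction, GCC simply emits no code for __builtin_prefetch, which is why the commit comment calls the builtin the most portable choice compared with the architecture-specific inline asm from the Linux kernel.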