4 //////////////////////////////////////////////////////////
6 // A memory pool implements POOLCREATE, POOLALLOC and
7 // POOLFREE to improve memory allocation by reusing records.
9 // This implementation uses a lock-free singly-linked list
10 // to store reusable records. The list is initialized with
11 // one valid record, and the list is considered empty when
12 // it has only one record; this allows the enqueue operation's
13 // CAS to assume tail can always be dereferenced.
// poolfree adds newly freed records to the BACK of the list;
// poolalloc either takes a record from the FRONT or mallocs a new one.
19 //////////////////////////////////////////////////////////
// Assumed cache line size in bytes (typical for x86); used to pad
// producer-side and consumer-side pool fields onto separate cache
// lines so poolalloc and poolfreeinto do not false-share.
#define CACHELINESIZE 64
// A node of the lock-free free list. A freed record is reused in
// place as a node: its first sizeof(void*) bytes are overlaid with
// the 'next' pointer (see poolfreeinto).
// NOTE(review): struct body is truncated in this view of the file.
typedef struct MemPoolItem_t {

// Pool bookkeeping.
// NOTE(review): field list is only partially visible in this view.
typedef struct MemPool_t {
  // Padding so the consumer-owned field(s) above and the
  // producer-updated tail below land on different cache lines,
  // avoiding false sharing between poolalloc and poolfreeinto.
  char buffer[CACHELINESIZE - sizeof(void*)];
// Create a pool whose records are itemSize bytes each.
// The memory pool must always have at least one record in its free
// list (a sentinel), so that poolfreeinto's CAS can always
// dereference the tail; the sentinel is allocated here.
// NOTE(review): the rest of this function (tail/next setup and the
// return of p) is not visible in this view.
static MemPool* poolcreate( int itemSize ) {
  MemPool* p = RUNMALLOC( sizeof( MemPool ) );
  p->itemSize = itemSize;        // size of each record the pool hands out
  p->head = RUNMALLOC( itemSize ); // sentinel node: list counts as empty with one node
58 // in: a ptr, expected old, desired new
61 // Pass in a ptr, what you expect the old value is and
62 // what you want the new value to be.
// The CAS returns the value that was actually stored: if it matches
// the old value you proposed, the update succeeded; otherwise another
// thread's CAS won the race, so try again (the returned value is the
// "expected old" value to pass on the next attempt.)
// Return a no-longer-needed record to the pool's free list.
// This is the producer side: it may be called concurrently, hence
// the CAS on p->tail. The record's first sizeof(void*) bytes are
// reinterpreted as the MemPoolItem 'next' pointer.
// NOTE(review): the enclosing retry loop and the NULL-ing of
// tailNew->next are not visible in this view.
static inline void poolfreeinto( MemPool* p, void* ptr ) {

  MemPoolItem* tailCurrent;
  MemPoolItem* tailActual;

  // set up the now unneeded record as the tail of the
  // free list by treating its first bytes as next pointer,
  MemPoolItem* tailNew = (MemPoolItem*) ptr;

  // make sure the null happens before the insertion,
  // also makes sure that we reload tailCurrent, etc..

  tailCurrent = p->tail;
  tailActual = (MemPoolItem*)
    CAS( &(p->tail),         // ptr to set
         (INTPTR) tailCurrent, // expected old tail
         (INTPTR) tailNew // try set to our new tail
  if( tailActual == tailCurrent ) {
    // success: we won the race for the tail slot, so we now own the
    // old tail node; publish the link from it to the new node
    tailCurrent->next = tailNew;

  // if CAS failed, retry entire operation
// Take a record from the pool's free list, or malloc a fresh one when
// the list is (effectively) empty.
// NOTE(review): the condition guarding the RUNMALLOC fallback and the
// head-advance/closing lines are not visible in this view.
static inline void* poolalloc( MemPool* p ) {

  // to protect CAS in poolfree from dereferencing
  // null, treat the queue as empty when there is
  // only one item. The dequeue operation is only
  // executed by the thread that owns the pool, so
  // it doesn't require an atomic op
  MemPoolItem* headCurrent = p->head;
  MemPoolItem* next=headCurrent->next;

    // only the sentinel item remains, so don't take from pool
    return (void*) RUNMALLOC( p->itemSize );

  //////////////////////////////////////////////////////////
  // warm the cache for the record we are about to hand out.
  // An alternative form:
  // static inline void prefetch(void *x)
  //   asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x));
  // but this built-in gcc one seems the most portable:
  //////////////////////////////////////////////////////////
  //__builtin_prefetch( &(p->head->next) );

  // NOTE(review): prefetcht0 is x86-only; non-x86 builds would need
  // the __builtin_prefetch form above instead.
  asm volatile( "prefetcht0 (%0)" :: "r" (next));
  next=(MemPoolItem*)(((char *)next)+CACHELINESIZE);
  asm volatile( "prefetcht0 (%0)" :: "r" (next));

  return (void*)headCurrent;
// Tear down the pool, presumably walking the free list from head and
// releasing each node — TODO(review): loop body and the free of p
// itself are not visible in this view; confirm all nodes are released.
static void pooldestroy( MemPool* p ) {
  MemPoolItem* i = p->head;
150 #endif // ___MEMPOOL_H__