#ifndef ___MEMPOOL_H__
#define ___MEMPOOL_H__

//////////////////////////////////////////////////////////
//
//  A memory pool implements POOLCREATE, POOLALLOC and
//  POOLFREE to improve memory allocation by reusing records.
//
//  This implementation uses a lock-free singly-linked list
//  to store reusable records. The list is initialized with
//  one valid record, and the list is considered empty when
//  it has only one record; this allows the enqueue operation's
//  atomic exchange to assume the tail can always be dereferenced.
//
//  poolfree adds newly freed records to the BACK of the list.
//
//  poolalloc either takes a record from the FRONT of the list,
//  or mallocs a fresh one when the list is empty.
//
//////////////////////////////////////////////////////////
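// A minimal usage sketch, not part of this header: the Task record
// type and taskInit initializer below are hypothetical stand-ins for
// whatever fixed-size record a client pools.
//
//   typedef struct Task_t { int id; char payload[60]; } Task;
//   static void taskInit(void* t) { ((Task*)t)->id = -1; }
//
//   MemPool* pool = poolcreate(sizeof(Task), taskInit);
//   Task* t = (Task*) poolalloc(pool);  // owner thread only
//   /* ... use t ... */
//   poolfreeinto(pool, t);              // any thread may free
//   pooldestroy(pool);                  // owner, once quiescent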
#include <stdlib.h>

#ifdef MEMPOOL_DETECT_MISUSE
// headers needed by the misuse detector: mprotect, sysconf,
// errno, and the diagnostic prints
#include <stdio.h>
#include <errno.h>
#include <unistd.h>
#include <sys/mman.h>

static INTPTR pageSize;
#endif

// RUNMALLOC, INTPTR and LOCKXCHG are assumed to be provided by the
// project's runtime headers, included ahead of this file.
#define CACHELINESIZE 64
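// (64 bytes matches the cache-line size of typical x86 parts; this
// is an assumption about the deployment target.)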
typedef struct MemPoolItem_t {
  struct MemPoolItem_t* next;
} MemPoolItem;
typedef struct MemPool_t {
  int itemSize;

  // only invoke this on items that are
  // actually new, saves time for reused
  // items
  void (*initFreshlyAllocated)(void*);

#ifdef MEMPOOL_DETECT_MISUSE
  int allocSize;
  int protectSize;
#else
  MemPoolItem* head;

  // avoid cache line contention between producer/consumer...
  char buffer[CACHELINESIZE];

  MemPoolItem* tail;
#endif
} MemPool;
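// The padding above is meant to keep head (touched by the pool's
// owner in poolalloc) and tail (touched by any thread in
// poolfreeinto) on separate cache lines, avoiding false sharing.
// A compile-time sanity check, as a sketch (assumes a C11 compiler
// and the non-misuse layout):
//
//   #include <stddef.h>
//   _Static_assert( offsetof(MemPool, tail) - offsetof(MemPool, head)
//                     >= CACHELINESIZE,
//                   "head and tail may share a cache line" );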
// the memory pool must always have at least one
// item in it
static MemPool* poolcreate(int itemSize,
                           void (*initializer)(void*)
                           ) {
  MemPool* p = RUNMALLOC(sizeof( MemPool ));
  p->itemSize = itemSize;
  p->initFreshlyAllocated = initializer;
#ifdef MEMPOOL_DETECT_MISUSE
  // when detecting misuse, round the item size
  // up to a page and add a page, so whatever
  // allocated memory you get, you can use a
  // page-aligned subset as the record
  pageSize = sysconf(_SC_PAGESIZE);

  if( itemSize % pageSize == 0 ) {
    // if the item size is already an exact multiple
    // of the page size, just increase alloc by one page
    p->allocSize = itemSize + pageSize;

    // and the size passed to mprotect must be an exact
    // page multiple, so it is just the item size itself
    p->protectSize = itemSize;
  } else {
    // otherwise, round down to a page boundary, then add two pages
    p->allocSize = (itemSize & ~(pageSize-1)) + 2*pageSize;

    // and the size for mprotect must be an exact page
    // multiple, so round down and add one page
    p->protectSize = (itemSize & ~(pageSize-1)) + pageSize;
  }
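  // A worked example (assuming a 4096-byte page): for itemSize=100,
  // allocSize = (100 & ~4095) + 2*4096 = 8192 and protectSize =
  // 0 + 4096 = 4096, so any 8192-byte allocation contains a
  // page-aligned 4096-byte window big enough to hold the record
  // and be mprotect'ed exactly.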
#else

  // normal version: seed the list with one valid record
  p->head = RUNMALLOC(p->itemSize);

  if( p->initFreshlyAllocated != NULL ) {
    p->initFreshlyAllocated(p->head);
  }

  p->head->next = NULL;
  p->tail       = p->head;
#endif

  return p;
}
#ifdef MEMPOOL_DETECT_MISUSE

static inline void poolfreeinto(MemPool* p, void* ptr) {
  // don't actually return memory to the pool; just lock it
  // up tight so the first code to touch it badly gets caught.
  // mprotect operates on whole pages, and ptr is page-aligned
  // by construction in poolalloc below
  if( mprotect(ptr, p->protectSize, PROT_NONE) != 0 ) {

    if( errno == ENOMEM ) {
      printf("mprotect failed, ENOMEM.\n");
    } else {
      printf("mprotect failed, errno=%d.\n", errno);
    }

    // note: %lx with unsigned long casts, since these values are
    // pointer-sized and %x would truncate them on 64-bit targets
    printf("itemSize is 0x%lx, allocSize is 0x%lx, protectSize is 0x%lx.\n",
           (unsigned long)p->itemSize,
           (unsigned long)p->allocSize,
           (unsigned long)p->protectSize);
    printf("Intended to protect 0x%lx to 0x%lx.\n\n",
           (unsigned long)(INTPTR)ptr,
           (unsigned long)((INTPTR)ptr + (INTPTR)p->protectSize));

    exit(-1);
  }
}
#else

static inline void poolfreeinto(MemPool* p, void* ptr) {
  MemPoolItem* tailNew = (MemPoolItem*) ptr;
  tailNew->next = NULL;

  // atomically swing tail to the new record, then link the old
  // tail to it; the old tail is always safe to write through
  // because the dequeue never removes the last record
  MemPoolItem* tailCurrent = (MemPoolItem*) LOCKXCHG((INTPTR*) &p->tail, (INTPTR) tailNew);
  tailCurrent->next = tailNew;
}

#endif
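// LOCKXCHG is assumed to be an atomic pointer-sized exchange
// (x86 XCHG, which is implicitly locked). A portable stand-in,
// as a sketch using a GCC/Clang builtin:
//
//   static inline INTPTR lockxchg_sketch(INTPTR* ptr, INTPTR val) {
//     return __atomic_exchange_n(ptr, val, __ATOMIC_SEQ_CST);
//   }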
#ifdef MEMPOOL_DETECT_MISUSE

static inline void* poolalloc(MemPool* p) {
  // put the memory we intend to expose to the client on a
  // page-aligned boundary, and always return fresh memory;
  // nothing is reused in this mode
  INTPTR nonAligned = (INTPTR) RUNMALLOC(p->allocSize);

  void* newRec = (void*)((nonAligned + pageSize-1) & ~(pageSize-1));
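  // For example (assuming a 4096-byte page), if RUNMALLOC returns
  // 0x1000f, then newRec = (0x1000f + 0xfff) & ~0xfff = 0x11000:
  // the first page boundary at or above the raw allocation.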
  //printf( "PageSize is %d or 0x%x.\n", (INTPTR)pageSize, (INTPTR)pageSize );
  //printf( "itemSize is 0x%x, allocSize is 0x%x, protectSize is 0x%x.\n", (INTPTR)p->itemSize, (INTPTR)p->allocSize, (INTPTR)p->protectSize );
  //printf( "Allocation returned 0x%x to 0x%x,\n", (INTPTR)nonAligned, (INTPTR)nonAligned + (INTPTR)(p->allocSize) );
  //printf( "Intend to use 0x%x to 0x%x,\n\n", (INTPTR)newRec, (INTPTR)newRec + (INTPTR)(p->itemSize) );
  // intentionally touch the top of the new, aligned record, in
  // terms of the pages that will be locked when it is eventually
  // freed
  INTPTR topOfRec = (INTPTR)newRec;
  topOfRec += p->protectSize - 1;
  ((char*)topOfRec)[0] = 0x1;

  if( p->initFreshlyAllocated != NULL ) {
    p->initFreshlyAllocated(newRec);
  }

  return newRec;
}

#else
static inline void* poolalloc(MemPool* p) {
  // to protect the enqueue in poolfree from dereferencing
  // null, treat the queue as empty when there is only one
  // item: the last record is never handed out. The dequeue
  // operation is only executed by the thread that owns the
  // pool, so it doesn't require an atomic op
  MemPoolItem* headCurrent = p->head;
  MemPoolItem* next = headCurrent->next;
  if( next == NULL ) {
    // only one item in the list, so don't take from the pool;
    // malloc a fresh record instead
    void* newRec = RUNMALLOC(p->itemSize);

    if( p->initFreshlyAllocated != NULL ) {
      p->initFreshlyAllocated(newRec);
    }

    return newRec;
  }
  // advance head past the record we are taking, and prefetch
  // the first two cache lines of the new head for the next call
  p->head = next;

  asm volatile ( "prefetcht0 (%0)" :: "r" (next));
  next = (MemPoolItem*)(((char*)next) + CACHELINESIZE);
  asm volatile ( "prefetcht0 (%0)" :: "r" (next));

  return (void*)headCurrent;
}

#endif
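// State sketch of the two operations: with records A, B, C pooled,
//
//   head -> A -> B -> C <- tail
//
// poolalloc returns A and advances head to B; once only C remains
// (head == tail, next == NULL) the list is treated as empty and C
// stays put. poolfreeinto of D exchanges tail (C <- D) and then
// stores C->next = D, so the record it links through is always one
// the consumer has not taken.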
static void pooldestroy(MemPool* p) {

#ifndef MEMPOOL_DETECT_MISUSE
  // free every record still parked in the pool; records currently
  // checked out by clients are not tracked here and must be freed
  // by their owners
  MemPoolItem* i = p->head;
  MemPoolItem* n;

  while( i != NULL ) {
    n = i->next;
    free(i);   // assumes RUNMALLOC is malloc-compatible
    i = n;
  }
#endif

  free(p);
}

#endif // ___MEMPOOL_H__