/*
 * SLOB Allocator: Simple List Of Blocks
 *
 * Matt Mackall <mpm@selenic.com> 12/30/03
 *
 * NUMA support by Paul Mundt, 2007.
 *
 * How SLOB works:
 *
 * The core of SLOB is a traditional K&R style heap allocator, with
 * support for returning aligned objects. The granularity of this
 * allocator is as little as 2 bytes, however typically most architectures
 * will require 4 bytes on 32-bit and 8 bytes on 64-bit.
 *
 * The slob heap is a set of linked lists of pages from alloc_pages(),
 * and within each page, there is a singly-linked list of free blocks
 * (slob_t). The heap is grown on demand. To reduce fragmentation,
 * heap pages are segregated into three lists, with objects less than
 * 256 bytes, objects less than 1024 bytes, and all other objects.
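 *
 * For example (illustrative only): a 100-byte request is served from the
 * "small" list, a 512-byte request from the "medium" list, and a 2000-byte
 * request from the "large" list, per the SLOB_BREAK1/SLOB_BREAK2 cut-offs
 * defined below.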
 *
 * Allocation from the heap involves first searching for a page with
 * sufficient free blocks (using a next-fit-like approach) followed by
 * a first-fit scan of the page. Deallocation inserts objects back
 * into the free list in address order, so this is effectively an
 * address-ordered first fit.
 *
 * Above this is an implementation of kmalloc/kfree. Blocks returned
 * from kmalloc are prepended with a 4-byte header with the kmalloc size.
 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
 * alloc_pages() directly, allocating compound pages so the page order
 * does not have to be separately tracked.
 * These objects are detected in kfree() because PageSlab()
 * is false when the page is allocated by the page allocator.
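 *
 * Illustrative sketch (assuming a 4-byte minimum alignment; the real value
 * is max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN)): the block backing a
 * kmalloc(100) call looks like
 *
 *   +-------------+------------------------------+
 *   | size = 100  | 100-byte payload for caller  |
 *   +-------------+------------------------------+
 *   ^ header      ^ pointer returned by kmalloc()
 *
 * and kfree() steps back by that alignment to recover the stored size
 * before handing the whole block back to the slob freelist.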
 *
 * SLAB is emulated on top of SLOB by simply calling constructors and
 * destructors for every SLAB allocation. Objects are returned with the
 * 4-byte alignment unless the SLAB_HWCACHE_ALIGN flag is set, in which
 * case the low-level allocator will fragment blocks to create the proper
 * alignment. Again, objects of page-size or greater are allocated by
 * calling alloc_pages(). As SLAB objects know their size, no separate
 * size bookkeeping is necessary and there is essentially no allocation
 * space overhead, and compound pages aren't needed for multi-page
 * allocations.
 *
 * NUMA support in SLOB is fairly simplistic, pushing most of the real
 * logic down to the page allocator, and simply doing the node accounting
 * on the upper levels. In the event that a node id is explicitly
 * provided, alloc_pages_exact_node() with the specified node id is used
 * instead. The common case (or when the node id isn't explicitly provided)
 * will default to the current node, as per numa_node_id().
 *
 * Node aware pages are still inserted into the global freelist, and
 * these are scanned for by matching against the node id encoded in the
 * page flags. As a result, block allocations that can be satisfied from
 * the freelist will only be done so on pages residing on the same node,
 * in order to prevent random node placement.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include "slab.h"

#include <linux/mm.h>
#include <linux/swap.h> /* struct reclaim_state */
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <linux/kmemleak.h>

#include <trace/events/kmem.h>

#include <linux/atomic.h>

/*
 * slob_block has a field 'units', which indicates size of block if +ve,
 * or offset of next block if -ve (in SLOB_UNITs).
 *
 * Free blocks of size 1 unit simply contain the offset of the next block.
 * Those with larger size contain their size in the first SLOB_UNIT of
 * memory, and the offset of the next free block in the second SLOB_UNIT.
 */
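
/*
 * Illustrative example (not from the original source): a free block spanning
 * 3 units whose successor starts 40 units into the page is encoded as
 *
 *   s[0].units = 3    (its size)
 *   s[1].units = 40   (offset of the next free block)
 *
 * while a 1-unit free block with the same successor stores only
 *
 *   s[0].units = -40  (negated offset marks the single-unit case)
 */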
#if PAGE_SIZE <= (32767 * 2)
typedef s16 slobidx_t;
#else
typedef s32 slobidx_t;
#endif

struct slob_block {
	slobidx_t units;
};
typedef struct slob_block slob_t;

/*
 * All partially free slob pages go on these lists.
 */
#define SLOB_BREAK1 256
#define SLOB_BREAK2 1024
static LIST_HEAD(free_slob_small);
static LIST_HEAD(free_slob_medium);
static LIST_HEAD(free_slob_large);

/*
 * slob_page_free: true for pages on one of the free_slob_* lists.
 */
static inline int slob_page_free(struct page *sp)
{
	return PageSlobFree(sp);
}

static void set_slob_page_free(struct page *sp, struct list_head *list)
{
	list_add(&sp->list, list);
	__SetPageSlobFree(sp);
}

static inline void clear_slob_page_free(struct page *sp)
{
	list_del(&sp->list);
	__ClearPageSlobFree(sp);
}

#define SLOB_UNIT sizeof(slob_t)
#define SLOB_UNITS(size) (((size) + SLOB_UNIT - 1)/SLOB_UNIT)
#define SLOB_ALIGN L1_CACHE_BYTES
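
/*
 * Worked example (illustrative): with PAGE_SIZE = 4096 the #if above picks
 * s16, so SLOB_UNIT = sizeof(slob_t) = 2 bytes and
 * SLOB_UNITS(30) = (30 + 2 - 1) / 2 = 15, i.e. a 30-byte request occupies
 * 15 two-byte units within a slob page.
 */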

/*
 * struct slob_rcu is inserted at the tail of allocated slob blocks, which
 * were created with a SLAB_DESTROY_BY_RCU slab. slob_rcu is used to free
 * the block using call_rcu.
 */
struct slob_rcu {
	struct rcu_head head;
	int size;
};
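
/*
 * Layout sketch (illustrative): for a SLAB_DESTROY_BY_RCU cache the object
 * size is grown by sizeof(struct slob_rcu) in __kmem_cache_create(), so an
 * allocated object looks roughly like
 *
 *   | object payload (c->object_size) ... | struct slob_rcu |
 *
 * and kmem_cache_free() fills in the footer before handing it to call_rcu().
 */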

/*
 * slob_lock protects all slob allocator structures.
 */
static DEFINE_SPINLOCK(slob_lock);

/*
 * Encode the given size and next info into a free slob block s.
 */
static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
{
	slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
	slobidx_t offset = next - base;

	if (size > 1) {
		s[0].units = size;
		s[1].units = offset;
	} else
		s[0].units = -offset;
}

/*
 * Return the size of a slob block.
 */
static slobidx_t slob_units(slob_t *s)
{
	if (s->units > 0)
		return s->units;
	return 1;
}

/*
 * Return the next free slob block pointer after this one.
 */
static slob_t *slob_next(slob_t *s)
{
	slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
	slobidx_t next;

	if (s[0].units < 0)
		next = -s[0].units;
	else
		next = s[1].units;
	return base + next;
}

/*
 * Returns true if s is the last free block in its page.
 */
static int slob_last(slob_t *s)
{
	return !((unsigned long)slob_next(s) & ~PAGE_MASK);
}
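
/*
 * Note (added for clarity): the check above works because the final free
 * block in a page is made to point at the start of the *next* page (see the
 * set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE)) call in
 * slob_alloc()), so slob_next() lands on a page boundary and its offset
 * within the page is zero.
 */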

static void *slob_new_pages(gfp_t gfp, int order, int node)
{
	void *page;

#ifdef CONFIG_NUMA
	if (node != NUMA_NO_NODE)
		page = alloc_pages_exact_node(node, gfp, order);
	else
#endif
		page = alloc_pages(gfp, order);

	if (!page)
		return NULL;
	return page_address(page);
}

static void slob_free_pages(void *b, int order)
{
	if (current->reclaim_state)
		current->reclaim_state->reclaimed_slab += 1 << order;
	free_pages((unsigned long)b, order);
}

/*
 * Allocate a slob block within a given slob_page sp.
 */
static void *slob_page_alloc(struct page *sp, size_t size, int align)
{
	slob_t *prev, *cur, *aligned = NULL;
	int delta = 0, units = SLOB_UNITS(size);

	for (prev = NULL, cur = sp->freelist; ; prev = cur, cur = slob_next(cur)) {
		slobidx_t avail = slob_units(cur);

		if (align) {
			aligned = (slob_t *)ALIGN((unsigned long)cur, align);
			delta = aligned - cur;
		}
		if (avail >= units + delta) { /* room enough? */
			slob_t *next;

			if (delta) { /* need to fragment head to align? */
				next = slob_next(cur);
				set_slob(aligned, avail - delta, next);
				set_slob(cur, delta, aligned);
				prev = cur;
				cur = aligned;
				avail = slob_units(cur);
			}

			next = slob_next(cur);
			if (avail == units) { /* exact fit? unlink. */
				if (prev)
					set_slob(prev, slob_units(prev), next);
				else
					sp->freelist = next;
			} else { /* fragment */
				if (prev)
					set_slob(prev, slob_units(prev), cur + units);
				else
					sp->freelist = cur + units;
				set_slob(cur + units, avail - units, next);
			}

			sp->units -= units;
			if (!sp->units)
				clear_slob_page_free(sp);
			return cur;
		}
		if (slob_last(cur))
			return NULL;
	}
}
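
/*
 * Worked example (illustrative, not from the original source): suppose the
 * candidate free block must be advanced by delta = 3 units to reach the
 * requested alignment and has avail = 24 units free, with units = 8
 * requested. Since 24 >= 8 + 3, the head 3 units are split off as their own
 * free block, the 8-unit request is carved from the aligned remainder, and
 * the trailing 13 units stay on the page's freelist.
 */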

/*
 * slob_alloc: entry point into the slob allocator.
 */
static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
{
	struct page *sp;
	struct list_head *prev;
	struct list_head *slob_list;
	slob_t *b = NULL;
	unsigned long flags;

	if (size < SLOB_BREAK1)
		slob_list = &free_slob_small;
	else if (size < SLOB_BREAK2)
		slob_list = &free_slob_medium;
	else
		slob_list = &free_slob_large;

	spin_lock_irqsave(&slob_lock, flags);
	/* Iterate through each partially free page, try to find room */
	list_for_each_entry(sp, slob_list, list) {
#ifdef CONFIG_NUMA
		/*
		 * If there's a node specification, search for a partial
		 * page with a matching node id in the freelist.
		 */
		if (node != NUMA_NO_NODE && page_to_nid(sp) != node)
			continue;
#endif
		/* Enough room on this page? */
		if (sp->units < SLOB_UNITS(size))
			continue;

		/* Attempt to alloc */
		prev = sp->list.prev;
		b = slob_page_alloc(sp, size, align);
		if (!b)
			continue;

		/* Improve fragment distribution and reduce our average
		 * search time by starting our next search here. (see
		 * Knuth vol 1, sec 2.5, pg 449) */
		if (prev != slob_list->prev &&
				slob_list->next != prev->next)
			list_move_tail(slob_list, prev->next);
		break;
	}
	spin_unlock_irqrestore(&slob_lock, flags);

	/* Not enough space: must allocate a new page */
	if (!b) {
		b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
		if (!b)
			return NULL;
		sp = virt_to_page(b);
		__SetPageSlab(sp);

		spin_lock_irqsave(&slob_lock, flags);
		sp->units = SLOB_UNITS(PAGE_SIZE);
		sp->freelist = b;
		INIT_LIST_HEAD(&sp->list);
		set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
		set_slob_page_free(sp, slob_list);
		b = slob_page_alloc(sp, size, align);
		BUG_ON(!b);
		spin_unlock_irqrestore(&slob_lock, flags);
	}
	if (unlikely((gfp & __GFP_ZERO) && b))
		memset(b, 0, size);
	return b;
}

/*
 * slob_free: entry point into the slob allocator.
 */
static void slob_free(void *block, int size)
{
	struct page *sp;
	slob_t *prev, *next, *b = (slob_t *)block;
	slobidx_t units;
	unsigned long flags;
	struct list_head *slob_list;

	if (unlikely(ZERO_OR_NULL_PTR(block)))
		return;
	BUG_ON(!size);

	sp = virt_to_page(block);
	units = SLOB_UNITS(size);

	spin_lock_irqsave(&slob_lock, flags);

	if (sp->units + units == SLOB_UNITS(PAGE_SIZE)) {
		/* Go directly to page allocator. Do not pass slob allocator */
		if (slob_page_free(sp))
			clear_slob_page_free(sp);
		spin_unlock_irqrestore(&slob_lock, flags);
		__ClearPageSlab(sp);
		reset_page_mapcount(sp);
		slob_free_pages(b, 0);
		return;
	}

	if (!slob_page_free(sp)) {
		/* This slob page is about to become partially free. Easy! */
		sp->units = units;
		sp->freelist = b;
		set_slob(b, units,
			(void *)((unsigned long)(b +
					SLOB_UNITS(PAGE_SIZE)) & PAGE_MASK));
		if (size < SLOB_BREAK1)
			slob_list = &free_slob_small;
		else if (size < SLOB_BREAK2)
			slob_list = &free_slob_medium;
		else
			slob_list = &free_slob_large;
		set_slob_page_free(sp, slob_list);
		goto out;
	}

	/*
	 * Otherwise the page is already partially free, so find reinsertion
	 * point.
	 */
	sp->units += units;

	if (b < (slob_t *)sp->freelist) {
		if (b + units == sp->freelist) {
			units += slob_units(sp->freelist);
			sp->freelist = slob_next(sp->freelist);
		}
		set_slob(b, units, sp->freelist);
		sp->freelist = b;
	} else {
		prev = sp->freelist;
		next = slob_next(prev);
		while (b > next) {
			prev = next;
			next = slob_next(prev);
		}

		if (!slob_last(prev) && b + units == next) {
			units += slob_units(next);
			set_slob(b, units, slob_next(next));
		} else
			set_slob(b, units, next);

		if (prev + slob_units(prev) == b) {
			units = slob_units(b) + slob_units(prev);
			set_slob(prev, units, slob_next(b));
		} else
			set_slob(prev, slob_units(prev), b);
	}
out:
	spin_unlock_irqrestore(&slob_lock, flags);
}

/*
 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
 */

static __always_inline void *
__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
{
	unsigned int *m;
	int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
	void *ret;

	gfp &= gfp_allowed_mask;
	lockdep_trace_alloc(gfp);

	if (size < PAGE_SIZE - align) {
		if (!size)
			return ZERO_SIZE_PTR;

		m = slob_alloc(size + align, gfp, align, node);
		if (!m)
			return NULL;
		*m = size;
		ret = (void *)m + align;

		trace_kmalloc_node(caller, ret,
				   size, size + align, gfp, node);
	} else {
		unsigned int order = get_order(size);

		if (likely(order))
			gfp |= __GFP_COMP;
		ret = slob_new_pages(gfp, order, node);

		trace_kmalloc_node(caller, ret,
				   size, PAGE_SIZE << order, gfp, node);
	}

	kmemleak_alloc(ret, size, 1, gfp);
	return ret;
}

void *__kmalloc_node(size_t size, gfp_t gfp, int node)
{
	return __do_kmalloc_node(size, gfp, node, _RET_IP_);
}
EXPORT_SYMBOL(__kmalloc_node);

#ifdef CONFIG_TRACING
void *__kmalloc_track_caller(size_t size, gfp_t gfp, unsigned long caller)
{
	return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, caller);
}

#ifdef CONFIG_NUMA
void *__kmalloc_node_track_caller(size_t size, gfp_t gfp,
				  int node, unsigned long caller)
{
	return __do_kmalloc_node(size, gfp, node, caller);
}
#endif
#endif

void kfree(const void *block)
{
	struct page *sp;

	trace_kfree(_RET_IP_, block);

	if (unlikely(ZERO_OR_NULL_PTR(block)))
		return;
	kmemleak_free(block);

	sp = virt_to_page(block);
	if (PageSlab(sp)) {
		int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
		unsigned int *m = (unsigned int *)(block - align);
		slob_free(m, *m + align);
	} else
		__free_pages(sp, compound_order(sp));
}
EXPORT_SYMBOL(kfree);

/* can't use ksize for kmem_cache_alloc memory, only kmalloc */
size_t ksize(const void *block)
{
	struct page *sp;
	int align;
	unsigned int *m;

	BUG_ON(!block);
	if (unlikely(block == ZERO_SIZE_PTR))
		return 0;

	sp = virt_to_page(block);
	if (unlikely(!PageSlab(sp)))
		return PAGE_SIZE << compound_order(sp);

	align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
	m = (unsigned int *)(block - align);
	return SLOB_UNITS(*m) * SLOB_UNIT;
}
EXPORT_SYMBOL(ksize);
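
/*
 * Worked example (illustrative): for a kmalloc(33) block on a build where
 * SLOB_UNIT is 2 bytes, the header stores *m = 33 and ksize() reports
 * SLOB_UNITS(33) * SLOB_UNIT = 17 * 2 = 34 bytes, i.e. the requested size
 * rounded up to whole slob units.
 */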

int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
{
	size_t align = c->size;

	if (flags & SLAB_DESTROY_BY_RCU) {
		/* leave room for rcu footer at the end of object */
		c->size += sizeof(struct slob_rcu);
	}
	c->flags = flags;
	/* ignore alignment unless it's forced */
	c->align = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
	if (c->align < ARCH_SLAB_MINALIGN)
		c->align = ARCH_SLAB_MINALIGN;
	if (c->align < align)
		c->align = align;

	return 0;
}

void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
{
	void *b;

	flags &= gfp_allowed_mask;
	lockdep_trace_alloc(flags);

	if (c->size < PAGE_SIZE) {
		b = slob_alloc(c->size, flags, c->align, node);
		trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
					    SLOB_UNITS(c->size) * SLOB_UNIT,
					    flags, node);
	} else {
		b = slob_new_pages(flags, get_order(c->size), node);
		trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
					    PAGE_SIZE << get_order(c->size),
					    flags, node);
	}

	if (c->ctor)
		c->ctor(b);

	kmemleak_alloc_recursive(b, c->size, 1, c->flags, flags);
	return b;
}
EXPORT_SYMBOL(kmem_cache_alloc_node);

static void __kmem_cache_free(void *b, int size)
{
	if (size < PAGE_SIZE)
		slob_free(b, size);
	else
		slob_free_pages(b, get_order(size));
}

static void kmem_rcu_free(struct rcu_head *head)
{
	struct slob_rcu *slob_rcu = (struct slob_rcu *)head;
	void *b = (void *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu));

	__kmem_cache_free(b, slob_rcu->size);
}

void kmem_cache_free(struct kmem_cache *c, void *b)
{
	kmemleak_free_recursive(b, c->flags);
	if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
		struct slob_rcu *slob_rcu;
		slob_rcu = b + (c->size - sizeof(struct slob_rcu));
		slob_rcu->size = c->size;
		call_rcu(&slob_rcu->head, kmem_rcu_free);
	} else {
		__kmem_cache_free(b, c->size);
	}

	trace_kmem_cache_free(_RET_IP_, b);
}
EXPORT_SYMBOL(kmem_cache_free);

int __kmem_cache_shutdown(struct kmem_cache *c)
{
	/* No way to check for remaining objects */
	return 0;
}

int kmem_cache_shrink(struct kmem_cache *d)
{
	return 0;
}
EXPORT_SYMBOL(kmem_cache_shrink);

struct kmem_cache kmem_cache_boot = {
	.name = "kmem_cache",
	.size = sizeof(struct kmem_cache),
	.flags = SLAB_PANIC,
	.align = ARCH_KMALLOC_MINALIGN,
};

void __init kmem_cache_init(void)
{
	kmem_cache = &kmem_cache_boot;
	slab_state = UP;
}

void __init kmem_cache_init_late(void)
{
	slab_state = FULL;
}