#ifndef _LINUX_SLUB_DEF_H
#define _LINUX_SLUB_DEF_H

/*
 * SLUB : A Slab allocator without object queues.
 *
 * (C) 2007 SGI, Christoph Lameter
 */
#include <linux/types.h>
#include <linux/gfp.h>
#include <linux/bug.h>
#include <linux/workqueue.h>
#include <linux/kobject.h>

#include <linux/kmemleak.h>

enum stat_item {
	ALLOC_FASTPATH,		/* Allocation from cpu slab */
	ALLOC_SLOWPATH,		/* Allocation by getting a new cpu slab */
	FREE_FASTPATH,		/* Free to cpu slab */
	FREE_SLOWPATH,		/* Freeing not to cpu slab */
	FREE_FROZEN,		/* Freeing to frozen slab */
	FREE_ADD_PARTIAL,	/* Freeing moves slab to partial list */
	FREE_REMOVE_PARTIAL,	/* Freeing removes last object */
	ALLOC_FROM_PARTIAL,	/* Cpu slab acquired from node partial list */
	ALLOC_SLAB,		/* Cpu slab acquired from page allocator */
	ALLOC_REFILL,		/* Refill cpu slab from slab freelist */
	ALLOC_NODE_MISMATCH,	/* Switching cpu slab */
	FREE_SLAB,		/* Slab freed to the page allocator */
	CPUSLAB_FLUSH,		/* Abandoning of the cpu slab */
	DEACTIVATE_FULL,	/* Cpu slab was full when deactivated */
	DEACTIVATE_EMPTY,	/* Cpu slab was empty when deactivated */
	DEACTIVATE_TO_HEAD,	/* Cpu slab was moved to the head of the partial list */
	DEACTIVATE_TO_TAIL,	/* Cpu slab was moved to the tail of the partial list */
	DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
	DEACTIVATE_BYPASS,	/* Implicit deactivation */
	ORDER_FALLBACK,		/* Number of times fallback was necessary */
	CMPXCHG_DOUBLE_CPU_FAIL,/* Failure of this_cpu_cmpxchg_double */
	CMPXCHG_DOUBLE_FAIL,	/* Number of times that cmpxchg double did not match */
	CPU_PARTIAL_ALLOC,	/* Used cpu partial on alloc */
	CPU_PARTIAL_FREE,	/* Refill cpu partial on free */
	CPU_PARTIAL_NODE,	/* Refill cpu partial from node partial */
	CPU_PARTIAL_DRAIN,	/* Drain cpu partial to node partial */
	NR_SLUB_STAT_ITEMS };
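
/*
 * Note: when CONFIG_SLUB_STATS is enabled these counters are kept per cpu
 * in struct kmem_cache_cpu below and surface through sysfs, one file per
 * item under /sys/kernel/slab/<cache>/ (for example alloc_fastpath,
 * free_slowpath).
 */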

struct kmem_cache_cpu {
	void **freelist;	/* Pointer to next available object */
	unsigned long tid;	/* Globally unique transaction id */
	struct page *page;	/* The slab from which we are allocating */
	struct page *partial;	/* Partially allocated frozen slabs */
#ifdef CONFIG_SLUB_STATS
	unsigned stat[NR_SLUB_STAT_ITEMS];
#endif
};
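
/*
 * How freelist/tid are used (simplified sketch; the real fast paths live
 * in mm/slub.c): allocation reads freelist and tid, computes the next free
 * object, and then commits both words at once with
 * this_cpu_cmpxchg_double(freelist, tid, ...). If another task, an
 * interrupt or a cpu migration touched the cpu slab in between, the tid no
 * longer matches, the cmpxchg fails (see CMPXCHG_DOUBLE_CPU_FAIL above)
 * and the operation is retried or falls back to the slow path.
 */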

struct kmem_cache_node {
	spinlock_t list_lock;	/* Protect partial list and nr_partial */
	unsigned long nr_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
	struct list_head full;
#endif
};

/*
 * Word size structure that can be atomically updated or read and that
 * contains both the order and the number of objects that a slab of the
 * given order would contain.
 */
struct kmem_cache_order_objects {
	unsigned long x;
};
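
/*
 * Illustrative sketch only: one way such a word can be packed and unpacked.
 * The real helpers (oo_make(), oo_order(), oo_objects()) and the actual bit
 * split live in mm/slub.c; the 16-bit boundary below is an assumption made
 * purely for this example.
 */
static inline struct kmem_cache_order_objects
example_oo_make(unsigned int order, unsigned int objects)
{
	struct kmem_cache_order_objects x = {
		.x = ((unsigned long)order << 16) + objects	/* order in high bits */
	};

	return x;
}

static inline unsigned int example_oo_order(struct kmem_cache_order_objects x)
{
	return x.x >> 16;		/* recover the page order */
}

static inline unsigned int example_oo_objects(struct kmem_cache_order_objects x)
{
	return x.x & ((1UL << 16) - 1);	/* recover the object count */
}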

/*
 * Slab cache management.
 */
struct kmem_cache {
	struct kmem_cache_cpu __percpu *cpu_slab;
	/* Used for retrieving partial slabs etc */
	unsigned long flags;
	unsigned long min_partial;
	int size;		/* The size of an object including meta data */
	int object_size;	/* The size of an object without meta data */
	int offset;		/* Free pointer offset. */
	int cpu_partial;	/* Number of per cpu partial objects to keep around */
	struct kmem_cache_order_objects oo;

	/* Allocation and freeing of slabs */
	struct kmem_cache_order_objects max;
	struct kmem_cache_order_objects min;
	gfp_t allocflags;	/* gfp flags to use on each alloc */
	int refcount;		/* Refcount for slab cache destroy */
	void (*ctor)(void *);	/* Object constructor, may be NULL */
	int inuse;		/* Offset to metadata */
	int align;		/* Alignment */
	int reserved;		/* Reserved bytes at the end of slabs */
	const char *name;	/* Name (only for display!) */
	struct list_head list;	/* List of slab caches */
#ifdef CONFIG_SYSFS
	struct kobject kobj;	/* For sysfs */
#endif
#ifdef CONFIG_MEMCG_KMEM
	struct memcg_cache_params *memcg_params;
	int max_attr_size;	/* for propagation, maximum size of a stored attr */
#endif

#ifdef CONFIG_NUMA
	/*
	 * Defragmentation by allocating from a remote node.
	 */
	int remote_node_defrag_ratio;
#endif
	struct kmem_cache_node *node[MAX_NUMNODES];
};
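
/*
 * Typical use (illustrative sketch; kmem_cache_create(), kmem_cache_free()
 * and kmem_cache_destroy() are declared in <linux/slab.h>, not here, and
 * "my_objs"/struct my_obj are made-up names for this example):
 *
 *	struct kmem_cache *c = kmem_cache_create("my_objs",
 *			sizeof(struct my_obj), 0, SLAB_HWCACHE_ALIGN, NULL);
 *	struct my_obj *p = kmem_cache_alloc(c, GFP_KERNEL);
 *	...
 *	kmem_cache_free(c, p);
 *	kmem_cache_destroy(c);
 */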

/*
 * Maximum kmalloc object size handled by SLUB. Larger object allocations
 * are passed through to the page allocator. The page allocator "fastpath"
 * is relatively slow so we need this value sufficiently high so that
 * performance critical objects are allocated through the SLUB fastpath.
 *
 * This should be dropped to PAGE_SIZE / 2 once the page allocator
 * "fastpath" becomes competitive with the slab allocator fastpaths.
 */
#define SLUB_MAX_SIZE (2 * PAGE_SIZE)

#define SLUB_PAGE_SHIFT (PAGE_SHIFT + 2)
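
/*
 * Worked example (assuming 4 KiB pages, i.e. PAGE_SHIFT == 12):
 * SLUB_MAX_SIZE is 8192 bytes and SLUB_PAGE_SHIFT is 14, so constant-size
 * kmalloc() calls up to 8 KiB are served from kmalloc_caches[] below and
 * anything larger is handed straight to the page allocator.
 */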

#ifdef CONFIG_ZONE_DMA
#define SLUB_DMA __GFP_DMA
#else
/* Disable DMA functionality */
#define SLUB_DMA	(__force gfp_t)0
#endif

/*
 * We keep the general caches in an array of slab caches that are used for
 * 2^x bytes of allocations.
 */
extern struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];

/*
 * Find the slab cache for a given combination of allocation flags and size.
 *
 * This ought to end up with a global pointer to the right cache
 * in kmalloc_caches.
 */
static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
{
	int index = kmalloc_index(size);

	if (index == 0)
		return NULL;

	return kmalloc_caches[index];
}
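
/*
 * Illustrative note: for a compile-time constant size the whole lookup
 * folds away, e.g. kmalloc_slab(64) reduces to
 * kmalloc_caches[kmalloc_index(64)], i.e. a single load of the pointer to
 * the 64-byte general cache.
 */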

void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags);

static __always_inline void *
kmalloc_order(size_t size, gfp_t flags, unsigned int order)
{
	void *ret;

	flags |= (__GFP_COMP | __GFP_KMEMCG);
	ret = (void *) __get_free_pages(flags, order);
	kmemleak_alloc(ret, size, 1, flags);
	return ret;
}

/*
 * Calling this on allocated memory will check that the memory
 * is expected to be in use, and print warnings if not.
 */
#ifdef CONFIG_SLUB_DEBUG
extern bool verify_mem_not_deleted(const void *x);
#else
static inline bool verify_mem_not_deleted(const void *x)
{
	return true;
}
#endif
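
/*
 * Usage sketch (illustrative only, not part of this header's API): a caller
 * that hands out objects from its own cache could sanity check them before
 * reuse. With CONFIG_SLUB_DEBUG disabled the check never fires.
 */
static inline void example_check_still_allocated(const void *obj)
{
	WARN_ON(!verify_mem_not_deleted(obj));
}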

#ifdef CONFIG_TRACING
extern void *
kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size);
extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order);
#else
static __always_inline void *
kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
{
	return kmem_cache_alloc(s, gfpflags);
}

static __always_inline void *
kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
{
	return kmalloc_order(size, flags, order);
}
#endif

static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
{
	unsigned int order = get_order(size);

	return kmalloc_order_trace(size, flags, order);
}

static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
	if (__builtin_constant_p(size)) {
		if (size > SLUB_MAX_SIZE)
			return kmalloc_large(size, flags);

		if (!(flags & SLUB_DMA)) {
			struct kmem_cache *s = kmalloc_slab(size);

			if (!s)
				return ZERO_SIZE_PTR;

			return kmem_cache_alloc_trace(s, flags, size);
		}
	}
	return __kmalloc(size, flags);
}
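
/*
 * Usage sketch (illustrative only, not part of this header's API): with a
 * constant size the cache lookup above is resolved at compile time, while a
 * runtime size always goes through __kmalloc().
 */
static __always_inline void *example_alloc_small_buffer(gfp_t flags)
{
	return kmalloc(64, flags);	/* constant size: kmalloc_slab() path */
}

static __always_inline void *example_alloc_runtime_buffer(size_t len, gfp_t flags)
{
	return kmalloc(len, flags);	/* runtime size: __kmalloc() path */
}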

#ifdef CONFIG_NUMA
void *__kmalloc_node(size_t size, gfp_t flags, int node);
void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
					   gfp_t gfpflags,
					   int node, size_t size);
#else
static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *s,
			      gfp_t gfpflags,
			      int node, size_t size)
{
	return kmem_cache_alloc_node(s, gfpflags, node);
}
#endif

static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
	if (__builtin_constant_p(size) &&
		size <= SLUB_MAX_SIZE && !(flags & SLUB_DMA)) {
		struct kmem_cache *s = kmalloc_slab(size);

		if (!s)
			return ZERO_SIZE_PTR;

		return kmem_cache_alloc_node_trace(s, flags, node, size);
	}
	return __kmalloc_node(size, flags, node);
}
#endif /* CONFIG_NUMA */

#endif /* _LINUX_SLUB_DEF_H */