mm/memblock: switch to use NUMA_NO_NODE instead of MAX_NUMNODES
diff --git a/mm/slub.c b/mm/slub.c
index c3eb3d3ca83565b925e197f2ad44ac2c313345b8..545a170ebf9f66cf0e3716c9cd6f4cb7eef0eda6 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -155,7 +155,7 @@ static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
 /*
  * Maximum number of desirable partial slabs.
  * The existence of more partial slabs makes kmem_cache_shrink
- * sort the partial list by the number of objects in the.
+ * sort the partial list by the number of objects in use.
  */
 #define MAX_PARTIAL 10
@@ -933,6 +933,16 @@ static void trace(struct kmem_cache *s, struct page *page, void *object,
  * Hooks for other subsystems that check memory allocations. In a typical
  * production configuration these hooks all should produce no code at all.
  */
+static inline void kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
+{
+	kmemleak_alloc(ptr, size, 1, flags);
+}
+
+static inline void kfree_hook(const void *x)
+{
+	kmemleak_free(x);
+}
+
 static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
 {
 	flags &= gfp_allowed_mask;
@@ -955,7 +965,7 @@ static inline void slab_free_hook(struct kmem_cache *s, void *x)
 	kmemleak_free_recursive(x, s->flags);
 
 	/*
-	 * Trouble is that we may no longer disable interupts in the fast path
+	 * Trouble is that we may no longer disable interrupts in the fast path
 	 * So in order to make the debug calls that expect irqs to be
 	 * disabled we need to disable interrupts temporarily.
 	 */
@@ -1217,8 +1227,8 @@ static unsigned long kmem_cache_flags(unsigned long object_size,
 	/*
 	 * Enable debugging if selected on the kernel commandline.
 	 */
-	if (slub_debug && (!slub_debug_slabs ||
-		!strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs))))
+	if (slub_debug && (!slub_debug_slabs || (name &&
+		!strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs)))))
 		flags |= slub_debug;
 
 	return flags;
@@ -1260,13 +1270,30 @@ static inline void inc_slabs_node(struct kmem_cache *s, int node,
 static inline void dec_slabs_node(struct kmem_cache *s, int node,
 							int objects) {}
 
+static inline void kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
+{
+	kmemleak_alloc(ptr, size, 1, flags);
+}
+
+static inline void kfree_hook(const void *x)
+{
+	kmemleak_free(x);
+}
+
 static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
 							{ return 0; }
 static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
-		void *object) {}
+		void *object)
+{
+	kmemleak_alloc_recursive(object, s->object_size, 1, s->flags,
+		flags & gfp_allowed_mask);
+}
 
-static inline void slab_free_hook(struct kmem_cache *s, void *x) {}
+static inline void slab_free_hook(struct kmem_cache *s, void *x)
+{
+	kmemleak_free_recursive(x, s->flags);
+}
 
 #endif /* CONFIG_SLUB_DEBUG */
@@ -2829,8 +2856,8 @@ static struct kmem_cache *kmem_cache_node;
  * slab on the node for this slabcache. There are no concurrent accesses
  * possible.
  *
- * Note that this function only works on the kmalloc_node_cache
- * when allocating for the kmalloc_node_cache. This is used for bootstrapping
+ * Note that this function only works on the kmem_cache_node
+ * when allocating for the kmem_cache_node. This is used for bootstrapping
  * memory on a fresh node that has no slab structures yet.
  */
 static void early_kmem_cache_node_alloc(int node)
@@ -3272,7 +3299,7 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
 	if (page)
 		ptr = page_address(page);
 
-	kmemleak_alloc(ptr, size, 1, flags);
+	kmalloc_large_node_hook(ptr, size, flags);
 	return ptr;
 }
@@ -3336,7 +3363,7 @@ void kfree(const void *x)
 	page = virt_to_head_page(x);
 	if (unlikely(!PageSlab(page))) {
 		BUG_ON(!PageCompound(page));
-		kmemleak_free(x);
+		kfree_hook(x);
 		__free_memcg_kmem_pages(page, compound_order(page));
 		return;
 	}
@@ -4983,7 +5010,7 @@ static ssize_t slab_attr_store(struct kobject *kobj,
 	 * through the descendants with best-effort propagation.
 	 */
 	for_each_memcg_cache_index(i) {
-		struct kmem_cache *c = cache_from_memcg(s, i);
+		struct kmem_cache *c = cache_from_memcg_idx(s, i);
 		if (c)
 			attribute->store(c, buf, len);
 	}
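
The net effect of the diff above is that kmemleak bookkeeping for large kmalloc allocations and compound-page kfree now goes through kmalloc_large_node_hook()/kfree_hook(), which exist in both the CONFIG_SLUB_DEBUG and !CONFIG_SLUB_DEBUG builds, rather than through direct kmemleak_alloc()/kmemleak_free() calls that compiled away with debugging disabled. The following is a minimal standalone userspace sketch of that hook pattern, not kernel code; the kmemleak_* functions here are printf stubs standing in for the real tracker in mm/kmemleak.c.

/* hook_sketch.c - simplified model of the indirection this patch adds */
#include <stdio.h>
#include <stdlib.h>

typedef unsigned int gfp_t;

/* Stubs standing in for the real kmemleak API. */
static void kmemleak_alloc(const void *ptr, size_t size, int min_count,
			   gfp_t flags)
{
	printf("kmemleak: track %p (%zu bytes)\n", ptr, size);
}

static void kmemleak_free(const void *ptr)
{
	printf("kmemleak: untrack %p\n", ptr);
}

/* The indirection layer added by the patch: callers use the hooks,
 * so the kmemleak calls have one home that stays available whether
 * or not CONFIG_SLUB_DEBUG is set. */
static inline void kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
{
	kmemleak_alloc(ptr, size, 1, flags);
}

static inline void kfree_hook(const void *x)
{
	kmemleak_free(x);
}

int main(void)
{
	void *p = malloc(4096);		/* stands in for a large kmalloc */

	kmalloc_large_node_hook(p, 4096, 0);
	kfree_hook(p);
	free(p);
	return 0;
}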