X-Git-Url: http://demsky.eecs.uci.edu/git/?a=blobdiff_plain;f=mm%2Fslab.c;h=0b4ddafd8a030fb2269af01134a4a5a9eb342b6d;hb=73293c2f900d0adbb6a415b312cd57976d5ae242;hp=8ccd296c6d9c7586fca85fc89bfd9f7a4023e220;hpb=19d2f8e0fb7bba99cc585d2467e9fa54a84c8557;p=firefly-linux-kernel-4.4.55.git

diff --git a/mm/slab.c b/mm/slab.c
index 8ccd296c6d9c..0b4ddafd8a03 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -565,7 +565,7 @@ static void init_node_lock_keys(int q)
 	if (slab_state < UP)
 		return;
 
-	for (i = 1; i < PAGE_SHIFT + MAX_ORDER; i++) {
+	for (i = 1; i <= KMALLOC_SHIFT_HIGH; i++) {
 		struct kmem_cache_node *n;
 		struct kmem_cache *cache = kmalloc_caches[i];
 
@@ -787,7 +787,7 @@ static void next_reap_node(void)
  * the CPUs getting into lockstep and contending for the global cache chain
  * lock.
  */
-static void __cpuinit start_cpu_timer(int cpu)
+static void start_cpu_timer(int cpu)
 {
 	struct delayed_work *reap_work = &per_cpu(slab_reap_work, cpu);
 
@@ -930,7 +930,8 @@ static void *__ac_put_obj(struct kmem_cache *cachep, struct array_cache *ac,
 {
 	if (unlikely(pfmemalloc_active)) {
 		/* Some pfmemalloc slabs exist, check if this is one */
-		struct page *page = virt_to_head_page(objp);
+		struct slab *slabp = virt_to_slab(objp);
+		struct page *page = virt_to_head_page(slabp->s_mem);
 		if (PageSlabPfmemalloc(page))
 			set_obj_pfmemalloc(&objp);
 	}
@@ -1180,7 +1181,13 @@ static int init_cache_node_node(int node)
 	return 0;
 }
 
-static void __cpuinit cpuup_canceled(long cpu)
+static inline int slabs_tofree(struct kmem_cache *cachep,
+						struct kmem_cache_node *n)
+{
+	return (n->free_objects + cachep->num - 1) / cachep->num;
+}
+
+static void cpuup_canceled(long cpu)
 {
 	struct kmem_cache *cachep;
 	struct kmem_cache_node *n = NULL;
@@ -1241,11 +1248,11 @@ free_array_cache:
 		n = cachep->node[node];
 		if (!n)
 			continue;
-		drain_freelist(cachep, n, n->free_objects);
+		drain_freelist(cachep, n, slabs_tofree(cachep, n));
 	}
 }
 
-static int __cpuinit cpuup_prepare(long cpu)
+static int cpuup_prepare(long cpu)
 {
 	struct kmem_cache *cachep;
 	struct kmem_cache_node *n = NULL;
@@ -1328,7 +1335,7 @@ bad:
 	return -ENOMEM;
 }
 
-static int __cpuinit cpuup_callback(struct notifier_block *nfb,
+static int cpuup_callback(struct notifier_block *nfb,
 				    unsigned long action, void *hcpu)
 {
 	long cpu = (long)hcpu;
@@ -1384,7 +1391,7 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
 	return notifier_from_errno(err);
 }
 
-static struct notifier_block __cpuinitdata cpucache_notifier = {
+static struct notifier_block cpucache_notifier = {
 	&cpuup_callback, NULL, 0
 };
 
@@ -1408,7 +1415,7 @@ static int __meminit drain_cache_node_node(int node)
 		if (!n)
 			continue;
 
-		drain_freelist(cachep, n, n->free_objects);
+		drain_freelist(cachep, n, slabs_tofree(cachep, n));
 
 		if (!list_empty(&n->slabs_full) ||
 		    !list_empty(&n->slabs_partial)) {
@@ -1770,7 +1777,7 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 		__SetPageSlab(page + i);
 
 		if (page->pfmemalloc)
-			SetPageSlabPfmemalloc(page + i);
+			SetPageSlabPfmemalloc(page);
 	}
 	memcg_bind_pages(cachep, cachep->gfporder);
 
@@ -1803,9 +1810,10 @@ static void kmem_freepages(struct kmem_cache *cachep, void *addr)
 	else
 		sub_zone_page_state(page_zone(page),
 				NR_SLAB_UNRECLAIMABLE, nr_freed);
+
+	__ClearPageSlabPfmemalloc(page);
 	while (i--) {
 		BUG_ON(!PageSlab(page));
-		__ClearPageSlabPfmemalloc(page);
 		__ClearPageSlab(page);
 		page++;
 	}
@@ -2532,7 +2540,7 @@ static int __cache_shrink(struct kmem_cache *cachep)
 		if (!n)
 			continue;
 
-		drain_freelist(cachep, n, n->free_objects);
+		drain_freelist(cachep, n, slabs_tofree(cachep, n));
 
 		ret += !list_empty(&n->slabs_full) ||
 			!list_empty(&n->slabs_partial);
@@ -3338,18 +3346,6 @@ done:
 	return obj;
 }
 
-/**
- * kmem_cache_alloc_node - Allocate an object on the specified node
- * @cachep: The cache to allocate from.
- * @flags: See kmalloc().
- * @nodeid: node number of the target node.
- * @caller: return address of caller, used for debug information
- *
- * Identical to kmem_cache_alloc but it will allocate memory on the given
- * node, which can improve the performance for cpu bound structures.
- *
- * Fallback to other node is possible if __GFP_THISNODE is not set.
- */
 static __always_inline void *
 slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
 		   unsigned long caller)
@@ -3643,6 +3639,17 @@ EXPORT_SYMBOL(kmem_cache_alloc_trace);
 #endif
 
 #ifdef CONFIG_NUMA
+/**
+ * kmem_cache_alloc_node - Allocate an object on the specified node
+ * @cachep: The cache to allocate from.
+ * @flags: See kmalloc().
+ * @nodeid: node number of the target node.
+ *
+ * Identical to kmem_cache_alloc but it will allocate memory on the given
+ * node, which can improve the performance for cpu bound structures.
+ *
+ * Fallback to other node is possible if __GFP_THISNODE is not set.
+ */
 void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 {
 	void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
@@ -4431,20 +4438,10 @@ static int leaks_show(struct seq_file *m, void *p)
 	return 0;
 }
 
-static void *s_next(struct seq_file *m, void *p, loff_t *pos)
-{
-	return seq_list_next(p, &slab_caches, pos);
-}
-
-static void s_stop(struct seq_file *m, void *p)
-{
-	mutex_unlock(&slab_mutex);
-}
-
 static const struct seq_operations slabstats_op = {
 	.start = leaks_start,
-	.next = s_next,
-	.stop = s_stop,
+	.next = slab_next,
+	.stop = slab_stop,
 	.show = leaks_show,
 };
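
For reference, the arithmetic behind the new slabs_tofree() helper: drain_freelist() takes a count of slabs rather than objects, so the per-node free-object count has to be converted to whole slabs, rounding up. Below is a minimal user-space sketch of that ceiling division, not kernel code; the struct names are hypothetical stand-ins for cachep->num (objects per slab) and n->free_objects.

#include <stdio.h>

struct fake_cache { unsigned int num; };          /* objects per slab */
struct fake_node  { unsigned int free_objects; }; /* free objects on the node */

/* Same ceiling division as the patch's slabs_tofree(): the number of
 * slabs that could be freed if every free object were drained. */
static int slabs_tofree_sketch(const struct fake_cache *cachep,
			       const struct fake_node *n)
{
	return (n->free_objects + cachep->num - 1) / cachep->num;
}

int main(void)
{
	struct fake_cache c = { .num = 16 };
	struct fake_node n = { .free_objects = 33 };

	/* 33 free objects at 16 per slab -> prints 3 (rounded up). */
	printf("%d\n", slabs_tofree_sketch(&c, &n));
	return 0;
}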