mm/sl[au]b: Move slabinfo processing to slab_common.c
index 97a49d9a37cdea11cdc424aff2e24ca2f2087c4a..77a0c8a9fc75c61a0590529483bf738af39cf84e 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -210,11 +210,7 @@ static void sysfs_slab_remove(struct kmem_cache *);
 static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
 static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
                                                        { return 0; }
-static inline void sysfs_slab_remove(struct kmem_cache *s)
-{
-       kfree(s->name);
-       kfree(s);
-}
+static inline void sysfs_slab_remove(struct kmem_cache *s) { }
 
 #endif
 
@@ -626,7 +622,7 @@ static void object_err(struct kmem_cache *s, struct page *page,
        print_trailer(s, page, object);
 }
 
-static void slab_err(struct kmem_cache *s, struct page *page, char *fmt, ...)
+static void slab_err(struct kmem_cache *s, struct page *page, const char *fmt, ...)
 {
        va_list args;
        char buf[100];
@@ -2627,6 +2623,13 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
 
        page = virt_to_head_page(x);
 
+       if (kmem_cache_debug(s) && page->slab != s) {
+               pr_err("kmem_cache_free: Wrong slab cache. %s but object"
+                       " is from %s\n", s->name, page->slab->name);
+               WARN_ON_ONCE(1);
+               return;
+       }
+
        slab_free(s, page, x, _RET_IP_);
 
        trace_kmem_cache_free(_RET_IP_, x);
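
With kmem_cache_debug() active (e.g. slub_debug on the kernel command
line), the check above turns a cross-cache free into a loud but
recoverable failure instead of silent corruption. A minimal sketch of
the bug class it catches; cache_a and cache_b are hypothetical caches
created elsewhere:

        /* object allocated from one cache... */
        void *obj = kmem_cache_alloc(cache_a, GFP_KERNEL);

        /* ...freed to another: now prints "Wrong slab cache. cache_b
         * but object is from cache_a", WARNs once, and returns without
         * touching cache_b's freelists. */
        kmem_cache_free(cache_b, obj);
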
@@ -3041,17 +3044,9 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
 
 }
 
-static int kmem_cache_open(struct kmem_cache *s,
-               const char *name, size_t size,
-               size_t align, unsigned long flags,
-               void (*ctor)(void *))
+static int kmem_cache_open(struct kmem_cache *s, unsigned long flags)
 {
-       memset(s, 0, kmem_size);
-       s->name = name;
-       s->ctor = ctor;
-       s->object_size = size;
-       s->align = align;
-       s->flags = kmem_cache_flags(size, flags, name, ctor);
+       s->flags = kmem_cache_flags(s->size, flags, s->name, s->ctor);
        s->reserved = 0;
 
        if (need_reserve_slab_rcu && (s->flags & SLAB_DESTROY_BY_RCU))
@@ -3113,7 +3108,6 @@ static int kmem_cache_open(struct kmem_cache *s,
        else
                s->cpu_partial = 30;
 
-       s->refcount = 1;
 #ifdef CONFIG_NUMA
        s->remote_node_defrag_ratio = 1000;
 #endif
@@ -3121,16 +3115,16 @@ static int kmem_cache_open(struct kmem_cache *s,
                goto error;
 
        if (alloc_kmem_cache_cpus(s))
-               return 1;
+               return 0;
 
        free_kmem_cache_nodes(s);
 error:
        if (flags & SLAB_PANIC)
                panic("Cannot create slab %s size=%lu realsize=%u "
                        "order=%u offset=%u flags=%lx\n",
-                       s->name, (unsigned long)size, s->size, oo_order(s->oo),
+                       s->name, (unsigned long)s->size, s->size, oo_order(s->oo),
                        s->offset, flags);
-       return 0;
+       return -EINVAL;
 }
 
 /*
@@ -3152,7 +3146,7 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
                                     sizeof(long), GFP_ATOMIC);
        if (!map)
                return;
-       slab_err(s, page, "%s", text);
+       slab_err(s, page, text, s->name);
        slab_lock(page);
 
        get_map(s, page, map);
@@ -3184,7 +3178,7 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
                        discard_slab(s, page);
                } else {
                        list_slab_objects(s, page,
-                               "Objects remaining on kmem_cache_close()");
+                       "Objects remaining in %s on kmem_cache_close()");
                }
        }
 }
@@ -3197,7 +3191,6 @@ static inline int kmem_cache_close(struct kmem_cache *s)
        int node;
 
        flush_all(s);
-       free_percpu(s->cpu_slab);
        /* Attempt to free all objects */
        for_each_node_state(node, N_NORMAL_MEMORY) {
                struct kmem_cache_node *n = get_node(s, node);
@@ -3206,33 +3199,20 @@ static inline int kmem_cache_close(struct kmem_cache *s)
                if (n->nr_partial || slabs_node(s, node))
                        return 1;
        }
+       free_percpu(s->cpu_slab);
        free_kmem_cache_nodes(s);
        return 0;
 }
 
-/*
- * Close a cache and release the kmem_cache structure
- * (must be used for caches created using kmem_cache_create)
- */
-void kmem_cache_destroy(struct kmem_cache *s)
+int __kmem_cache_shutdown(struct kmem_cache *s)
 {
-       mutex_lock(&slab_mutex);
-       s->refcount--;
-       if (!s->refcount) {
-               list_del(&s->list);
-               mutex_unlock(&slab_mutex);
-               if (kmem_cache_close(s)) {
-                       printk(KERN_ERR "SLUB %s: %s called for cache that "
-                               "still has objects.\n", s->name, __func__);
-                       dump_stack();
-               }
-               if (s->flags & SLAB_DESTROY_BY_RCU)
-                       rcu_barrier();
+       int rc = kmem_cache_close(s);
+
+       if (!rc)
                sysfs_slab_remove(s);
-       } else
-               mutex_unlock(&slab_mutex);
+
+       return rc;
 }
-EXPORT_SYMBOL(kmem_cache_destroy);
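
kmem_cache_destroy() itself now lives in common code; __kmem_cache_shutdown()
is the allocator-specific hook it calls with slab_mutex held. A sketch of the
common-code caller, reconstructed from the SLUB version deleted above (the
exact mm/slab_common.c body is assumed):

        void kmem_cache_destroy(struct kmem_cache *s)
        {
                mutex_lock(&slab_mutex);
                s->refcount--;
                if (!s->refcount) {
                        list_del(&s->list);
                        if (!__kmem_cache_shutdown(s)) {
                                mutex_unlock(&slab_mutex);
                                if (s->flags & SLAB_DESTROY_BY_RCU)
                                        rcu_barrier();
                                /* the struct itself is freed here now */
                                kmem_cache_free(kmem_cache, s);
                                return;
                        }
                        /* still has objects: put it back and complain */
                        list_add(&s->list, &slab_caches);
                        pr_err("kmem_cache_destroy %s: cache still has objects\n",
                               s->name);
                        dump_stack();
                }
                mutex_unlock(&slab_mutex);
        }
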
 
 /********************************************************************
  *             Kmalloc subsystem
@@ -3241,8 +3221,6 @@ EXPORT_SYMBOL(kmem_cache_destroy);
 struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];
 EXPORT_SYMBOL(kmalloc_caches);
 
-static struct kmem_cache *kmem_cache;
-
 #ifdef CONFIG_ZONE_DMA
 static struct kmem_cache *kmalloc_dma_caches[SLUB_PAGE_SHIFT];
 #endif
@@ -3288,14 +3266,17 @@ static struct kmem_cache *__init create_kmalloc_cache(const char *name,
 {
        struct kmem_cache *s;
 
-       s = kmem_cache_alloc(kmem_cache, GFP_NOWAIT);
+       s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
+
+       s->name = name;
+       s->size = s->object_size = size;
+       s->align = ARCH_KMALLOC_MINALIGN;
 
        /*
         * This function is called with IRQs disabled during early-boot on
         * single CPU so there's no need to take slab_mutex here.
         */
-       if (!kmem_cache_open(s, name, size, ARCH_KMALLOC_MINALIGN,
-                                                               flags, NULL))
+       if (kmem_cache_open(s, flags))
                goto panic;
 
        list_add(&s->list, &slab_caches);
@@ -3734,12 +3715,12 @@ void __init kmem_cache_init(void)
                slub_max_order = 0;
 
        kmem_size = offsetof(struct kmem_cache, node) +
-                               nr_node_ids * sizeof(struct kmem_cache_node *);
+                       nr_node_ids * sizeof(struct kmem_cache_node *);
 
        /* Allocate two kmem_caches from the page allocator */
        kmalloc_size = ALIGN(kmem_size, cache_line_size());
        order = get_order(2 * kmalloc_size);
-       kmem_cache = (void *)__get_free_pages(GFP_NOWAIT, order);
+       kmem_cache = (void *)__get_free_pages(GFP_NOWAIT | __GFP_ZERO, order);
 
        /*
         * Must first have the slab cache available for the allocations of the
@@ -3748,9 +3729,10 @@ void __init kmem_cache_init(void)
         */
        kmem_cache_node = (void *)kmem_cache + kmalloc_size;
 
-       kmem_cache_open(kmem_cache_node, "kmem_cache_node",
-               sizeof(struct kmem_cache_node),
-               0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
+       kmem_cache_node->name = "kmem_cache_node";
+       kmem_cache_node->size = kmem_cache_node->object_size =
+               sizeof(struct kmem_cache_node);
+       kmem_cache_open(kmem_cache_node, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
 
        hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
 
@@ -3758,8 +3740,10 @@ void __init kmem_cache_init(void)
        slab_state = PARTIAL;
 
        temp_kmem_cache = kmem_cache;
-       kmem_cache_open(kmem_cache, "kmem_cache", kmem_size,
-               0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
+       kmem_cache->name = "kmem_cache";
+       kmem_cache->size = kmem_cache->object_size = kmem_size;
+       kmem_cache_open(kmem_cache, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
+
        kmem_cache = kmem_cache_alloc(kmem_cache, GFP_NOWAIT);
        memcpy(kmem_cache, temp_kmem_cache, kmem_size);
 
@@ -3948,11 +3932,10 @@ static struct kmem_cache *find_mergeable(size_t size,
        return NULL;
 }
 
-struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
+struct kmem_cache *__kmem_cache_alias(const char *name, size_t size,
                size_t align, unsigned long flags, void (*ctor)(void *))
 {
        struct kmem_cache *s;
-       char *n;
 
        s = find_mergeable(size, align, flags, name, ctor);
        if (s) {
@@ -3966,36 +3949,29 @@ struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
 
                if (sysfs_slab_alias(s, name)) {
                        s->refcount--;
-                       return NULL;
+                       s = NULL;
                }
-               return s;
        }
 
-       n = kstrdup(name, GFP_KERNEL);
-       if (!n)
-               return NULL;
+       return s;
+}
 
-       s = kmalloc(kmem_size, GFP_KERNEL);
-       if (s) {
-               if (kmem_cache_open(s, n,
-                               size, align, flags, ctor)) {
-                       int r;
+int __kmem_cache_create(struct kmem_cache *s, unsigned long flags)
+{
+       int err;
 
-                       list_add(&s->list, &slab_caches);
-                       mutex_unlock(&slab_mutex);
-                       r = sysfs_slab_add(s);
-                       mutex_lock(&slab_mutex);
+       err = kmem_cache_open(s, flags);
+       if (err)
+               return err;
 
-                       if (!r)
-                               return s;
+       mutex_unlock(&slab_mutex);
+       err = sysfs_slab_add(s);
+       mutex_lock(&slab_mutex);
 
-                       list_del(&s->list);
-                       kmem_cache_close(s);
-               }
-               kfree(s);
-       }
-       kfree(n);
-       return NULL;
+       if (err)
+               kmem_cache_close(s);
+
+       return err;
 }
 
 #ifdef CONFIG_SMP
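
The create path is split the same way: common code is expected to try
__kmem_cache_alias() first and, only when no existing cache can be reused,
allocate the struct kmem_cache, fill in name/size/align/ctor (mirroring
create_kmalloc_cache() above), and call __kmem_cache_create(), which now
returns 0 or a negative errno instead of the old 1-on-success convention.
A sketch of that caller under those assumptions (allocation-failure
handling omitted):

        struct kmem_cache *s;
        int err;

        s = __kmem_cache_alias(name, size, align, flags, ctor);
        if (s)
                return s;               /* merged into an existing cache */

        s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
        s->name = kstrdup(name, GFP_KERNEL);
        s->object_size = s->size = size;
        s->align = align;
        s->ctor = ctor;

        err = __kmem_cache_create(s, flags);
        if (err) {
                kfree(s->name);
                kmem_cache_free(kmem_cache, s);
                return NULL;
        }
        s->refcount = 1;        /* initialization moved out of kmem_cache_open() */
        list_add(&s->list, &slab_caches);
        return s;
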
@@ -5225,14 +5201,6 @@ static ssize_t slab_attr_store(struct kobject *kobj,
        return err;
 }
 
-static void kmem_cache_release(struct kobject *kobj)
-{
-       struct kmem_cache *s = to_slab(kobj);
-
-       kfree(s->name);
-       kfree(s);
-}
-
 static const struct sysfs_ops slab_sysfs_ops = {
        .show = slab_attr_show,
        .store = slab_attr_store,
@@ -5240,7 +5208,6 @@ static const struct sysfs_ops slab_sysfs_ops = {
 
 static struct kobj_type slab_ktype = {
        .sysfs_ops = &slab_sysfs_ops,
-       .release = kmem_cache_release
 };
 
 static int uevent_filter(struct kset *kset, struct kobject *kobj)
@@ -5438,7 +5405,7 @@ __initcall(slab_sysfs_init);
  * The /proc/slabinfo ABI
  */
 #ifdef CONFIG_SLABINFO
-static void print_slabinfo_header(struct seq_file *m)
+void print_slabinfo_header(struct seq_file *m)
 {
        seq_puts(m, "slabinfo - version: 2.1\n");
        seq_puts(m, "# name            <active_objs> <num_objs> <object_size> "
@@ -5448,28 +5415,7 @@ static void print_slabinfo_header(struct seq_file *m)
        seq_putc(m, '\n');
 }
 
-static void *s_start(struct seq_file *m, loff_t *pos)
-{
-       loff_t n = *pos;
-
-       mutex_lock(&slab_mutex);
-       if (!n)
-               print_slabinfo_header(m);
-
-       return seq_list_start(&slab_caches, *pos);
-}
-
-static void *s_next(struct seq_file *m, void *p, loff_t *pos)
-{
-       return seq_list_next(p, &slab_caches, pos);
-}
-
-static void s_stop(struct seq_file *m, void *p)
-{
-       mutex_unlock(&slab_mutex);
-}
-
-static int s_show(struct seq_file *m, void *p)
+int slabinfo_show(struct seq_file *m, void *p)
 {
        unsigned long nr_partials = 0;
        unsigned long nr_slabs = 0;
@@ -5505,29 +5451,9 @@ static int s_show(struct seq_file *m, void *p)
        return 0;
 }
 
-static const struct seq_operations slabinfo_op = {
-       .start = s_start,
-       .next = s_next,
-       .stop = s_stop,
-       .show = s_show,
-};
-
-static int slabinfo_open(struct inode *inode, struct file *file)
-{
-       return seq_open(file, &slabinfo_op);
-}
-
-static const struct file_operations proc_slabinfo_operations = {
-       .open           = slabinfo_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = seq_release,
-};
-
-static int __init slab_proc_init(void)
+ssize_t slabinfo_write(struct file *file, const char __user *buffer,
+                      size_t count, loff_t *ppos)
 {
-       proc_create("slabinfo", S_IRUSR, NULL, &proc_slabinfo_operations);
-       return 0;
+       return -EIO;
 }
-module_init(slab_proc_init);
 #endif /* CONFIG_SLABINFO */
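
print_slabinfo_header(), slabinfo_show() and the stub slabinfo_write() (SLUB's
/proc/slabinfo is read-only, hence -EIO) are now the only allocator-specific
pieces. The seq_file iteration deleted above is expected to reappear once in
mm/slab_common.c, shared between SLAB and SLUB. A sketch of that shared side,
reconstructed from the removed code:

        static void *s_start(struct seq_file *m, loff_t *pos)
        {
                mutex_lock(&slab_mutex);
                if (!*pos)
                        print_slabinfo_header(m);
                return seq_list_start(&slab_caches, *pos);
        }

        static void *s_next(struct seq_file *m, void *p, loff_t *pos)
        {
                return seq_list_next(p, &slab_caches, pos);
        }

        static void s_stop(struct seq_file *m, void *p)
        {
                mutex_unlock(&slab_mutex);
        }

        static const struct seq_operations slabinfo_op = {
                .start  = s_start,
                .next   = s_next,
                .stop   = s_stop,
                .show   = slabinfo_show,        /* allocator-provided */
        };

        static int slabinfo_open(struct inode *inode, struct file *file)
        {
                return seq_open(file, &slabinfo_op);
        }

        static const struct file_operations proc_slabinfo_operations = {
                .open           = slabinfo_open,
                .read           = seq_read,
                .write          = slabinfo_write,       /* allocator-provided */
                .llseek         = seq_lseek,
                .release        = seq_release,
        };

        static int __init slab_proc_init(void)
        {
                proc_create("slabinfo", S_IRUSR, NULL, &proc_slabinfo_operations);
                return 0;
        }
        module_init(slab_proc_init);
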