}
}
-void __kmem_cache_destroy(struct kmem_cache *cachep)
-{
-	int i;
-	struct kmem_list3 *l3;
-
-	for_each_online_cpu(i)
-		kfree(cachep->array[i]);
-
-	/* NUMA: free the list3 structures */
-	for_each_online_node(i) {
-		l3 = cachep->nodelists[i];
-		if (l3) {
-			kfree(l3->shared);
-			free_alien_cache(l3->alien);
-			kfree(l3);
-		}
-	}
-}
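For orientation, here is a rough sketch of the bookkeeping this teardown walks: cachep->array[cpu] is the per-CPU object cache and cachep->nodelists[node] the per-node slab state. These are illustrative stand-ins only; the real definitions live in mm/slab.c and include/linux/slab_def.h and carry many more fields.

struct array_cache;	/* per-CPU/per-node stash of free objects */

/* Illustrative stand-in for the real kmem_list3 in mm/slab.c. */
struct kmem_list3 {
	struct array_cache *shared;	/* objects shared by CPUs on one node */
	struct array_cache **alien;	/* caches of objects from remote nodes */
	/* full/partial/free slab lists, locks and counters elided */
};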
-
-
/**
* calculate_slab_order - calculate size (page order) of slabs
* @cachep: pointer to the cache that is being created
* Cannot be called within an interrupt, but can be interrupted.
* The @ctor is run when new pages are allocated by the cache.
*
- * @name must be valid until the cache is destroyed. This implies that
- * the module calling this has to destroy the cache before getting unloaded.
- *
* The flags are
*
* %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
	cachep->refcount = 1;
	if (setup_cpu_cache(cachep, gfp)) {
-		__kmem_cache_destroy(cachep);
+		__kmem_cache_shutdown(cachep);
		return NULL;
	}
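Substituting __kmem_cache_shutdown() on this error path is safe: the cache is freshly created and empty, so the shrink step succeeds trivially and shutdown performs exactly the old teardown (kfree(NULL) is a no-op for slots setup_cpu_cache() never filled). Callers see no difference; a minimal, hypothetical user (foo, foo_cache, foo_init/foo_exit are invented names for illustration):

#include <linux/module.h>
#include <linux/slab.h>

struct foo {
	int payload;
};

static struct kmem_cache *foo_cache;

static int __init foo_init(void)
{
	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
				      SLAB_POISON, NULL);
	if (!foo_cache)		/* creation failed; partial state already freed */
		return -ENOMEM;
	return 0;
}

static void __exit foo_exit(void)
{
	kmem_cache_destroy(foo_cache);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");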
int __kmem_cache_shutdown(struct kmem_cache *cachep)
{
-	return __cache_shrink(cachep);
+	int i;
+	struct kmem_list3 *l3;
+	int rc = __cache_shrink(cachep);
+
+	if (rc)
+		return rc;
+
+	for_each_online_cpu(i)
+		kfree(cachep->array[i]);
+
+	/* NUMA: free the list3 structures */
+	for_each_online_node(i) {
+		l3 = cachep->nodelists[i];
+		if (l3) {
+			kfree(l3->shared);
+			free_alien_cache(l3->alien);
+			kfree(l3);
+		}
+	}
+	return 0;
}
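The early return leans on __cache_shrink() (not part of this patch), which drains the per-CPU arrays, releases empty slabs, and reports nonzero when live objects remain; only a clean shrink lets shutdown go on to free the bookkeeping. A heavily simplified sketch of its shape, assuming the mm/slab.c internals of this era:

/* Simplified sketch of __cache_shrink(); the real function also checks
 * IRQ state and drains the shared/alien caches via drain_cpu_caches(). */
static int __cache_shrink_sketch(struct kmem_cache *cachep)
{
	int node, busy = 0;
	struct kmem_list3 *l3;

	drain_cpu_caches(cachep);	/* flush per-CPU object arrays */
	for_each_online_node(node) {
		l3 = cachep->nodelists[node];
		if (!l3)
			continue;
		drain_freelist(cachep, l3, l3->free_objects);
		busy += !list_empty(&l3->slabs_full) ||
			!list_empty(&l3->slabs_partial);
	}
	return busy;	/* nonzero: objects still live, do not tear down */
}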
/*
	size_t align, unsigned long flags, void (*ctor)(void *));
int __kmem_cache_shutdown(struct kmem_cache *);
-void __kmem_cache_destroy(struct kmem_cache *);
#endif
			if (s->flags & SLAB_DESTROY_BY_RCU)
				rcu_barrier();
-			__kmem_cache_destroy(s);
			kmem_cache_free(kmem_cache, s);
		} else {
			list_add(&s->list, &slab_caches);
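For context, this hunk sits inside kmem_cache_destroy() in mm/slab_common.c. Roughly, the post-patch function looks like the following (reconstructed from this era of the file; check the tree for the exact version):

void kmem_cache_destroy(struct kmem_cache *s)
{
	get_online_cpus();
	mutex_lock(&slab_mutex);
	s->refcount--;
	if (!s->refcount) {
		list_del(&s->list);

		if (!__kmem_cache_shutdown(s)) {
			/* Wait for in-flight RCU frees before the pages go. */
			if (s->flags & SLAB_DESTROY_BY_RCU)
				rcu_barrier();

			kmem_cache_free(kmem_cache, s);
		} else {
			/* Shutdown refused: objects remain, keep it listed. */
			list_add(&s->list, &slab_caches);
			printk(KERN_ERR "kmem_cache_destroy %s: Slab cache still has objects\n",
			       s->name);
			dump_stack();
		}
	}
	mutex_unlock(&slab_mutex);
	put_online_cpus();
}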
int __kmem_cache_shutdown(struct kmem_cache *s)
{
-	return kmem_cache_close(s);
-}
-
-void __kmem_cache_destroy(struct kmem_cache *s)
-{
-	sysfs_slab_remove(s);
+	int rc = kmem_cache_close(s);
+
+	if (!rc)
+		sysfs_slab_remove(s);
+
+	return rc;
}
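On the SLUB side, kmem_cache_close() plays the role __cache_shrink() plays for SLAB: free what can be freed and report nonzero if objects are still live. Gating sysfs_slab_remove() on success keeps the sysfs entry around for a cache that refused to die. A condensed sketch of kmem_cache_close(), simplified from the mm/slub.c of this era:

/* Condensed sketch; the real kmem_cache_close() also frees the per-CPU
 * slabs (free_percpu) and the kmem_cache_node structures on success. */
static int kmem_cache_close_sketch(struct kmem_cache *s)
{
	int node;

	flush_all(s);			/* flush per-CPU slabs */
	for_each_node_state(node, N_NORMAL_MEMORY) {
		struct kmem_cache_node *n = get_node(s, node);

		free_partial(s, n);	/* release empty partial slabs */
		if (n->nr_partial || slabs_node(s, node))
			return 1;	/* live objects remain */
	}
	return 0;
}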
/********************************************************************