#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)
int __kmem_cache_shutdown(struct kmem_cache *);
+int __kmem_cache_shrink(struct kmem_cache *);
void slab_kmem_cache_release(struct kmem_cache *);
struct seq_file;

#ifdef CONFIG_MEMCG_KMEM
static inline bool is_root_cache(struct kmem_cache *s)
{
	return !s->memcg_params || s->memcg_params->is_root_cache;
}
-static inline void memcg_bind_pages(struct kmem_cache *s, int order)
-{
- if (!is_root_cache(s))
- atomic_add(1 << order, &s->memcg_params->nr_pages);
-}
-
-static inline void memcg_release_pages(struct kmem_cache *s, int order)
-{
- if (is_root_cache(s))
- return;
-
- if (atomic_sub_and_test((1 << order), &s->memcg_params->nr_pages))
- mem_cgroup_destroy_cache(s);
-}
-
static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return (p == s) ||
		(s->memcg_params && (p == s->memcg_params->root_cache));
}

static __always_inline int memcg_charge_slab(struct kmem_cache *s,
					     gfp_t gfp, int order)
{
	if (!memcg_kmem_enabled())
		return 0;
	if (is_root_cache(s))
		return 0;
-	return memcg_charge_kmem(s->memcg_params->memcg, gfp,
-				 PAGE_SIZE << order);
+	return __memcg_charge_slab(s, gfp, order);
}
static __always_inline void memcg_uncharge_slab(struct kmem_cache *s, int order)
{
	if (!memcg_kmem_enabled())
		return;
	if (is_root_cache(s))
		return;
-	memcg_uncharge_kmem(s->memcg_params->memcg, PAGE_SIZE << order);
+	__memcg_uncharge_slab(s, order);
}
#else
static inline bool is_root_cache(struct kmem_cache *s)
{
	return true;
}
-static inline void memcg_bind_pages(struct kmem_cache *s, int order)
-{
-}
-
-static inline void memcg_release_pages(struct kmem_cache *s, int order)
-{
-}
-
static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return true;
}

	if (slab_equal_or_root(cachep, s))
		return cachep;
	pr_err("%s: Wrong slab cache. %s but object is from %s\n",
-	       __FUNCTION__, cachep->name, s->name);
+	       __func__, cachep->name, s->name);
WARN_ON_ONCE(1);
return s;
}
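
For context, a minimal caller sketch, not part of this patch: it shows how a slab allocator's page-allocation path is expected to use the memcg_charge_slab()/memcg_uncharge_slab() wrappers above. The function name is hypothetical and error handling is simplified:

	static struct page *alloc_slab_page_charged(struct kmem_cache *s, gfp_t flags, int order)
	{
		struct page *page;

		/* Charge the owning memcg before grabbing backing pages. */
		if (memcg_charge_slab(s, flags, order))
			return NULL;

		page = alloc_pages(flags, order);
		if (!page)
			memcg_uncharge_slab(s, order);	/* roll back the charge on failure */

		return page;
	}

For a root cache, or with CONFIG_MEMCG_KMEM disabled, both wrappers are no-ops, so the common fast path is unaffected.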
-#endif
-
+#ifndef CONFIG_SLOB
/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
#ifdef CONFIG_SLAB
	unsigned int free_limit;
unsigned int colour_next; /* Per-node cache coloring */
struct array_cache *shared; /* shared per node */
- struct array_cache **alien; /* on other nodes */
+ struct alien_cache **alien; /* on other nodes */
unsigned long next_reap; /* updated without locking */
int free_touched; /* updated without locking */
#endif
};
+static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
+{
+ return s->node[node];
+}
+
+/*
+ * Iterator over all nodes. The body will be executed for each node that has
+ * a kmem_cache_node structure allocated (which is true for all online nodes)
+ */
+#define for_each_kmem_cache_node(__s, __node, __n) \
+	for (__node = 0; __node < nr_node_ids; __node++) \
+		 if ((__n = get_node(__s, __node)))
+
+#endif
+
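
A usage sketch for the new iterator; the function below is illustrative only, and the nr_partial field assumes the SLUB variant of struct kmem_cache_node:

	static unsigned long count_partial_slabs(struct kmem_cache *s)
	{
		int node;
		struct kmem_cache_node *n;
		unsigned long total = 0;

		/* Only nodes with an allocated kmem_cache_node are visited. */
		for_each_kmem_cache_node(s, node, n)
			total += n->nr_partial;

		return total;
	}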
void *slab_next(struct seq_file *m, void *p, loff_t *pos);
void slab_stop(struct seq_file *m, void *p);
+
+#endif /* MM_SLAB_H */