#include <asm/page.h>
/*
- * DEBUG - 1 for kmem_cache_create() to honour; SLAB_DEBUG_INITIAL,
- * SLAB_RED_ZONE & SLAB_POISON.
+ * DEBUG - 1 for kmem_cache_create() to honour; SLAB_RED_ZONE & SLAB_POISON.
* 0 for faster, smaller code (especially in the critical paths).
*
* STATS - 1 to collect stats for /proc/slabinfo.
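/*
 * Editor's sketch (not part of the patch): with a DEBUG build, a cache
 * can still opt into the surviving debug flags directly. This assumes
 * the 2.6.2x-era six-argument kmem_cache_create(); "foo" and foo_cache
 * are hypothetical names.
 */
struct foo {
	int state;
};
static struct kmem_cache *foo_cache;

static int __init foo_init(void)
{
	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
				      SLAB_RED_ZONE | SLAB_POISON,
				      NULL, NULL);
	return foo_cache ? 0 : -ENOMEM;
}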
/* Legal flag mask for kmem_cache_create(). */
#if DEBUG
-# define CREATE_MASK (SLAB_DEBUG_INITIAL | SLAB_RED_ZONE | \
+# define CREATE_MASK (SLAB_RED_ZONE | \
SLAB_POISON | SLAB_HWCACHE_ALIGN | \
SLAB_CACHE_DMA | \
SLAB_STORE_USER | \
#if DEBUG
WARN_ON(strchr(name, ' ')); /* It confuses parsers */
- if ((flags & SLAB_DEBUG_INITIAL) && !ctor) {
- /* No constructor, but inital state check requested */
- printk(KERN_ERR "%s: No con, but init state check "
- "requested - %s\n", __FUNCTION__, name);
- flags &= ~SLAB_DEBUG_INITIAL;
- }
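/*
 * Editor's sketch: callers that passed SLAB_DEBUG_INITIAL simply drop
 * it; debug coverage now comes from SLAB_RED_ZONE and SLAB_POISON.
 * Before/after for a hypothetical cache:
 *
 *   old: kmem_cache_create("foo", sizeof(struct foo), 0,
 *                          SLAB_DEBUG_INITIAL | SLAB_HWCACHE_ALIGN,
 *                          foo_ctor, NULL);
 *   new: kmem_cache_create("foo", sizeof(struct foo), 0,
 *                          SLAB_HWCACHE_ALIGN, foo_ctor, NULL);
 */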
#if FORCED_DEBUG
/*
* Enable redzoning and last user accounting, except for caches with
* Be lazy and only check for valid flags here, keeping it out of the
* critical path in kmem_cache_alloc().
*/
- BUG_ON(flags & ~(GFP_DMA | GFP_LEVEL_MASK | __GFP_NO_GROW));
- if (flags & __GFP_NO_GROW)
- return 0;
+ BUG_ON(flags & ~(GFP_DMA | GFP_LEVEL_MASK));
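/*
 * Editor's sketch: with __GFP_NO_GROW gone there is no longer a
 * "fail instead of growing the cache" mode to honour, so cache_grow()
 * rejects any flag outside GFP_DMA | GFP_LEVEL_MASK. Ordinary GFP
 * semantics still control sleeping; a hypothetical caller that must
 * not block just uses GFP_ATOMIC as before:
 */
static struct foo *foo_get_atomic(void)
{
	/* may return NULL, never sleeps */
	return kmem_cache_alloc(foo_cache, GFP_ATOMIC);
}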
ctor_flags = SLAB_CTOR_CONSTRUCTOR;
local_flags = (flags & GFP_LEVEL_MASK);
- if (!(local_flags & __GFP_WAIT))
- /*
- * Not allowed to sleep. Need to tell a constructor about
- * this - it might need to know...
- */
- ctor_flags |= SLAB_CTOR_ATOMIC;
-
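/*
 * Editor's sketch: SLAB_CTOR_ATOMIC told a constructor it was running
 * in a context that could not sleep, but constructors must not sleep
 * or allocate anyway, so the flag carried no real information. A ctor
 * now only distinguishes SLAB_CTOR_CONSTRUCTOR; bar and bar_ctor are
 * hypothetical:
 */
struct bar {
	spinlock_t lock;
};

static void bar_ctor(void *obj, struct kmem_cache *cachep,
		     unsigned long flags)
{
	struct bar *b = obj;

	if (flags & SLAB_CTOR_CONSTRUCTOR)
		spin_lock_init(&b->lock);
}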
/* Take the l3 list lock to change the colour_next on this node */
check_irq_off();
l3 = cachep->nodelists[nodeid];
BUG_ON(objnr >= cachep->num);
BUG_ON(objp != index_to_obj(cachep, slabp, objnr));
- if (cachep->flags & SLAB_DEBUG_INITIAL) {
- /*
- * Need to call the slab's constructor so the caller can
- * perform a verify of its state (debugging). Called without
- * the cache-lock held.
- */
- cachep->ctor(objp + obj_offset(cachep),
- cachep, SLAB_CTOR_CONSTRUCTOR | SLAB_CTOR_VERIFY);
- }
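/*
 * Editor's sketch: with the SLAB_CTOR_VERIFY pass gone, any "object is
 * in a sane state at free time" checking moves to the cache's user. A
 * hypothetical foo_free(), reusing foo_cache from the sketch above:
 */
static void foo_free(struct foo *f)
{
	BUG_ON(f->state != 0);	/* caller-side replacement for the verify pass */
	kmem_cache_free(foo_cache, f);
}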
	if (cachep->flags & SLAB_POISON && cachep->dtor) {
		/* we want to cache poison the object,
		 * call the destruction callback
		 */
		cachep->dtor(objp + obj_offset(cachep), cachep, 0);
}
#endif
objp += obj_offset(cachep);
- if (cachep->ctor && cachep->flags & SLAB_POISON) {
- unsigned long ctor_flags = SLAB_CTOR_CONSTRUCTOR;
-
- if (!(flags & __GFP_WAIT))
- ctor_flags |= SLAB_CTOR_ATOMIC;
-
- cachep->ctor(objp, cachep, ctor_flags);
- }
+ if (cachep->ctor && cachep->flags & SLAB_POISON)
+ cachep->ctor(objp, cachep, SLAB_CTOR_CONSTRUCTOR);
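/*
 * Editor's sketch: SLAB_POISON overwrites the payload on free, so the
 * constructor is re-run on every allocation here, now always as a
 * plain SLAB_CTOR_CONSTRUCTOR call. A ctor must therefore fully
 * re-establish object state and never rely on values surviving a
 * free; baz is hypothetical:
 */
struct baz {
	struct list_head list;
};

static void baz_ctor(void *obj, struct kmem_cache *cachep,
		     unsigned long flags)
{
	struct baz *b = obj;

	INIT_LIST_HEAD(&b->list);	/* old contents were poisoned */
}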
#if ARCH_SLAB_MINALIGN
if ((u32)objp & (ARCH_SLAB_MINALIGN-1)) {
printk(KERN_ERR "0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n",
struct dentry *dir;
int err;
	err = init_fault_attr_dentries(&failslab.attr, "failslab");
if (err)
return err;
dir = failslab.attr.dentries.dir;
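/*
 * Editor's sketch: once init_fault_attr_dentries() has created the
 * "failslab" directory, the generic fault-attr knobs are tunable from
 * user space (paths assume debugfs mounted at /sys/kernel/debug):
 *
 *   echo 10  > /sys/kernel/debug/failslab/probability
 *   echo 100 > /sys/kernel/debug/failslab/interval
 *   echo -1  > /sys/kernel/debug/failslab/times
 *
 * or at boot: failslab=<interval>,<probability>,<space>,<times>
 */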
check_irq_off();
- if (should_failslab(cachep, flags))
- return NULL;
-
ac = cpu_cache_get(cachep);
if (likely(ac->avail)) {
STATS_INC_ALLOCHIT(cachep);
flags | GFP_THISNODE, nid);
}
- if (!obj && !(flags & __GFP_NO_GROW)) {
+ if (!obj) {
/*
* This allocation will be performed within the constraints
* of the current cpuset / memory policy requirements.
unsigned long save_flags;
void *ptr;
+ if (should_failslab(cachep, flags))
+ return NULL;
+
cache_alloc_debugcheck_before(cachep, flags);
local_irq_save(save_flags);
unsigned long save_flags;
void *objp;
+ if (should_failslab(cachep, flags))
+ return NULL;
+
cache_alloc_debugcheck_before(cachep, flags);
local_irq_save(save_flags);
objp = __do_cache_alloc(cachep, flags);
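/*
 * Editor's sketch: hoisting should_failslab() out of ____cache_alloc()
 * into the __cache_alloc()/__cache_alloc_node() entry points makes the
 * injected failure cover the NUMA path as well, and it fires exactly
 * once per allocation. Callers already have to cope with NULL, so
 * nothing changes for them; foo_get() is hypothetical:
 */
static struct foo *foo_get(gfp_t gfp)
{
	struct foo *f = kmem_cache_alloc(foo_cache, gfp);

	if (!f)
		return NULL;	/* real OOM or an injected failure */
	return f;
}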