projects
/
firefly-linux-kernel-4.4.55.git
/ blobdiff
commit
grep
author
committer
pickaxe
?
search:
re
summary
|
shortlog
|
log
|
commit
|
commitdiff
|
tree
raw
|
inline
| side by side
Merge tag 'modules-next-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git...
[firefly-linux-kernel-4.4.55.git]
/
mm
/
slab.c
diff --git
a/mm/slab.c
b/mm/slab.c
index c4b89eaf4c96416eca9e0e8817628525b5c44a9a..7eb38dd1cefa2f988be6297c1b75b12369e05827 100644
(file)
--- a/
mm/slab.c
+++ b/
mm/slab.c
@@ -857,6 +857,11 @@ static inline void *____cache_alloc_node(struct kmem_cache *cachep,
return NULL;
}
return NULL;
}
+/*
+ * !CONFIG_NUMA variant: with a single node every allocation is already
+ * "exact", so the caller's gfp mask is returned unchanged.
+ */
+static inline gfp_t gfp_exact_node(gfp_t flags)
+{
+ return flags;
+}
+
#else /* CONFIG_NUMA */
static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int);
#else /* CONFIG_NUMA */
static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int);
@@ -1023,6 +1028,15 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
return __cache_free_alien(cachep, objp, node, page_node);
}
return __cache_free_alien(cachep, objp, node, page_node);
}
+
+/*
+ * Construct gfp mask to allocate from a specific node but do not invoke reclaim
+ * or warn about failures.
+ *
+ * __GFP_THISNODE restricts the allocation to the requested node, __GFP_NOWARN
+ * silences allocation-failure warnings, and clearing __GFP_WAIT forbids
+ * entering reclaim — so a miss on the node fails fast rather than stalling.
+ */
+static inline gfp_t gfp_exact_node(gfp_t flags)
+{
+ return (flags | __GFP_THISNODE | __GFP_NOWARN) & ~__GFP_WAIT;
+}
#endif
/*
#endif
/*
@@ -2825,7 +2839,7 @@ alloc_done:
if (unlikely(!ac->avail)) {
int x;
force_grow:
if (unlikely(!ac->avail)) {
int x;
force_grow:
-		x = cache_grow(cachep, flags | GFP_THISNODE, node, NULL);
+		x = cache_grow(cachep, gfp_exact_node(flags), node, NULL);
/* cache_grow can reenable interrupts, then ac could change. */
ac = cpu_cache_get(cachep);
/* cache_grow can reenable interrupts, then ac could change. */
ac = cpu_cache_get(cachep);
@@ -3019,7 +3033,7 @@ retry:
get_node(cache, nid) &&
get_node(cache, nid)->free_objects) {
obj = ____cache_alloc_node(cache,
get_node(cache, nid) &&
get_node(cache, nid)->free_objects) {
obj = ____cache_alloc_node(cache,
-					flags | GFP_THISNODE, nid);
+					gfp_exact_node(flags), nid);
if (obj)
break;
}
if (obj)
break;
}
@@ -3047,7 +3061,7 @@ retry:
nid = page_to_nid(page);
if (cache_grow(cache, flags, nid, page)) {
obj = ____cache_alloc_node(cache,
nid = page_to_nid(page);
if (cache_grow(cache, flags, nid, page)) {
obj = ____cache_alloc_node(cache,
-					flags | GFP_THISNODE, nid);
+					gfp_exact_node(flags), nid);
if (!obj)
/*
* Another processor may allocate the
if (!obj)
/*
* Another processor may allocate the
@@ -3118,7 +3132,7 @@ retry:
must_grow:
spin_unlock(&n->list_lock);
must_grow:
spin_unlock(&n->list_lock);
-	x = cache_grow(cachep, flags | GFP_THISNODE, nodeid, NULL);
+	x = cache_grow(cachep, gfp_exact_node(flags), nodeid, NULL);
if (x)
goto retry;
if (x)
goto retry;