projects
/
firefly-linux-kernel-4.4.55.git
/ blobdiff
commit
grep
author
committer
pickaxe
?
search:
re
summary
|
shortlog
|
log
|
commit
|
commitdiff
|
tree
raw
|
inline
| side by side
slab: correct pfmemalloc check
[firefly-linux-kernel-4.4.55.git]
/
mm
/
slab.c
diff --git a/mm/slab.c b/mm/slab.c
index 35cb0c861508c898264c56818ac431086c6f39e1..0b4ddafd8a030fb2269af01134a4a5a9eb342b6d 100644
(file)
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -787,7 +787,7 @@ static void next_reap_node(void)
* the CPUs getting into lockstep and contending for the global cache chain
* lock.
*/
* the CPUs getting into lockstep and contending for the global cache chain
* lock.
*/
-static void __cpuinit start_cpu_timer(int cpu)
+static void start_cpu_timer(int cpu)
{
struct delayed_work *reap_work = &per_cpu(slab_reap_work, cpu);
{
struct delayed_work *reap_work = &per_cpu(slab_reap_work, cpu);
@@ -930,7 +930,8 @@ static void *__ac_put_obj(struct kmem_cache *cachep, struct array_cache *ac,
{
if (unlikely(pfmemalloc_active)) {
/* Some pfmemalloc slabs exist, check if this is one */
{
if (unlikely(pfmemalloc_active)) {
/* Some pfmemalloc slabs exist, check if this is one */
- struct page *page = virt_to_head_page(objp);
+ struct slab *slabp = virt_to_slab(objp);
+ struct page *page = virt_to_head_page(slabp->s_mem);
if (PageSlabPfmemalloc(page))
set_obj_pfmemalloc(&objp);
}
if (PageSlabPfmemalloc(page))
set_obj_pfmemalloc(&objp);
}
@@ -1186,7 +1187,7 @@ static inline int slabs_tofree(struct kmem_cache *cachep,
return (n->free_objects + cachep->num - 1) / cachep->num;
}
return (n->free_objects + cachep->num - 1) / cachep->num;
}
-static void __cpuinit cpuup_canceled(long cpu)
+static void cpuup_canceled(long cpu)
{
struct kmem_cache *cachep;
struct kmem_cache_node *n = NULL;
{
struct kmem_cache *cachep;
struct kmem_cache_node *n = NULL;
@@ -1251,7 +1252,7 @@ free_array_cache:
}
}
}
}
-static int __cpuinit cpuup_prepare(long cpu)
+static int cpuup_prepare(long cpu)
{
struct kmem_cache *cachep;
struct kmem_cache_node *n = NULL;
{
struct kmem_cache *cachep;
struct kmem_cache_node *n = NULL;
@@ -1334,7 +1335,7 @@ bad:
return -ENOMEM;
}
return -ENOMEM;
}
-static int __cpuinit cpuup_callback(struct notifier_block *nfb,
+static int cpuup_callback(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
long cpu = (long)hcpu;
unsigned long action, void *hcpu)
{
long cpu = (long)hcpu;
@@ -1390,7 +1391,7 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
return notifier_from_errno(err);
}
return notifier_from_errno(err);
}
-static struct notifier_block __cpuinitdata cpucache_notifier = {
+static struct notifier_block cpucache_notifier = {
&cpuup_callback, NULL, 0
};
&cpuup_callback, NULL, 0
};
@@ -1776,7 +1777,7 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
__SetPageSlab(page + i);
if (page->pfmemalloc)
__SetPageSlab(page + i);
if (page->pfmemalloc)
-			SetPageSlabPfmemalloc(page + i);
+			SetPageSlabPfmemalloc(page);
}
memcg_bind_pages(cachep, cachep->gfporder);
}
memcg_bind_pages(cachep, cachep->gfporder);
@@ -1809,9 +1810,10 @@ static void kmem_freepages(struct kmem_cache *cachep, void *addr)
else
sub_zone_page_state(page_zone(page),
NR_SLAB_UNRECLAIMABLE, nr_freed);
else
sub_zone_page_state(page_zone(page),
NR_SLAB_UNRECLAIMABLE, nr_freed);
+
+ __ClearPageSlabPfmemalloc(page);
while (i--) {
BUG_ON(!PageSlab(page));
while (i--) {
BUG_ON(!PageSlab(page));
- __ClearPageSlabPfmemalloc(page);
__ClearPageSlab(page);
page++;
}
__ClearPageSlab(page);
page++;
}