memcg: get_mem_cgroup_from_mm()
author Johannes Weiner <hannes@cmpxchg.org>
Mon, 7 Apr 2014 22:37:43 +0000 (15:37 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Mon, 7 Apr 2014 23:35:56 +0000 (16:35 -0700)
Instead of returning NULL from try_get_mem_cgroup_from_mm() when the mm
owner is exiting, just return root_mem_cgroup.  This makes sense for all
callsites and gets rid of some of them having to fall back manually.
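
For illustration only (a sketch, not part of the patch; "mm" stands in for whatever
mm_struct the caller has): a callsite that previously had to handle the NULL
return itself, roughly

	memcg = try_get_mem_cgroup_from_mm(mm);
	if (unlikely(!memcg))
		memcg = root_mem_cgroup;	/* manual fallback when the owner is exiting */

	/* ... use memcg ... */
	css_put(&memcg->css);

can now rely on a non-NULL result with a css reference already held:

	memcg = get_mem_cgroup_from_mm(mm);	/* never NULL */

	/* ... use memcg ... */
	css_put(&memcg->css);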

[fengguang.wu@intel.com: fix warnings]
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Fengguang Wu <fengguang.wu@intel.com>
Acked-by: Michal Hocko <mhocko@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/memcontrol.h
mm/memcontrol.c

index eccfb4a4b37928be40629c259f709e90f9f15620..134636f835f750806b58254f634c85578b001d5f 100644
@@ -94,7 +94,6 @@ bool task_in_mem_cgroup(struct task_struct *task,
 
 extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page);
 extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
-extern struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm);
 
 extern struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg);
 extern struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css);
@@ -294,11 +293,6 @@ static inline struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
        return NULL;
 }
 
-static inline struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
-{
-       return NULL;
-}
-
 static inline bool mm_match_cgroup(struct mm_struct *mm,
                struct mem_cgroup *memcg)
 {
index c3b674f9774f317e8188dbaaedfb7cbb8c8c4058..87c3ec37dd26c060d85726ec450bcac19042ef9c 100644
@@ -1071,7 +1071,7 @@ struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
        return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
 }
 
-struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
+static struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
 {
        struct mem_cgroup *memcg = NULL;
 
@@ -1079,7 +1079,7 @@ struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
        do {
                memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
                if (unlikely(!memcg))
-                       break;
+                       memcg = root_mem_cgroup;
        } while (!css_tryget(&memcg->css));
        rcu_read_unlock();
        return memcg;
@@ -1475,7 +1475,7 @@ bool task_in_mem_cgroup(struct task_struct *task,
 
        p = find_lock_task_mm(task);
        if (p) {
-               curr = try_get_mem_cgroup_from_mm(p->mm);
+               curr = get_mem_cgroup_from_mm(p->mm);
                task_unlock(p);
        } else {
                /*
@@ -1489,8 +1489,6 @@ bool task_in_mem_cgroup(struct task_struct *task,
                        css_get(&curr->css);
                rcu_read_unlock();
        }
-       if (!curr)
-               return false;
        /*
         * We should check use_hierarchy of "memcg" not "curr". Because checking
         * use_hierarchy of "curr" here make this function true if hierarchy is
@@ -3617,15 +3615,7 @@ __memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **_memcg, int order)
        if (!current->mm || current->memcg_kmem_skip_account)
                return true;
 
-       memcg = try_get_mem_cgroup_from_mm(current->mm);
-
-       /*
-        * very rare case described in mem_cgroup_from_task. Unfortunately there
-        * isn't much we can do without complicating this too much, and it would
-        * be gfp-dependent anyway. Just let it go
-        */
-       if (unlikely(!memcg))
-               return true;
+       memcg = get_mem_cgroup_from_mm(current->mm);
 
        if (!memcg_can_account_kmem(memcg)) {
                css_put(&memcg->css);