memcg: flatten task_struct->memcg_oom
author Tejun Heo <tj@kernel.org>
Fri, 6 Nov 2015 02:46:09 +0000 (18:46 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 6 Nov 2015 03:34:48 +0000 (19:34 -0800)
task_struct->memcg_oom is a sub-struct containing fields which are used
for async memcg oom handling.  Most task_struct fields aren't packaged
this way, and the sub-struct can lead to unnecessary alignment padding.
This patch flattens it.

* task.memcg_oom.memcg     -> task.memcg_in_oom
* task.memcg_oom.gfp_mask  -> task.memcg_oom_gfp_mask
* task.memcg_oom.order     -> task.memcg_oom_order
* task.memcg_oom.may_oom   -> task.memcg_may_oom

In addition, task.memcg_may_oom is relocated next to the other
bitfields, which reduces the size of task_struct.
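
A minimal userspace sketch (not kernel code; the struct and field names
below are illustrative only) of why flattening the sub-struct and packing
the lone bitfield next to the other bitfields shrinks the containing
struct:

#include <stdio.h>

/* Old layout: may_oom sits inside a nested sub-struct, so it cannot
 * share a word with the other bitfields, and the sub-struct carries
 * its own trailing padding. */
struct task_before {
	unsigned flag_a:1;
	unsigned flag_b:1;
	struct {
		void *memcg;
		unsigned gfp_mask;
		int order;
		unsigned may_oom:1;
	} memcg_oom;
};

/* New layout: the bitfield shares a word with the other bitfields and
 * the remaining fields live directly in the task. */
struct task_after {
	unsigned flag_a:1;
	unsigned flag_b:1;
	unsigned memcg_may_oom:1;
	void *memcg_in_oom;
	unsigned memcg_oom_gfp_mask;
	int memcg_oom_order;
};

int main(void)
{
	/* On a typical LP64 build this prints 32 and 24; the exact
	 * numbers depend on the compiler and architecture. */
	printf("before: %zu\n", sizeof(struct task_before));
	printf("after:  %zu\n", sizeof(struct task_after));
	return 0;
}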

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Michal Hocko <mhocko@suse.com>
Reviewed-by: Vladimir Davydov <vdavydov@parallels.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/memcontrol.h
include/linux/sched.h
mm/memcontrol.c

diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 3e3318ddfc0e3e09a0e15825f78eb6052d628d78..56174c7199ee530e2ded7e6b7f7afd0396190aea 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -406,19 +406,19 @@ void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
 
 static inline void mem_cgroup_oom_enable(void)
 {
-       WARN_ON(current->memcg_oom.may_oom);
-       current->memcg_oom.may_oom = 1;
+       WARN_ON(current->memcg_may_oom);
+       current->memcg_may_oom = 1;
 }
 
 static inline void mem_cgroup_oom_disable(void)
 {
-       WARN_ON(!current->memcg_oom.may_oom);
-       current->memcg_oom.may_oom = 0;
+       WARN_ON(!current->memcg_may_oom);
+       current->memcg_may_oom = 0;
 }
 
 static inline bool task_in_memcg_oom(struct task_struct *p)
 {
-       return p->memcg_oom.memcg;
+       return p->memcg_in_oom;
 }
 
 bool mem_cgroup_oom_synchronize(bool wait);
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 5423b9c82fee09d5c4720028fbb7a1ed7bf9b571..17bf8b845aa0f457b30f2033c5b88f3a93aea999 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1473,7 +1473,9 @@ struct task_struct {
        unsigned sched_reset_on_fork:1;
        unsigned sched_contributes_to_load:1;
        unsigned sched_migrated:1;
-
+#ifdef CONFIG_MEMCG
+       unsigned memcg_may_oom:1;
+#endif
 #ifdef CONFIG_MEMCG_KMEM
        unsigned memcg_kmem_skip_account:1;
 #endif
@@ -1804,12 +1806,9 @@ struct task_struct {
        unsigned long trace_recursion;
 #endif /* CONFIG_TRACING */
 #ifdef CONFIG_MEMCG
-       struct memcg_oom_info {
-               struct mem_cgroup *memcg;
-               gfp_t gfp_mask;
-               int order;
-               unsigned int may_oom:1;
-       } memcg_oom;
+       struct mem_cgroup *memcg_in_oom;
+       gfp_t memcg_oom_gfp_mask;
+       int memcg_oom_order;
 #endif
 #ifdef CONFIG_UPROBES
        struct uprobe_task *utask;
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index c57c4423c68837d14816c5ff230435e1567e7c20..47bd7f13f526a7deb4a1a8e1c91f4d3edc7cd2de 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1661,7 +1661,7 @@ static void memcg_oom_recover(struct mem_cgroup *memcg)
 
 static void mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
 {
-       if (!current->memcg_oom.may_oom)
+       if (!current->memcg_may_oom)
                return;
        /*
         * We are in the middle of the charge context here, so we
@@ -1678,9 +1678,9 @@ static void mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
         * and when we know whether the fault was overall successful.
         */
        css_get(&memcg->css);
-       current->memcg_oom.memcg = memcg;
-       current->memcg_oom.gfp_mask = mask;
-       current->memcg_oom.order = order;
+       current->memcg_in_oom = memcg;
+       current->memcg_oom_gfp_mask = mask;
+       current->memcg_oom_order = order;
 }
 
 /**
@@ -1702,7 +1702,7 @@ static void mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
  */
 bool mem_cgroup_oom_synchronize(bool handle)
 {
-       struct mem_cgroup *memcg = current->memcg_oom.memcg;
+       struct mem_cgroup *memcg = current->memcg_in_oom;
        struct oom_wait_info owait;
        bool locked;
 
@@ -1730,8 +1730,8 @@ bool mem_cgroup_oom_synchronize(bool handle)
        if (locked && !memcg->oom_kill_disable) {
                mem_cgroup_unmark_under_oom(memcg);
                finish_wait(&memcg_oom_waitq, &owait.wait);
-               mem_cgroup_out_of_memory(memcg, current->memcg_oom.gfp_mask,
-                                        current->memcg_oom.order);
+               mem_cgroup_out_of_memory(memcg, current->memcg_oom_gfp_mask,
+                                        current->memcg_oom_order);
        } else {
                schedule();
                mem_cgroup_unmark_under_oom(memcg);
@@ -1748,7 +1748,7 @@ bool mem_cgroup_oom_synchronize(bool handle)
                memcg_oom_recover(memcg);
        }
 cleanup:
-       current->memcg_oom.memcg = NULL;
+       current->memcg_in_oom = NULL;
        css_put(&memcg->css);
        return true;
 }