[firefly-linux-kernel-4.4.55.git] / mm / oom_kill.c
index 6738c47f1f7280edc5f3fe610b2658195a0a77e0..054ff47c4478ddb6d6ef77cb66f6fd8330bdbea4 100644 (file)
@@ -47,19 +47,21 @@ static DEFINE_SPINLOCK(zone_scan_lock);
 #ifdef CONFIG_NUMA
 /**
 * has_intersects_mems_allowed() - check task eligibility for kill
- * @tsk: task struct of which task to consider
+ * @start: task struct of which task to consider
  * @mask: nodemask passed to page allocator for mempolicy ooms
  *
 * Task eligibility is determined by whether or not a candidate task, @start,
  * shares the same mempolicy nodes as current if it is bound by such a policy
  * and whether or not it has the same set of allowed cpuset nodes.
  */
-static bool has_intersects_mems_allowed(struct task_struct *tsk,
+static bool has_intersects_mems_allowed(struct task_struct *start,
                                        const nodemask_t *mask)
 {
-       struct task_struct *start = tsk;
+       struct task_struct *tsk;
+       bool ret = false;
 
-       do {
+       rcu_read_lock();
+       for_each_thread(start, tsk) {
                if (mask) {
                        /*
                         * If this is a mempolicy constrained oom, tsk's
@@ -67,19 +69,20 @@ static bool has_intersects_mems_allowed(struct task_struct *tsk,
                         * mempolicy intersects current, otherwise it may be
                         * needlessly killed.
                         */
-                       if (mempolicy_nodemask_intersects(tsk, mask))
-                               return true;
+                       ret = mempolicy_nodemask_intersects(tsk, mask);
                } else {
                        /*
                         * This is not a mempolicy constrained oom, so only
                         * check the mems of tsk's cpuset.
                         */
-                       if (cpuset_mems_allowed_intersects(current, tsk))
-                               return true;
+                       ret = cpuset_mems_allowed_intersects(current, tsk);
                }
-       } while_each_thread(start, tsk);
+               if (ret)
+                       break;
+       }
+       rcu_read_unlock();
 
-       return false;
+       return ret;
 }
 #else
 static bool has_intersects_mems_allowed(struct task_struct *tsk,
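The shape of this first hunk recurs throughout the file, so it is worth spelling out once. The old open-coded do { ... } while_each_thread(start, tsk) walk is racy: if it races with exit it can loop forever, which is the stated motivation upstream for these conversions. for_each_thread() instead iterates the thread group's ->thread_node list and must run under rcu_read_lock() (or tasklist_lock); the rewrite also switches to a single exit via a ret flag, as the diff above does. A minimal sketch of the new pattern; eligible() is an illustrative stand-in for the mempolicy/cpuset checks, not a kernel API:

#include <linux/rcupdate.h>
#include <linux/sched.h>

/* Illustrative stand-in for the mempolicy/cpuset checks above. */
static bool eligible(struct task_struct *tsk)
{
        return !(tsk->flags & PF_KTHREAD);      /* placeholder check */
}

static bool any_thread_eligible(struct task_struct *start)
{
        struct task_struct *tsk;
        bool ret = false;

        rcu_read_lock();
        for_each_thread(start, tsk) {
                ret = eligible(tsk);
                if (ret)
                        break;  /* one flat loop, so break is safe here */
        }
        rcu_read_unlock();

        return ret;
}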
@@ -97,16 +100,21 @@ static bool has_intersects_mems_allowed(struct task_struct *tsk,
  */
 struct task_struct *find_lock_task_mm(struct task_struct *p)
 {
-       struct task_struct *t = p;
+       struct task_struct *t;
 
-       do {
+       rcu_read_lock();
+
+       for_each_thread(p, t) {
                task_lock(t);
                if (likely(t->mm))
-                       return t;
+                       goto found;
                task_unlock(t);
-       } while_each_thread(p, t);
+       }
+       t = NULL;
+found:
+       rcu_read_unlock();
 
-       return NULL;
+       return t;
 }
 
 /* return true if the task is not adequate as candidate victim task. */
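find_lock_task_mm() returns a thread of @p whose ->mm is still attached, with task_lock() held on that thread so the mm cannot be detached under the caller, or NULL once every thread in the group has dropped its mm. Because this version takes rcu_read_lock() itself, callers no longer need their own read-side section around the call (see the oom_kill_process() hunks below). A hedged sketch of a typical caller; report_rss() is an illustrative name, not a kernel API:

#include <linux/mm.h>
#include <linux/oom.h>
#include <linux/sched.h>

/* Illustrative helper, not a kernel API. */
static unsigned long report_rss(struct task_struct *p)
{
        struct task_struct *t;
        unsigned long rss = 0;

        t = find_lock_task_mm(p);       /* NULL if the whole group exited */
        if (t) {
                /* task_lock(t) is held here, so t->mm is stable. */
                rss = get_mm_rss(t->mm);
                task_unlock(t);
        }
        return rss;
}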
@@ -161,7 +169,7 @@ unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
         * The baseline for the badness score is the proportion of RAM that each
         * task's rss, pagetable and swap space use.
         */
-       points = get_mm_rss(p->mm) + p->mm->nr_ptes +
+       points = get_mm_rss(p->mm) + atomic_long_read(&p->mm->nr_ptes) +
                 get_mm_counter(p->mm, MM_SWAPENTS);
        task_unlock(p);
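The nr_ptes change is independent of the thread-walk conversion: around 3.13, as part of the split page table lock work, mm->nr_ptes became an atomic_long_t so it can be updated without mm->page_table_lock held. Writers use atomic_long_inc()/atomic_long_dec(), and readers must go through atomic_long_read(), which returns a signed long. A minimal sketch, assuming the 3.13-era struct mm_struct layout (the counter was reworked again in later kernels); pte_page_count() is an illustrative name:

#include <linux/atomic.h>
#include <linux/mm_types.h>

/* Illustrative helper, not a kernel API. */
static long pte_page_count(struct mm_struct *mm)
{
        /*
         * No mm->page_table_lock needed: the counter is updated
         * atomically as PTE pages are allocated and freed.
         */
        return atomic_long_read(&mm->nr_ptes);
}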
 
@@ -301,7 +309,7 @@ static struct task_struct *select_bad_process(unsigned int *ppoints,
        unsigned long chosen_points = 0;
 
        rcu_read_lock();
-       do_each_thread(g, p) {
+       for_each_process_thread(g, p) {
                unsigned int points;
 
                switch (oom_scan_process_thread(p, totalpages, nodemask,
@@ -323,7 +331,7 @@ static struct task_struct *select_bad_process(unsigned int *ppoints,
                        chosen = p;
                        chosen_points = points;
                }
-       } while_each_thread(g, p);
+       }
        if (chosen)
                get_task_struct(chosen);
        rcu_read_unlock();
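Here the replacement is for_each_process_thread(g, p), the all-processes variant: it expands to for_each_process(g) for_each_thread(g, p) and so visits every thread of every process, replacing the old do_each_thread()/while_each_thread() pair. Note that, like the old pair, it is still two nested loops, so a bare break only leaves the inner thread walk; the gain is that the inner walk is the RCU-safe for_each_thread() rather than the racy next_thread() chain. It relies on the rcu_read_lock() that select_bad_process() already holds. A sketch of the iteration shape, with a placeholder body:

#include <linux/printk.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>

static void scan_all_threads(void)
{
        struct task_struct *g, *p;

        rcu_read_lock();
        for_each_process_thread(g, p)
                pr_info("%5d %s\n", p->pid, p->comm);   /* placeholder per-thread work */
        rcu_read_unlock();
}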
@@ -364,10 +372,10 @@ static void dump_tasks(const struct mem_cgroup *memcg, const nodemask_t *nodemas
                        continue;
                }
 
-               pr_info("[%5d] %5d %5d %8lu %8lu %7lu %8lu         %5hd %s\n",
+               pr_info("[%5d] %5d %5d %8lu %8lu %7ld %8lu         %5hd %s\n",
                        task->pid, from_kuid(&init_user_ns, task_uid(task)),
                        task->tgid, task->mm->total_vm, get_mm_rss(task->mm),
-                       task->mm->nr_ptes,
+                       atomic_long_read(&task->mm->nr_ptes),
                        get_mm_counter(task->mm, MM_SWAPENTS),
                        task->signal->oom_score_adj, task->comm);
                task_unlock(task);
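The %7lu to %7ld switch in dump_tasks() follows directly from the type change above: atomic_long_read() returns a signed long, so the matching printk specifier is %ld. In miniature (an illustrative helper, not the kernel's):

#include <linux/atomic.h>
#include <linux/mm_types.h>
#include <linux/printk.h>

static void show_pte_pages(struct mm_struct *mm)
{
        /* %ld, not %lu: atomic_long_read() returns a signed long */
        pr_info("%7ld pte pages\n", atomic_long_read(&mm->nr_ptes));
}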
@@ -406,7 +414,7 @@ void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
 {
        struct task_struct *victim = p;
        struct task_struct *child;
-       struct task_struct *t = p;
+       struct task_struct *t;
        struct mm_struct *mm;
        unsigned int victim_points = 0;
        static DEFINE_RATELIMIT_STATE(oom_rs, DEFAULT_RATELIMIT_INTERVAL,
@@ -437,7 +445,7 @@ void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
         * still freeing memory.
         */
        read_lock(&tasklist_lock);
-       do {
+       for_each_thread(p, t) {
                list_for_each_entry(child, &t->children, sibling) {
                        unsigned int child_points;
 
@@ -455,13 +463,11 @@ void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
                                get_task_struct(victim);
                        }
                }
-       } while_each_thread(p, t);
+       }
        read_unlock(&tasklist_lock);
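One subtlety in this hunk: for_each_thread() runs under read_lock(&tasklist_lock) rather than rcu_read_lock(). Either is sufficient to keep the thread list stable, and tasklist_lock is already held here because the body also walks each thread's ->children list. The pattern, sketched with a placeholder body (scan_children() is an illustrative name):

#include <linux/list.h>
#include <linux/printk.h>
#include <linux/sched.h>

/* Illustrative helper, not a kernel API. */
static void scan_children(struct task_struct *p)
{
        struct task_struct *t, *child;

        read_lock(&tasklist_lock);
        for_each_thread(p, t) {
                /* every thread of p may have forked children of its own */
                list_for_each_entry(child, &t->children, sibling)
                        pr_info("child %5d\n", child->pid);     /* placeholder */
        }
        read_unlock(&tasklist_lock);
}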
 
-       rcu_read_lock();
        p = find_lock_task_mm(victim);
        if (!p) {
-               rcu_read_unlock();
                put_task_struct(victim);
                return;
        } else if (victim != p) {
@@ -487,6 +493,7 @@ void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
         * That thread will now get access to memory reserves since it has a
         * pending fatal signal.
         */
+       rcu_read_lock();
        for_each_process(p)
                if (p->mm == mm && !same_thread_group(p, victim) &&
                    !(p->flags & PF_KTHREAD)) {
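The last two hunks are one logical change: since find_lock_task_mm() now enters its own RCU read-side section, oom_kill_process() drops the rcu_read_lock()/rcu_read_unlock() pair around that call and takes RCU only where it is still needed, around the final for_each_process() sweep for other tasks sharing the victim's mm. The resulting shape, sketched with a placeholder where the real code dispatches SIGKILL (kill_mm_users() is an illustrative name):

#include <linux/printk.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>

/* Illustrative helper, not a kernel API. */
static void kill_mm_users(struct mm_struct *mm, struct task_struct *victim)
{
        struct task_struct *p;

        rcu_read_lock();
        for_each_process(p) {
                /* skip the victim's own threads and kernel threads */
                if (p->mm == mm && !same_thread_group(p, victim) &&
                    !(p->flags & PF_KTHREAD))
                        pr_info("would kill %5d\n", p->pid);    /* real code sends SIGKILL */
        }
        rcu_read_unlock();
}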