BACKPORT: sched/fair: Make the use of prev_cpu consistent in the wakeup path
author     Morten Rasmussen <morten.rasmussen@arm.com>
           Wed, 22 Jun 2016 17:03:13 +0000 (18:03 +0100)
committer  Amit Pundir <amit.pundir@linaro.org>
           Wed, 21 Jun 2017 11:07:20 +0000 (16:37 +0530)
In commit:

  ac66f5477239 ("sched/numa: Introduce migrate_swap()")

select_task_rq() got a 'cpu' argument to enable overriding of prev_cpu
in special cases (NUMA task swapping).
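
For reference, a simplified sketch of that entry point, condensed from
kernel/sched/core.c of this era (locking and the fallback-CPU handling
are elided, so this is illustrative rather than verbatim):

  /*
   * 'cpu' normally equals task_cpu(p), but callers such as the NUMA
   * task-swap path may pass a different CPU.
   */
  static inline
  int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags)
  {
          if (p->nr_cpus_allowed > 1)
                  cpu = p->sched_class->select_task_rq(p, cpu, sd_flags,
                                                       wake_flags);

          return cpu;
  }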

However, the select_task_rq_fair() helper functions wake_affine() and
select_idle_sibling() still use task_cpu(p) directly to work out
prev_cpu, which leads to inconsistencies.

This patch passes prev_cpu (potentially overridden by NUMA code) into
the helper functions to ensure prev_cpu is indeed the same CPU
everywhere in the wakeup path.
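
To make the inconsistency concrete, here is a minimal user-space sketch
(stand-in types and hypothetical helper names, not kernel code) of the
behaviour before and after this patch:

  #include <stdio.h>

  struct task { int cpu; };      /* stand-in for task_struct */

  static int task_cpu(const struct task *p) { return p->cpu; }

  /* Pre-patch helper: re-derives the previous CPU by itself. */
  static int helper_old(const struct task *p)
  {
          return task_cpu(p);
  }

  /* Post-patch helper: uses the prev_cpu it is handed. */
  static int helper_new(const struct task *p, int prev_cpu)
  {
          (void)p;
          return prev_cpu;
  }

  int main(void)
  {
          struct task p = { .cpu = 0 };
          int prev_cpu = 4;  /* NUMA code overrode prev_cpu; task_cpu() still says 0 */

          printf("old: caller sees %d, helper sees %d\n",
                 prev_cpu, helper_old(&p));
          printf("new: caller sees %d, helper sees %d\n",
                 prev_cpu, helper_new(&p, prev_cpu));
          return 0;
  }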

Change-Id: I4951c4eead2e6045e4fb34e89f6cda17d881d4d7
cc: Ingo Molnar <mingo@redhat.com>
cc: Rik van Riel <riel@redhat.com>
Signed-off-by: Morten Rasmussen <morten.rasmussen@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: dietmar.eggemann@arm.com
Cc: linux-kernel@vger.kernel.org
Cc: mgalbraith@suse.de
Cc: vincent.guittot@linaro.org
Cc: yuyang.du@intel.com
Link: http://lkml.kernel.org/r/1466615004-3503-3-git-send-email-morten.rasmussen@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
(cherry picked from commit 772bd008cd9a1d4e8ce566f2edcc61d1c28fcbe5)
[merged with Android/EAS wakeup path]
Signed-off-by: Chris Redpath <chris.redpath@arm.com>
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 2150edce955a429371954625033c51e65e4c103f..9b64a0e74da766c7385923f8b5b903a467cc3a3c 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -670,7 +670,7 @@ static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
 }
 
 #ifdef CONFIG_SMP
-static int select_idle_sibling(struct task_struct *p, int cpu);
+static int select_idle_sibling(struct task_struct *p, int prev_cpu, int cpu);
 static unsigned long task_h_load(struct task_struct *p);
 
 /*
@@ -1404,7 +1404,8 @@ balance:
         * Call select_idle_sibling to maybe find a better one.
         */
        if (!cur)
-               env->dst_cpu = select_idle_sibling(env->p, env->dst_cpu);
+               env->dst_cpu = select_idle_sibling(env->p, env->src_cpu,
+                                                  env->dst_cpu);
 
 assign:
        assigned = true;
@@ -5280,18 +5281,18 @@ static int wake_wide(struct task_struct *p)
        return 1;
 }
 
-static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
+static int wake_affine(struct sched_domain *sd, struct task_struct *p,
+                      int prev_cpu, int sync)
 {
        s64 this_load, load;
        s64 this_eff_load, prev_eff_load;
-       int idx, this_cpu, prev_cpu;
+       int idx, this_cpu;
        struct task_group *tg;
        unsigned long weight;
        int balanced;
 
        idx       = sd->wake_idx;
        this_cpu  = smp_processor_id();
-       prev_cpu  = task_cpu(p);
        load      = source_load(prev_cpu, idx);
        this_load = target_load(this_cpu, idx);
 
@@ -5605,11 +5606,10 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
 /*
  * Try and locate an idle CPU in the sched_domain.
  */
-static int select_idle_sibling(struct task_struct *p, int target)
+static int select_idle_sibling(struct task_struct *p, int prev, int target)
 {
        struct sched_domain *sd;
        struct sched_group *sg;
-       int i = task_cpu(p);
        int best_idle = -1;
        int best_idle_cstate = -1;
        int best_idle_capacity = INT_MAX;
@@ -5621,8 +5621,8 @@ static int select_idle_sibling(struct task_struct *p, int target)
                /*
                 * If the previous cpu is cache affine and idle, don't be stupid.
                 */
-               if (i != target && cpus_share_cache(i, target) && idle_cpu(i))
-                       return i;
+               if (prev != target && cpus_share_cache(prev, target) && idle_cpu(prev))
+                       return prev;
        }
 
        /*
@@ -5632,6 +5632,7 @@ static int select_idle_sibling(struct task_struct *p, int target)
        for_each_lower_domain(sd) {
                sg = sd->groups;
                do {
+                       int i;
                        if (!cpumask_intersects(sched_group_cpus(sg),
                                                tsk_cpus_allowed(p)))
                                goto next;
@@ -5942,7 +5943,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
 
        if (affine_sd) {
                sd = NULL; /* Prefer wake_affine over balance flags */
-               if (cpu != prev_cpu && wake_affine(affine_sd, p, sync))
+               if (cpu != prev_cpu && wake_affine(affine_sd, p, prev_cpu, sync))
                        new_cpu = cpu;
        }
 
@@ -5950,7 +5951,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
                if (energy_aware() && !cpu_rq(cpu)->rd->overutilized)
                        new_cpu = energy_aware_wake_cpu(p, prev_cpu, sync);
                else if (sd_flag & SD_BALANCE_WAKE) /* XXX always ? */
-                       new_cpu = select_idle_sibling(p, new_cpu);
+                       new_cpu = select_idle_sibling(p, prev_cpu, new_cpu);
 
        } else while (sd) {
                struct sched_group *group;