Partial Revert: "WIP: sched: Add cpu capacity awareness to wakeup balancing"
author     Dietmar Eggemann <dietmar.eggemann@arm.com>    Wed, 25 Jan 2017 10:03:36 +0000 (10:03 +0000)
committer  Amit Pundir <amit.pundir@linaro.org>           Wed, 21 Jun 2017 11:07:20 +0000 (16:37 +0530)
Revert the changes in find_idlest_cpu() and find_idlest_group().

Keep the infrastructure bits which are used in following EAS patches.

Change-Id: Id516ca5f3e51b9a13db1ebb8de2df3aa25f9679b
Signed-off-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
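
For reference, below is a minimal sketch of the helpers this revert touches, reconstructed
from the hunks that follow rather than quoted verbatim from kernel/sched/fair.c; it assumes
the EAS-series definitions of __task_fits(), cpu_util(), capacity_of() and capacity_margin.
task_fits_spare() goes away together with its callers, while task_fits_max() and
cpu_overutilized() remain as infrastructure for the follow-up EAS patches.

	/* Kept: does the task fit the CPU's maximum capacity (no existing utilization counted)? */
	static inline bool task_fits_max(struct task_struct *p, int cpu)
	{
		return __task_fits(p, cpu, 0);
	}

	/* Removed by this patch: fit check against the CPU's spare (remaining) capacity. */
	static inline bool task_fits_spare(struct task_struct *p, int cpu)
	{
		return __task_fits(p, cpu, cpu_util(cpu));
	}

	/* Kept: CPU is overutilized when cpu_util(cpu) * capacity_margin exceeds capacity_of(cpu) * 1024. */
	static bool cpu_overutilized(int cpu)
	{
		return (capacity_of(cpu) * 1024) < (cpu_util(cpu) * capacity_margin);
	}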
kernel/sched/fair.c

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 12bb2ae026f5766fefa47dbb4f68075ea3ed9587..2150edce955a429371954625033c51e65e4c103f 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5384,11 +5384,6 @@ static inline bool task_fits_max(struct task_struct *p, int cpu)
        return __task_fits(p, cpu, 0);
 }
 
-static inline bool task_fits_spare(struct task_struct *p, int cpu)
-{
-       return __task_fits(p, cpu, cpu_util(cpu));
-}
-
 static bool cpu_overutilized(int cpu)
 {
        return (capacity_of(cpu) * 1024) < (cpu_util(cpu) * capacity_margin);
@@ -5509,9 +5504,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
                  int this_cpu, int sd_flag)
 {
        struct sched_group *idlest = NULL, *group = sd->groups;
-       struct sched_group *fit_group = NULL;
        unsigned long min_load = ULONG_MAX, this_load = 0;
-       unsigned long fit_capacity = ULONG_MAX;
        int load_idx = sd->forkexec_idx;
        int imbalance = 100 + (sd->imbalance_pct-100)/2;
 
@@ -5542,15 +5535,6 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
                                load = target_load(i, load_idx);
 
                        avg_load += load;
-
-                       /*
-                        * Look for most energy-efficient group that can fit
-                        * that can fit the task.
-                        */
-                       if (capacity_of(i) < fit_capacity && task_fits_spare(p, i)) {
-                               fit_capacity = capacity_of(i);
-                               fit_group = group;
-                       }
                }
 
                /* Adjust by relative CPU capacity of the group */
@@ -5564,9 +5548,6 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
                }
        } while (group = group->next, group != sd->groups);
 
-       if (fit_group)
-               return fit_group;
-
        if (!idlest || 100*this_load < imbalance*min_load)
                return NULL;
        return idlest;
@@ -5587,7 +5568,7 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
 
        /* Traverse only the allowed CPUs */
        for_each_cpu_and(i, sched_group_cpus(group), tsk_cpus_allowed(p)) {
-               if (task_fits_spare(p, i)) {
+               if (idle_cpu(i)) {
                        struct rq *rq = cpu_rq(i);
                        struct cpuidle_state *idle = idle_get_state(rq);
                        if (idle && idle->exit_latency < min_exit_latency) {
@@ -5599,8 +5580,7 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
                                min_exit_latency = idle->exit_latency;
                                latest_idle_timestamp = rq->idle_stamp;
                                shallowest_idle_cpu = i;
-                       } else if (idle_cpu(i) &&
-                                  (!idle || idle->exit_latency == min_exit_latency) &&
+                       } else if ((!idle || idle->exit_latency == min_exit_latency) &&
                                   rq->idle_stamp > latest_idle_timestamp) {
                                /*
                                 * If equal or no active idle state, then
@@ -5609,13 +5589,6 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
                                 */
                                latest_idle_timestamp = rq->idle_stamp;
                                shallowest_idle_cpu = i;
-                       } else if (shallowest_idle_cpu == -1) {
-                               /*
-                                * If we haven't found an idle CPU yet
-                                * pick a non-idle one that can fit the task as
-                                * fallback.
-                                */
-                               shallowest_idle_cpu = i;
                        }
                } else if (shallowest_idle_cpu == -1) {
                        load = weighted_cpuload(i);
@@ -5943,8 +5916,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
        int sync = wake_flags & WF_SYNC;
 
        if (sd_flag & SD_BALANCE_WAKE)
-               want_affine = (!wake_wide(p) && task_fits_max(p, cpu) &&
-                             cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) ||
+               want_affine = (!wake_wide(p) && cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) ||
                              energy_aware();
 
        rcu_read_lock();