From: Dietmar Eggemann
Date: Wed, 25 Jan 2017 10:03:36 +0000 (+0000)
Subject: Partial Revert: "WIP: sched: Add cpu capacity awareness to wakeup balancing"
X-Git-Tag: release-20171130_firefly~4^2~100^2~75
X-Git-Url: http://demsky.eecs.uci.edu/git/?a=commitdiff_plain;h=ff2170e6c647a6d1566707fa0665b9f64660c5f5;p=firefly-linux-kernel-4.4.55.git

Partial Revert: "WIP: sched: Add cpu capacity awareness to wakeup balancing"

Revert the changes in find_idlest_cpu() and find_idlest_group(). Keep
the infrastructure bits which are used in following EAS patches.

Change-Id: Id516ca5f3e51b9a13db1ebb8de2df3aa25f9679b
Signed-off-by: Dietmar Eggemann
---

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 12bb2ae026f5..2150edce955a 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5384,11 +5384,6 @@ static inline bool task_fits_max(struct task_struct *p, int cpu)
 	return __task_fits(p, cpu, 0);
 }
 
-static inline bool task_fits_spare(struct task_struct *p, int cpu)
-{
-	return __task_fits(p, cpu, cpu_util(cpu));
-}
-
 static bool cpu_overutilized(int cpu)
 {
 	return (capacity_of(cpu) * 1024) < (cpu_util(cpu) * capacity_margin);
@@ -5509,9 +5504,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
 		  int this_cpu, int sd_flag)
 {
 	struct sched_group *idlest = NULL, *group = sd->groups;
-	struct sched_group *fit_group = NULL;
 	unsigned long min_load = ULONG_MAX, this_load = 0;
-	unsigned long fit_capacity = ULONG_MAX;
 	int load_idx = sd->forkexec_idx;
 	int imbalance = 100 + (sd->imbalance_pct-100)/2;
 
@@ -5542,15 +5535,6 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
 				load = target_load(i, load_idx);
 
 			avg_load += load;
-
-			/*
-			 * Look for most energy-efficient group that can fit
-			 * that can fit the task.
-			 */
-			if (capacity_of(i) < fit_capacity && task_fits_spare(p, i)) {
-				fit_capacity = capacity_of(i);
-				fit_group = group;
-			}
 		}
 
 		/* Adjust by relative CPU capacity of the group */
@@ -5564,9 +5548,6 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
 		}
 	} while (group = group->next, group != sd->groups);
 
-	if (fit_group)
-		return fit_group;
-
 	if (!idlest || 100*this_load < imbalance*min_load)
 		return NULL;
 	return idlest;
@@ -5587,7 +5568,7 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
 
 	/* Traverse only the allowed CPUs */
 	for_each_cpu_and(i, sched_group_cpus(group), tsk_cpus_allowed(p)) {
-		if (task_fits_spare(p, i)) {
+		if (idle_cpu(i)) {
 			struct rq *rq = cpu_rq(i);
 			struct cpuidle_state *idle = idle_get_state(rq);
 			if (idle && idle->exit_latency < min_exit_latency) {
@@ -5599,8 +5580,7 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
 				min_exit_latency = idle->exit_latency;
 				latest_idle_timestamp = rq->idle_stamp;
 				shallowest_idle_cpu = i;
-			} else if (idle_cpu(i) &&
-				   (!idle || idle->exit_latency == min_exit_latency) &&
+			} else if ((!idle || idle->exit_latency == min_exit_latency) &&
 				   rq->idle_stamp > latest_idle_timestamp) {
 				/*
 				 * If equal or no active idle state, then
@@ -5609,13 +5589,6 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
 				 */
 				latest_idle_timestamp = rq->idle_stamp;
 				shallowest_idle_cpu = i;
-			} else if (shallowest_idle_cpu == -1) {
-				/*
-				 * If we haven't found an idle CPU yet
-				 * pick a non-idle one that can fit the task as
-				 * fallback.
-				 */
-				shallowest_idle_cpu = i;
 			}
 		} else if (shallowest_idle_cpu == -1) {
 			load = weighted_cpuload(i);
@@ -5943,8 +5916,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
 	int sync = wake_flags & WF_SYNC;
 
 	if (sd_flag & SD_BALANCE_WAKE)
-		want_affine = (!wake_wide(p) && task_fits_max(p, cpu) &&
-			      cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) ||
+		want_affine = (!wake_wide(p) && cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) ||
			      energy_aware();
 
 	rcu_read_lock();
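
Note (illustration only, not part of the patch): the short user-space program below models the capacity checks that this revert keeps in place, using the comparisons visible in the context lines above. The helper names mirror kernel/sched/fair.c, but the per-CPU numbers and the capacity_margin value of 1280 are assumptions made for the example; only the 1024 scale and the shape of the cpu_overutilized() / __task_fits()-style comparisons come from the diff.

/*
 * Stand-alone sketch of the capacity-margin checks kept by this revert.
 * The values below are illustrative, not taken from a real system.
 */
#include <stdbool.h>
#include <stdio.h>

#define SCHED_CAPACITY_SCALE	1024UL

static unsigned long capacity_margin = 1280;	/* assumed ~1.25x headroom */

/* Illustrative per-CPU data standing in for the kernel's runqueue state. */
struct cpu_state {
	unsigned long capacity;	/* capacity_of(cpu) */
	unsigned long util;	/* cpu_util(cpu)    */
};

/* Mirrors the kept check: is the CPU already beyond its capacity margin? */
static bool cpu_overutilized(const struct cpu_state *cpu)
{
	return (cpu->capacity * SCHED_CAPACITY_SCALE) <
	       (cpu->util * capacity_margin);
}

/*
 * Mirrors the __task_fits()-style comparison: does task_util fit on top of
 * the given base utilisation once the margin is applied?  task_fits_max()
 * passes 0 as the base; the task_fits_spare() helper removed by this patch
 * passed cpu_util(cpu).
 */
static bool task_fits(unsigned long task_util, unsigned long base_util,
		      const struct cpu_state *cpu)
{
	return (cpu->capacity * SCHED_CAPACITY_SCALE) >
	       ((base_util + task_util) * capacity_margin);
}

int main(void)
{
	struct cpu_state little = { .capacity = 430,  .util = 300 };
	struct cpu_state big    = { .capacity = 1024, .util = 300 };
	unsigned long task_util = 200;

	printf("little overutilized:     %d\n", cpu_overutilized(&little));
	printf("big overutilized:        %d\n", cpu_overutilized(&big));

	/* task_fits_max()-style check: ignore current utilisation */
	printf("task fits max(little):   %d\n", task_fits(task_util, 0, &little));

	/* task_fits_spare()-style check (the helper removed by this patch) */
	printf("task fits spare(little): %d\n",
	       task_fits(task_util, little.util, &little));
	return 0;
}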