Revert "WIP: sched: Consider spare cpu capacity at task wake-up"
author: Dietmar Eggemann <dietmar.eggemann@arm.com>
Sun, 4 Dec 2016 17:47:53 +0000 (17:47 +0000)
committer: Amit Pundir <amit.pundir@linaro.org>
Wed, 21 Jun 2017 11:07:20 +0000 (16:37 +0530)
This reverts commit 75a9695b619741019363f889c99c97c7bb823797.

Change-Id: I846b21f2bdeb0b0ca30ad65683564ed07a429428
Signed-off-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
[ minor merge changes ]
Signed-off-by: Chris Redpath <chris.redpath@arm.com>
kernel/sched/fair.c

index abe49df1e6f00e66aaff4f40eb05d3bfd2e9e0dc..12bb2ae026f5766fefa47dbb4f68075ea3ed9587 100644 (file)
@@ -5509,10 +5509,9 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
                  int this_cpu, int sd_flag)
 {
        struct sched_group *idlest = NULL, *group = sd->groups;
-       struct sched_group *fit_group = NULL, *spare_group = NULL;
+       struct sched_group *fit_group = NULL;
        unsigned long min_load = ULONG_MAX, this_load = 0;
        unsigned long fit_capacity = ULONG_MAX;
-       unsigned long max_spare_capacity = capacity_margin - SCHED_LOAD_SCALE;
        int load_idx = sd->forkexec_idx;
        int imbalance = 100 + (sd->imbalance_pct-100)/2;
 
@@ -5520,7 +5519,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
                load_idx = sd->wake_idx;
 
        do {
-               unsigned long load, avg_load, spare_capacity;
+               unsigned long load, avg_load;
                int local_group;
                int i;
 
@@ -5552,16 +5551,6 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
                                fit_capacity = capacity_of(i);
                                fit_group = group;
                        }
-
-                       /*
-                        * Look for group which has most spare capacity on a
-                        * single cpu.
-                        */
-                       spare_capacity = capacity_of(i) - cpu_util(i);
-                       if (spare_capacity > max_spare_capacity) {
-                               max_spare_capacity = spare_capacity;
-                               spare_group = group;
-                       }
                }
 
                /* Adjust by relative CPU capacity of the group */
@@ -5578,9 +5567,6 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
        if (fit_group)
                return fit_group;
 
-       if (spare_group)
-               return spare_group;
-
        if (!idlest || 100*this_load < imbalance*min_load)
                return NULL;
        return idlest;