HMP: Use unweighted load for hmp migration decisions
author Chris Redpath <chris.redpath@arm.com>
Mon, 17 Jun 2013 14:48:15 +0000 (15:48 +0100)
committer Jon Medhurst <tixy@linaro.org>
Wed, 17 Jul 2013 10:32:28 +0000 (11:32 +0100)
Normal task and runqueue load is scaled according to priority,
producing a weighted load known as the contribution.

We want CPU time to be allotted according to priority, but we also
want big/little migration decisions to be based on raw load.
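
For illustration (using the mainline prio_to_weight table, where
nice 0 maps to a weight of 1024 and nice 19 to 15): a thread that is
runnable 100% of the time at nice 19 contributes only about 15/1024,
roughly 1.5%, of the weighted load it would contribute at nice 0,
even though it consumes just as many CPU cycles.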

It is common, for example, for Android apps that follow the dev
guide to end up with all of their long-running or async action
threads at low priority unless they override the AsyncThread
constructor. These threads run at such low priority that they
become invisible to the hmp_offload routine.
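
As a worked example (assuming such a worker thread runs at Android's
THREAD_PRIORITY_BACKGROUND, i.e. nice 10, which maps to a load weight
of 110): even a fully busy thread adds only about 110/1024, roughly
11%, to the runqueue's weighted load, so it can saturate a CPU while
the old ~94% (NICE_0_LOAD-64) offload trigger is never reached.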

Using unweighted load here allows us to maximise CPU usage in busy
situations.

Signed-off-by: Chris Redpath <chris.redpath@arm.com>
kernel/sched/fair.c

index 90f61d848cb2eb8e5e21f278da1e1f8cd511acdc..a90a63807cf9639dc4c28f25608355296be9330f 100644
@@ -3841,20 +3841,24 @@ static inline unsigned int hmp_domain_min_load(struct hmp_domain *hmpd,
                                                int *min_cpu)
 {
        int cpu;
-       int min_load = INT_MAX;
-       int min_cpu_temp = NR_CPUS;
+       int min_cpu_runnable_temp = NR_CPUS;
+       unsigned long min_runnable_load = INT_MAX;
+       unsigned long contrib;
 
        for_each_cpu_mask(cpu, hmpd->cpus) {
-               if (cpu_rq(cpu)->cfs.tg_load_contrib < min_load) {
-                       min_load = cpu_rq(cpu)->cfs.tg_load_contrib;
-                       min_cpu_temp = cpu;
+               /* don't use the divisor in the loop, just at the end */
+               contrib = cpu_rq(cpu)->avg.runnable_avg_sum * scale_load_down(1024);
+               if (contrib < min_runnable_load) {
+                       min_runnable_load = contrib;
+                       min_cpu_runnable_temp = cpu;
                }
        }
 
        if (min_cpu)
-               *min_cpu = min_cpu_temp;
+               *min_cpu = min_cpu_runnable_temp;
 
-       return min_load;
+       /* domain will often have at least one empty CPU */
+       return min_runnable_load ? min_runnable_load / (LOAD_AVG_MAX + 1) : 0;
 }
 
 /*
@@ -3882,22 +3886,18 @@ static inline unsigned int hmp_offload_down(int cpu, struct sched_entity *se)
                return NR_CPUS;
 
        /* Is the current domain fully loaded? */
-       /* load < ~94% */
+       /* load < ~50% */
        min_usage = hmp_domain_min_load(hmp_cpu_domain(cpu), NULL);
-       if (min_usage < NICE_0_LOAD-64)
-               return NR_CPUS;
-
-       /* Is the cpu oversubscribed? */
-       /* load < ~194% */
-       if (cpu_rq(cpu)->cfs.tg_load_contrib < 2*NICE_0_LOAD-64)
+       if (min_usage < (NICE_0_LOAD>>1))
                return NR_CPUS;
 
        /* Is the task alone on the cpu? */
-       if (cpu_rq(cpu)->nr_running < 2)
+       if (cpu_rq(cpu)->cfs.nr_running < 2)
                return NR_CPUS;
 
        /* Is the task actually starving? */
-       if (hmp_task_starvation(se) > 768) /* <25% waiting */
+       /* >=25% ratio running/runnable = starving */
+       if (hmp_task_starvation(se) > 768)
                return NR_CPUS;
 
        /* Does the slower domain have spare cycles? */
@@ -3908,6 +3908,7 @@ static inline unsigned int hmp_offload_down(int cpu, struct sched_entity *se)
 
        if (cpumask_test_cpu(dest_cpu, &hmp_slower_domain(cpu)->cpus))
                return dest_cpu;
+
        return NR_CPUS;
 }
 #endif /* CONFIG_SCHED_HMP */
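
For reference, a minimal userspace sketch (not kernel code; it assumes
scale_load_down(1024) == 1024, i.e. the default load resolution, and
uses hypothetical sample values) of the unweighted figure the new
hmp_domain_min_load() computes:

  #include <stdio.h>

  /* maximum of the PELT runnable_avg_sum geometric series */
  #define LOAD_AVG_MAX 47742

  /*
   * Mirror of the patched calculation: scale the raw runnable sum to a
   * priority-independent 0..1023 figure, returning 0 for an idle CPU
   * without performing the division.
   */
  static unsigned long unweighted_load(unsigned long runnable_avg_sum)
  {
      unsigned long contrib = runnable_avg_sum * 1024;

      return contrib ? contrib / (LOAD_AVG_MAX + 1) : 0;
  }

  int main(void)
  {
      printf("idle: %lu\n", unweighted_load(0));                /* 0    */
      printf("half: %lu\n", unweighted_load(LOAD_AVG_MAX / 2)); /* 511  */
      printf("busy: %lu\n", unweighted_load(LOAD_AVG_MAX));     /* 1023 */
      return 0;
  }

hmp_offload_down() then compares this figure against NICE_0_LOAD>>1
(512), so offload is only considered once the least-loaded CPU in the
current domain is at least ~50% busy in unweighted terms.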