sched: Ignore offline CPUs in HMP migration & load stats
Author:     Chris Redpath <chris.redpath@arm.com>
AuthorDate: Thu, 16 May 2013 16:48:24 +0000 (17:48 +0100)
Commit:     Jon Medhurst <tixy@linaro.org>
CommitDate: Wed, 17 Jul 2013 10:12:26 +0000 (11:12 +0100)
Previously, an offline CPU would always appear to have zero load,
which distorted the offload functionality used for balancing the
big and little domains.

Maintain a mask of online CPUs in each domain and use this instead.

Change-Id: I639b564b2f40cb659af8ceb8bd37f84b8a1fe323
Signed-off-by: Chris Redpath <chris.redpath@arm.com>
arch/arm/kernel/topology.c
include/linux/sched.h
kernel/sched/fair.c
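
For orientation before the diff: each hmp_domain now carries two masks, a
fixed possible_cpus set taken from the topology and a cpus set holding only
the currently online subset, and CPU hotplug events touch only the latter.
The following is a minimal userspace sketch of that bookkeeping; the struct
and field names mirror the patch, while the bitmask plumbing and demo values
are purely illustrative, not kernel code.

#include <stdio.h>

/* Model of the patch's two-mask scheme: possible_cpus is fixed at
 * init from the topology, cpus tracks the online subset at runtime. */
struct hmp_domain_model {
        unsigned long possible_cpus;
        unsigned long cpus;
};

/* Init mirrors cpumask_and(&domain->cpus, cpu_online_mask, ...) */
static void domain_init(struct hmp_domain_model *d,
                        unsigned long possible, unsigned long online)
{
        d->possible_cpus = possible;
        d->cpus = possible & online;
}

/* Hotplug mirrors hmp_online_cpu()/hmp_offline_cpu(): only the
 * online mask changes, topology membership never does. */
static void domain_cpu_online(struct hmp_domain_model *d, int cpu)
{
        if (d->possible_cpus & (1UL << cpu))
                d->cpus |= 1UL << cpu;
}

static void domain_cpu_offline(struct hmp_domain_model *d, int cpu)
{
        d->cpus &= ~(1UL << cpu);
}

int main(void)
{
        struct hmp_domain_model big;

        domain_init(&big, 0x3 /* CPUs 0-1 */, 0x1 /* CPU 0 online */);
        printf("after init:          %#lx\n", big.cpus);  /* 0x1 */

        domain_cpu_online(&big, 1);
        printf("CPU 1 comes online:  %#lx\n", big.cpus);  /* 0x3 */

        domain_cpu_offline(&big, 0);
        printf("CPU 0 goes offline:  %#lx\n", big.cpus);  /* 0x2 */

        return 0;
}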

diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
index f2ca9e030808ed1ab2d09779bd9a2d6403b9caf2..9047dd1c5a12f1cdd33a10dde31c8a1acf41b5a2 100644
--- a/arch/arm/kernel/topology.c
+++ b/arch/arm/kernel/topology.c
@@ -383,12 +383,14 @@ void __init arch_get_hmp_domains(struct list_head *hmp_domains_list)
        if(!cpumask_empty(&hmp_slow_cpu_mask)) {
                domain = (struct hmp_domain *)
                        kmalloc(sizeof(struct hmp_domain), GFP_KERNEL);
-               cpumask_copy(&domain->cpus, &hmp_slow_cpu_mask);
+               cpumask_copy(&domain->possible_cpus, &hmp_slow_cpu_mask);
+               cpumask_and(&domain->cpus, cpu_online_mask, &domain->possible_cpus);
                list_add(&domain->hmp_domains, hmp_domains_list);
        }
        domain = (struct hmp_domain *)
                kmalloc(sizeof(struct hmp_domain), GFP_KERNEL);
-       cpumask_copy(&domain->cpus, &hmp_fast_cpu_mask);
+       cpumask_copy(&domain->possible_cpus, &hmp_fast_cpu_mask);
+       cpumask_and(&domain->cpus, cpu_online_mask, &domain->possible_cpus);
        list_add(&domain->hmp_domains, hmp_domains_list);
 }
 #endif /* CONFIG_SCHED_HMP */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 5e903596e489a2906a694be29a50305ce1430593..0e2a546cdadee9015d2a1dedb6ed748612b10a86 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -888,6 +888,7 @@ bool cpus_share_cache(int this_cpu, int that_cpu);
 #ifdef CONFIG_SCHED_HMP
 struct hmp_domain {
        struct cpumask cpus;
+       struct cpumask possible_cpus;
        struct list_head hmp_domains;
 };
 #endif /* CONFIG_SCHED_HMP */
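
With the split in place, the rule of thumb is: load and balance decisions
walk domain->cpus, while per-CPU bookkeeping that must survive hotplug (such
as the hmp_cpu_domain assignment in the next hunk) walks
domain->possible_cpus. A hedged sketch of a consumer-side walk;
hmp_domain_load() and cpu_load() are hypothetical stand-ins, not functions
from this patch:

/* Hypothetical: sum the load of one HMP domain. cpu_load() stands
 * in for whatever per-CPU load metric the balancer consults. */
static unsigned long hmp_domain_load(struct hmp_domain *domain)
{
        unsigned long sum = 0;
        int cpu;

        /* Walk domain->cpus, not domain->possible_cpus: offline
         * CPUs are excluded, so they can no longer report a bogus
         * zero load and win "least loaded" comparisons. */
        for_each_cpu(cpu, &domain->cpus)
                sum += cpu_load(cpu);

        return sum;
}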
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 3866dcc9972d31909fa56f26e4f187c68c15669b..10e7dbbbf838048c9b60786656c801c64669e096 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3381,10 +3381,10 @@ static int __init hmp_cpu_mask_setup(void)
        dc = 0;
        list_for_each(pos, &hmp_domains) {
                domain = list_entry(pos, struct hmp_domain, hmp_domains);
-               cpulist_scnprintf(buf, 64, &domain->cpus);
+               cpulist_scnprintf(buf, 64, &domain->possible_cpus);
                pr_debug("  HMP domain %d: %s\n", dc, buf);
 
-               for_each_cpu_mask(cpu, domain->cpus) {
+               for_each_cpu_mask(cpu, domain->possible_cpus) {
                        per_cpu(hmp_cpu_domain, cpu) = domain;
                }
                dc++;
@@ -3393,6 +3393,35 @@ static int __init hmp_cpu_mask_setup(void)
        return 1;
 }
 
+static struct hmp_domain *hmp_get_hmp_domain_for_cpu(int cpu)
+{
+       struct hmp_domain *domain;
+       struct list_head *pos;
+
+       list_for_each(pos, &hmp_domains) {
+               domain = list_entry(pos, struct hmp_domain, hmp_domains);
+               if(cpumask_test_cpu(cpu, &domain->possible_cpus))
+                       return domain;
+       }
+       return NULL;
+}
+
+static void hmp_online_cpu(int cpu)
+{
+       struct hmp_domain *domain = hmp_get_hmp_domain_for_cpu(cpu);
+
+       if(domain)
+               cpumask_set_cpu(cpu, &domain->cpus);
+}
+
+static void hmp_offline_cpu(int cpu)
+{
+       struct hmp_domain *domain = hmp_get_hmp_domain_for_cpu(cpu);
+
+       if(domain)
+               cpumask_clear_cpu(cpu, &domain->cpus);
+}
+
 /*
  * Migration thresholds should be in the range [0..1023]
  * hmp_up_threshold: min. load required for migrating tasks to a faster cpu
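
An aside on the new lookup helper: hmp_cpu_mask_setup() above already stores
each CPU's domain in per_cpu(hmp_cpu_domain, cpu) for every CPU in
possible_cpus, so once that setup has run, the list walk in
hmp_get_hmp_domain_for_cpu() is equivalent to a per-cpu read. A hedged
alternative, assuming that initialisation ordering holds (the patch does not
state it); hmp_domain_of() is a hypothetical name:

/* Hypothetical O(1) equivalent, valid only after hmp_cpu_mask_setup()
 * has populated hmp_cpu_domain for every CPU in possible_cpus. */
static inline struct hmp_domain *hmp_domain_of(int cpu)
{
        return per_cpu(hmp_cpu_domain, cpu);
}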
@@ -6190,11 +6219,17 @@ void trigger_load_balance(struct rq *rq, int cpu)
 
 static void rq_online_fair(struct rq *rq)
 {
+#ifdef CONFIG_SCHED_HMP
+       hmp_online_cpu(rq->cpu);
+#endif
        update_sysctl();
 }
 
 static void rq_offline_fair(struct rq *rq)
 {
+#ifdef CONFIG_SCHED_HMP
+       hmp_offline_cpu(rq->cpu);
+#endif
        update_sysctl();
 
        /* Ensure any throttled groups are reachable by pick_next_task */
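
Nothing else needs to register for hotplug notifications: the scheduler core
already invokes each class's rq_online/rq_offline callbacks as CPUs come and
go (via set_rq_online()/set_rq_offline() in kernel/sched/core.c), so the
hooks above keep domain->cpus in step with cpu_online_mask for free.
Roughly, on a 3.x kernel:

/* Approximate call path (core scheduler, not part of this patch):
 *
 *   CPU hotplug notifier (migration_call)
 *     -> set_rq_online(rq) / set_rq_offline(rq)
 *          -> class->rq_online(rq) / class->rq_offline(rq)
 *               -> rq_online_fair() / rq_offline_fair()   <-- patched above
 *                    -> hmp_online_cpu(rq->cpu) / hmp_offline_cpu(rq->cpu)
 */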