sched: Avoid SMT siblings in select_idle_sibling() if possible
author     Peter Zijlstra <peterz@infradead.org>
           Thu, 10 Nov 2011 12:01:10 +0000 (13:01 +0100)
committer  Ingo Molnar <mingo@elte.hu>
           Wed, 16 Nov 2011 07:43:43 +0000 (08:43 +0100)
Prevent select_idle_sibling() from picking a sibling thread if there's
an idle core that shares cache.

This fixes SMT balancing in the increasingly common case where an idle
core sharing the cache is available to balance to.
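
To illustrate the intended policy, here is an editor's minimal
user-space sketch, not kernel code: the toy topology, the busy[]
array and the pick_idle_cpu() helper are all invented for the
example. An idle SMT sibling of a busy core is only remembered as a
fallback; a fully idle core always wins.

  /* Editor's illustration (hypothetical topology, not kernel code).
   * Toy model: 4 cores x 2 threads; CPUs 2k and 2k+1 are siblings. */
  #include <stdbool.h>
  #include <stdio.h>

  #define NR_CPUS 8

  static bool busy[NR_CPUS] = { true, false, false, false,
                                true, true,  false, false };

  static int sibling(int cpu) { return cpu ^ 1; }

  /* Prefer a CPU whose whole core is idle; an idle thread on a
   * busy core is only kept as a fallback. */
  static int pick_idle_cpu(int target)
  {
          int cpu, fallback = -1;

          for (cpu = 0; cpu < NR_CPUS; cpu++) {
                  if (busy[cpu])
                          continue;
                  if (!busy[sibling(cpu)])
                          return cpu;     /* fully idle core: best choice */
                  if (fallback < 0)
                          fallback = cpu; /* idle sibling of a busy core */
          }
          return fallback < 0 ? target : fallback;
  }

  int main(void)
  {
          /* CPU 1 is the idle sibling of busy CPU 0, but core {2,3}
           * is fully idle, so CPU 2 is picked over CPU 1. */
          printf("picked cpu %d\n", pick_idle_cpu(0));
          return 0;
  }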

Tested-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Suresh Siddha <suresh.b.siddha@intel.com>
Link: http://lkml.kernel.org/r/1321350377.1421.55.camel@twins
Signed-off-by: Ingo Molnar <mingo@elte.hu>
kernel/sched_fair.c

index 7e51b5bb27cc1b4b916753541fbbf78d48cd2c31..ba0e1f49a22f1ecc243d0a74e6ef09cc71e3ce49 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -2326,7 +2326,8 @@ static int select_idle_sibling(struct task_struct *p, int target)
        int cpu = smp_processor_id();
        int prev_cpu = task_cpu(p);
        struct sched_domain *sd;
-       int i;
+       struct sched_group *sg;
+       int i, smt = 0;
 
        /*
         * If the task is going to be woken-up on this cpu and if it is
@@ -2346,25 +2347,38 @@ static int select_idle_sibling(struct task_struct *p, int target)
         * Otherwise, iterate the domains and find an eligible idle cpu.
         */
        rcu_read_lock();
+again:
        for_each_domain(target, sd) {
-               if (!(sd->flags & SD_SHARE_PKG_RESOURCES))
-                       break;
+               if (!smt && (sd->flags & SD_SHARE_CPUPOWER))
+                       continue;
 
-               for_each_cpu_and(i, sched_domain_span(sd), tsk_cpus_allowed(p)) {
-                       if (idle_cpu(i)) {
-                               target = i;
-                               break;
+               if (!(sd->flags & SD_SHARE_PKG_RESOURCES)) {
+                       if (!smt) {
+                               smt = 1;
+                               goto again;
                        }
+                       break;
                }
 
-               /*
-                * Lets stop looking for an idle sibling when we reached
-                * the domain that spans the current cpu and prev_cpu.
-                */
-               if (cpumask_test_cpu(cpu, sched_domain_span(sd)) &&
-                   cpumask_test_cpu(prev_cpu, sched_domain_span(sd)))
-                       break;
+               sg = sd->groups;
+               do {
+                       if (!cpumask_intersects(sched_group_cpus(sg),
+                                               tsk_cpus_allowed(p)))
+                               goto next;
+
+                       for_each_cpu(i, sched_group_cpus(sg)) {
+                               if (!idle_cpu(i))
+                                       goto next;
+                       }
+
+                       target = cpumask_first_and(sched_group_cpus(sg),
+                                       tsk_cpus_allowed(p));
+                       goto done;
+next:
+                       sg = sg->next;
+               } while (sg != sd->groups);
        }
+done:
        rcu_read_unlock();
 
        return target;
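
For readers following the control flow above: the "again" loop makes
two passes over the domain hierarchy. On the first pass (smt == 0),
SMT domains (SD_SHARE_CPUPOWER) are skipped, so only groups spanning
whole cores can match; if the walk runs past the cache-sharing levels
(SD_SHARE_PKG_RESOURCES) without success, smt is set and the walk
restarts with sibling-level groups included. A group is only chosen
when every CPU in it is idle. Below is an editor's stand-alone sketch
of that shape; the struct layout, the doms[] table and the scan()
helper are invented for illustration and are not the kernel's data
structures.

  /* Editor's illustration (hypothetical data layout, not kernel code). */
  #include <stdbool.h>
  #include <stdio.h>

  struct group  { const int *cpus; int nr; };
  struct domain { bool shares_cpupower; const struct group *groups; int nr_groups; };

  /* 2 cores x 2 threads; busy[] marks CPUs that are running a task. */
  static bool busy[4];

  static const int t0[] = { 0 }, t1[] = { 1 };
  static const int core0[] = { 0, 1 }, core1[] = { 2, 3 };

  /* SMT level: one group per thread; MC level: one group per core. */
  static const struct group smt_groups[] = { { t0, 1 }, { t1, 1 } };
  static const struct group mc_groups[]  = { { core0, 2 }, { core1, 2 } };
  static const struct domain doms[] = {
          { true,  smt_groups, 2 },
          { false, mc_groups,  2 },
  };

  static int scan(int target)
  {
          int pass, d, g, i;

          for (pass = 0; pass < 2; pass++) {      /* smt = 0, then smt = 1 */
                  for (d = 0; d < 2; d++) {
                          const struct domain *sd = &doms[d];

                          if (pass == 0 && sd->shares_cpupower)
                                  continue;       /* skip SMT on pass 1 */

                          for (g = 0; g < sd->nr_groups; g++) {
                                  const struct group *sg = &sd->groups[g];

                                  for (i = 0; i < sg->nr; i++)
                                          if (busy[sg->cpus[i]])
                                                  break;  /* group not idle */
                                  if (i == sg->nr)
                                          return sg->cpus[0]; /* "goto done" */
                          }
                  }
          }
          return target;  /* nothing fully idle anywhere */
  }

  int main(void)
  {
          busy[0] = true;
          /* Core {2,3} is fully idle, so the MC pass picks CPU 2 even
           * though CPU 1, the sibling of busy CPU 0, is also idle. */
          printf("idle core available: cpu %d\n", scan(0));

          busy[2] = true;
          /* No fully idle core remains; the second pass falls back to
           * the idle SMT sibling, CPU 1. */
          printf("only sibling idle:   cpu %d\n", scan(0));
          return 0;
  }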