sched: Introduce priority-based task migration filter
authorMorten Rasmussen <Morten.Rasmussen@arm.com>
Fri, 14 Sep 2012 13:38:11 +0000 (14:38 +0100)
committerJon Medhurst <tixy@linaro.org>
Wed, 17 Jul 2013 10:12:24 +0000 (11:12 +0100)
Introduces a priority threshold which prevents low priority tasks
from migrating to faster hmp_domains (cpus). This is useful for
user-space software which assigns lower task priority to background
tasks.

Signed-off-by: Morten Rasmussen <Morten.Rasmussen@arm.com>
arch/arm/Kconfig
kernel/sched/fair.c

index 5fb586f4fcfe448c80cdce8154eb48735dbecdc3..6157ed8e0e3ec30431d4c3317c752068a3d5842f 100644 (file)
@@ -1511,6 +1511,19 @@ config SCHED_HMP
          !SCHED_AUTOGROUP. Furthermore, normal load-balancing must be disabled
          between cpus of different type (DISABLE_CPU_SCHED_DOMAIN_BALANCE).
 
+config SCHED_HMP_PRIO_FILTER
+       bool "(EXPERIMENTAL) Filter HMP migrations by task priority"
+       depends on SCHED_HMP
+       help
+         Enables task priority based HMP migration filter. Any task with
+         a NICE value above the threshold will always be on low-power cpus
+         with less compute capacity.
+
+config SCHED_HMP_PRIO_FILTER_VAL
+       int "NICE priority threshold"
+       default 5
+       depends on SCHED_HMP_PRIO_FILTER
+
 config HAVE_ARM_SCU
        bool
        help
index 2d88b043c9e91ed944d2139835186e8749a3df30..a6b451d9cd3633c223c671e2919449a82f0d4037 100644 (file)
@@ -3392,9 +3392,14 @@ static int __init hmp_cpu_mask_setup(void)
  * hmp_down_threshold: max. load allowed for tasks migrating to a slower cpu
  * The default values (512, 256) offer good responsiveness, but may need
  * tweaking suit particular needs.
+ *
+ * hmp_up_prio: Only up migrate task with high priority (<hmp_up_prio)
  */
 unsigned int hmp_up_threshold = 512;
 unsigned int hmp_down_threshold = 256;
+#ifdef CONFIG_SCHED_HMP_PRIO_FILTER
+unsigned int hmp_up_prio = NICE_TO_PRIO(CONFIG_SCHED_HMP_PRIO_FILTER_VAL);
+#endif
 
 static unsigned int hmp_up_migration(int cpu, struct sched_entity *se);
 static unsigned int hmp_down_migration(int cpu, struct sched_entity *se);
@@ -5845,6 +5850,12 @@ static unsigned int hmp_up_migration(int cpu, struct sched_entity *se)
        if (hmp_cpu_is_fastest(cpu))
                return 0;
 
+#ifdef CONFIG_SCHED_HMP_PRIO_FILTER
+       /* Filter by task priority */
+       if (p->prio >= hmp_up_prio)
+               return 0;
+#endif
+
        if (cpumask_intersects(&hmp_faster_domain(cpu)->cpus,
                                        tsk_cpus_allowed(p))
                && se->avg.load_avg_ratio > hmp_up_threshold) {
@@ -5861,6 +5872,12 @@ static unsigned int hmp_down_migration(int cpu, struct sched_entity *se)
        if (hmp_cpu_is_slowest(cpu))
                return 0;
 
+#ifdef CONFIG_SCHED_HMP_PRIO_FILTER
+       /* Filter by task priority */
+       if (p->prio >= hmp_up_prio)
+               return 1;
+#endif
+
        if (cpumask_intersects(&hmp_slower_domain(cpu)->cpus,
                                        tsk_cpus_allowed(p))
                && se->avg.load_avg_ratio < hmp_down_threshold) {