HMP: Do not fork-boost tasks coming from PIDs <= 2
author Chris Redpath <chris.redpath@arm.com>
Tue, 12 Aug 2014 13:50:54 +0000 (14:50 +0100)
committer Jon Medhurst <tixy@linaro.org>
Tue, 12 Aug 2014 16:46:57 +0000 (17:46 +0100)
System services are generally started by init, whilst kernel threads
are started by kthreadd. We do not want to give those tasks a head
start, as this costs power for very little benefit. We do, however,
wish to give that head start to tasks which the user launches.

Further, some tasks allocate per-cpu timers directly after launch,
which can lead to those tasks always being scheduled on a big CPU
when there is no computational need to do so. Not promoting services
to big CPUs on launch prevents that, unless a service allocates its
per-cpu resources after a period of intense computation, which is
not a common pattern.

Signed-off-by: Chris Redpath <chris.redpath@arm.com>
Signed-off-by: Jon Medhurst <tixy@linaro.org>
include/linux/sched.h
kernel/sched/core.c
kernel/sched/fair.c

index 0e2a546cdadee9015d2a1dedb6ed748612b10a86..b36dd2de437dadc21ed3d5cc76971727c33a2b48 100644 (file)
@@ -946,6 +946,14 @@ struct sched_avg {
        u32 usage_avg_sum;
 };
 
+#ifdef CONFIG_SCHED_HMP
+/*
+ * We want to avoid boosting any processes forked from init (PID 1)
+ * and kthreadd (assumed to be PID 2).
+ */
+#define hmp_task_should_forkboost(task) ((task->parent && task->parent->pid > 2))
+#endif
+
 #ifdef CONFIG_SCHEDSTATS
 struct sched_statistics {
        u64                     wait_start;
index 5f242330ef8520c4e25cb57d264b804c7b6fc159..65aaa1c78ca1493fe89d942dfa24d484ea2c8819 100644 (file)
@@ -1629,9 +1629,9 @@ static void __sched_fork(struct task_struct *p)
 #ifdef CONFIG_SCHED_HMP
        /* keep LOAD_AVG_MAX in sync with fair.c if load avg series is changed */
 #define LOAD_AVG_MAX 47742
-       if (p->mm) {
-               p->se.avg.hmp_last_up_migration = 0;
-               p->se.avg.hmp_last_down_migration = 0;
+       p->se.avg.hmp_last_up_migration = 0;
+       p->se.avg.hmp_last_down_migration = 0;
+       if (hmp_task_should_forkboost(p)) {
                p->se.avg.load_avg_ratio = 1023;
                p->se.avg.load_avg_contrib =
                                (1023 * scale_load_down(p->se.load.weight));
index 161da1ab3995e3bacc2f507049531555f85a545f..74a5adfefeb7e865ec68c176c97f6d9a63b7d365 100644 (file)
@@ -4358,7 +4358,7 @@ select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
 
 #ifdef CONFIG_SCHED_HMP
        /* always put non-kernel forking tasks on a big domain */
-       if (p->mm && (sd_flag & SD_BALANCE_FORK)) {
+       if (unlikely(sd_flag & SD_BALANCE_FORK) && hmp_task_should_forkboost(p)) {
                new_cpu = hmp_select_faster_cpu(p, prev_cpu);
                if (new_cpu != NR_CPUS) {
                        hmp_next_up_delay(&p->se, new_cpu);
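
For context, a stand-alone sketch of the policy change follows. It is purely
illustrative (ordinary userspace C with hypothetical struct and helper names,
not kernel code): old_forkboost() models the previous "p->mm" test and
new_forkboost() models the hmp_task_should_forkboost() test added above.

/*
 * Illustration only: compares the old and new fork-boost policies for the
 * three fork sources named in the commit message. Not kernel code.
 */
#include <stdbool.h>
#include <stdio.h>

struct task {
	int parent_pid;  /* PID of the task that forked this one */
	bool has_mm;     /* task has a userspace mm (p->mm != NULL) */
};

/* Old policy: boost every userspace task at fork. */
static bool old_forkboost(const struct task *t) { return t->has_mm; }

/* New policy: boost only tasks whose parent is not init (1) or kthreadd (2). */
static bool new_forkboost(const struct task *t) { return t->parent_pid > 2; }

int main(void)
{
	const struct task kthread  = { .parent_pid = 2,    .has_mm = false }; /* forked by kthreadd */
	const struct task service  = { .parent_pid = 1,    .has_mm = true  }; /* forked by init */
	const struct task user_app = { .parent_pid = 1234, .has_mm = true  }; /* forked by e.g. a shell */

	printf("kthread:  old=%d new=%d\n", old_forkboost(&kthread),  new_forkboost(&kthread));  /* 0 0 */
	printf("service:  old=%d new=%d\n", old_forkboost(&service),  new_forkboost(&service));  /* 1 0 */
	printf("user app: old=%d new=%d\n", old_forkboost(&user_app), new_forkboost(&user_app)); /* 1 1 */
	return 0;
}

The "service" case is the one this patch changes: children of init no longer
start with a maxed-out load average, so they are not placed on a big CPU at
fork unless their subsequent behaviour warrants an up-migration.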