From 954978dd2cff81cc15745b9e581a1709e238f8ef Mon Sep 17 00:00:00 2001
From: Chris Redpath
Date: Mon, 17 Jun 2013 16:08:40 +0100
Subject: [PATCH] HMP: Force new non-kernel tasks onto big CPUs until load stabilises

Initialise the load stats for new tasks so that they do not see the
instability in early task life which makes it so hard to decide which
CPU is appropriate.

Also, change the fork balance algorithm so that the least loaded of the
CPUs in the big cluster is chosen regardless of the bigness of the
parent task.

This is intended to help performance for applications which use many
short-lived tasks. Although best practice is usually to use a thread
pool, apps which do not do this should not be subject to the randomness
of the early stats.

We should ignore real-time threads for forking on big CPUs, but it is
not possible to tell whether a new thread is real-time at the fork
stage. Instead, we prevent kernel threads from getting the initial
boost; when they later become real-time they will only be placed on a
big CPU if their compute requirements demand it.

Signed-off-by: Dietmar Eggemann
Signed-off-by: Chris Redpath
---
 kernel/sched/core.c | 14 ++++++++++++--
 kernel/sched/fair.c | 22 ++++++++++++++++++++++
 2 files changed, 34 insertions(+), 2 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index e45295dc33ad..4c53da3781e0 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1618,8 +1618,18 @@ static void __sched_fork(struct task_struct *p)
 	p->se.avg.runnable_avg_period = 0;
 	p->se.avg.runnable_avg_sum = 0;
 #ifdef CONFIG_SCHED_HMP
-	p->se.avg.hmp_last_up_migration = 0;
-	p->se.avg.hmp_last_down_migration = 0;
+	/* keep LOAD_AVG_MAX in sync with fair.c if load avg series is changed */
+#define LOAD_AVG_MAX 47742
+	if (p->mm) {
+		p->se.avg.hmp_last_up_migration = 0;
+		p->se.avg.hmp_last_down_migration = 0;
+		p->se.avg.load_avg_ratio = 1023;
+		p->se.avg.load_avg_contrib =
+			(1023 * scale_load_down(p->se.load.weight));
+		p->se.avg.runnable_avg_period = LOAD_AVG_MAX;
+		p->se.avg.runnable_avg_sum = LOAD_AVG_MAX;
+		p->se.avg.usage_avg_sum = LOAD_AVG_MAX;
+	}
 #endif
 #endif
 #ifdef CONFIG_SCHEDSTATS
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index a38a3edb07de..1b784eea661d 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3957,6 +3957,28 @@ select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
 	if (p->nr_cpus_allowed == 1)
 		return prev_cpu;
 
+#ifdef CONFIG_SCHED_HMP
+	/* always put non-kernel forking tasks on a big domain */
+	if (p->mm && (sd_flag & SD_BALANCE_FORK)) {
+		if(hmp_cpu_is_fastest(prev_cpu)) {
+			struct hmp_domain *hmpdom = list_entry(&hmp_cpu_domain(prev_cpu)->hmp_domains, struct hmp_domain, hmp_domains);
+			__always_unused int lowest_ratio = hmp_domain_min_load(hmpdom, &new_cpu);
+			if(new_cpu != NR_CPUS && cpumask_test_cpu(new_cpu,tsk_cpus_allowed(p)))
+				return new_cpu;
+			else {
+				new_cpu = cpumask_any_and(&hmp_faster_domain(cpu)->cpus,
+						tsk_cpus_allowed(p));
+				if(new_cpu < nr_cpu_ids)
+					return new_cpu;
+			}
+		} else {
+			new_cpu = hmp_select_faster_cpu(p, prev_cpu);
+			if (new_cpu != NR_CPUS)
+				return new_cpu;
+		}
+	}
+#endif
+
 	if (sd_flag & SD_BALANCE_WAKE) {
 		if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
 			want_affine = 1;
-- 
2.34.1
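
For reference, the control flow added to select_task_rq_fair() by the fair.c
hunk can be sketched outside kernel context as below. This is a minimal
user-space sketch under assumed conditions: the toy topology (NR_CPUS, BIG_MASK),
struct task, cpu_is_big(), least_loaded_big_cpu() and fork_balance_cpu() are all
illustrative stand-ins for the kernel's hmp_cpu_is_fastest(),
hmp_domain_min_load(), hmp_select_faster_cpu() and tsk_cpus_allowed() helpers,
not their real implementations, and the two fallback branches of the patch are
collapsed into a single "any allowed big CPU" loop.

/*
 * Minimal user-space sketch of the fork-time placement logic added to
 * select_task_rq_fair() above. Everything here is a simplified stand-in
 * for the kernel's HMP and cpumask APIs, not their real implementations.
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS   4              /* assumed toy topology: 2 big + 2 LITTLE */
#define BIG_MASK  0x3            /* assumption: CPUs 0-1 form the big cluster */

struct task {
	bool has_mm;             /* user-space task, i.e. p->mm != NULL */
	unsigned int allowed;    /* bitmask standing in for tsk_cpus_allowed(p) */
};

/* stand-in for hmp_cpu_is_fastest() */
static bool cpu_is_big(int cpu)
{
	return (BIG_MASK >> cpu) & 1;
}

/* stand-in for hmp_domain_min_load(): pick the least loaded big CPU */
static int least_loaded_big_cpu(const int *load)
{
	int cpu, best = NR_CPUS;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		if (!cpu_is_big(cpu))
			continue;
		if (best == NR_CPUS || load[cpu] < load[best])
			best = cpu;
	}
	return best;             /* NR_CPUS means "no candidate found" */
}

/* mirrors the control flow of the SD_BALANCE_FORK block in the patch */
static int fork_balance_cpu(const struct task *p, int prev_cpu, const int *load)
{
	int new_cpu;

	if (!p->has_mm)          /* kernel threads keep the normal path */
		return prev_cpu;

	if (cpu_is_big(prev_cpu)) {
		/* parent already on big: take the least loaded allowed big CPU */
		new_cpu = least_loaded_big_cpu(load);
		if (new_cpu != NR_CPUS && ((p->allowed >> new_cpu) & 1))
			return new_cpu;
	}
	/* otherwise (or if that failed): any allowed CPU in the big cluster */
	for (new_cpu = 0; new_cpu < NR_CPUS; new_cpu++)
		if (cpu_is_big(new_cpu) && ((p->allowed >> new_cpu) & 1))
			return new_cpu;

	return prev_cpu;         /* no usable big CPU: keep the previous CPU */
}

int main(void)
{
	int load[NR_CPUS] = { 700, 100, 50, 50 };
	struct task p = { .has_mm = true, .allowed = 0xf };

	printf("forked task placed on CPU %d\n", fork_balance_cpu(&p, 2, load));
	return 0;
}

In the kernel the fallback differs between the two branches
(cpumask_any_and() over the faster domain versus hmp_select_faster_cpu()),
but the effect the patch describes is the same: forking tasks that have an
mm are steered onto the big cluster, and anything that cannot be placed
there falls through to the normal wake/fork balancing below the block.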