From 8503bfd72392a65bf9d6648eae2fafb1698cfae2 Mon Sep 17 00:00:00 2001
From: Jon Medhurst <tixy@linaro.org>
Date: Tue, 8 Apr 2014 16:43:19 +0100
Subject: [PATCH] Revert "hmp: dont attempt to pull tasks if affinity doesn't
 allow it"

This reverts commit 5a570cfc01b06906faa8ac67ad7c0c6f278761c4.

Signed-off-by: Jon Medhurst <tixy@linaro.org>
---
 kernel/sched/fair.c | 31 ++++++++++++++-----------------
 1 file changed, 14 insertions(+), 17 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 1957f2589d9a..128d5723ae4d 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3694,31 +3694,30 @@ static inline struct hmp_domain *hmp_faster_domain(int cpu);
 
 /* must hold runqueue lock for queue se is currently on */
 static struct sched_entity *hmp_get_heaviest_task(
-				struct sched_entity *se, int target_cpu)
+				struct sched_entity *se, int migrate_up)
 {
 	int num_tasks = hmp_max_tasks;
 	struct sched_entity *max_se = se;
 	unsigned long int max_ratio = se->avg.load_avg_ratio;
 	const struct cpumask *hmp_target_mask = NULL;
-	struct hmp_domain *hmp;
 
-	if (hmp_cpu_is_fastest(cpu_of(se->cfs_rq->rq)))
-		return max_se;
+	if (migrate_up) {
+		struct hmp_domain *hmp;
+		if (hmp_cpu_is_fastest(cpu_of(se->cfs_rq->rq)))
+			return max_se;
 
-	hmp = hmp_faster_domain(cpu_of(se->cfs_rq->rq));
-	hmp_target_mask = &hmp->cpus;
-	if (target_cpu >= 0) {
-		BUG_ON(!cpumask_test_cpu(target_cpu, hmp_target_mask));
-		hmp_target_mask = cpumask_of(target_cpu);
+		hmp = hmp_faster_domain(cpu_of(se->cfs_rq->rq));
+		hmp_target_mask = &hmp->cpus;
 	}
 
 	/* The currently running task is not on the runqueue */
 	se = __pick_first_entity(cfs_rq_of(se));
 	while (num_tasks && se) {
 		if (entity_is_task(se) &&
-			se->avg.load_avg_ratio > max_ratio &&
-			cpumask_intersects(hmp_target_mask,
-				tsk_cpus_allowed(task_of(se)))) {
+			(se->avg.load_avg_ratio > max_ratio &&
+			 hmp_target_mask &&
+			 cpumask_intersects(hmp_target_mask,
+				tsk_cpus_allowed(task_of(se))))) {
 			max_se = se;
 			max_ratio = se->avg.load_avg_ratio;
 		}
@@ -7127,7 +7126,7 @@ static void hmp_force_up_migration(int this_cpu)
 			}
 		}
 		orig = curr;
-		curr = hmp_get_heaviest_task(curr, -1);
+		curr = hmp_get_heaviest_task(curr, 1);
 		p = task_of(curr);
 		if (hmp_up_migration(cpu, &target_cpu, curr)) {
 			cpu_rq(target_cpu)->wake_for_idle_pull = 1;
@@ -7224,14 +7223,12 @@ static unsigned int hmp_idle_pull(int this_cpu)
 			}
 		}
 		orig = curr;
-		curr = hmp_get_heaviest_task(curr, this_cpu);
+		curr = hmp_get_heaviest_task(curr, 1);
 		/* check if heaviest eligible task on this
 		 * CPU is heavier than previous task
 		 */
 		if (hmp_task_eligible_for_up_migration(curr) &&
-			curr->avg.load_avg_ratio > ratio &&
-			cpumask_test_cpu(this_cpu,
-				tsk_cpus_allowed(task_of(curr)))) {
+			curr->avg.load_avg_ratio > ratio) {
 			p = task_of(curr);
 			target = rq;
 			ratio = curr->avg.load_avg_ratio;
-- 
2.34.1