From 41e58098703b24e33f8372f82a65bb0f82ecf14a Mon Sep 17 00:00:00 2001
From: Patrick Bellasi
Date: Wed, 24 Aug 2016 11:27:27 +0100
Subject: [PATCH] FIXUP: sched/tune: update accounting before CPU capacity
 update

The SchedTune task accounting is used to identify how many tasks are in
a boost group, and thus to bias the selection of an OPP based on the
maximum boost value of the active boost groups.

The current implementation, however, updates the accounting after the
CPU capacity has been updated. This has two effects:
a) when we enqueue a boosted task, we do not immediately boost its CPU
b) when we dequeue a boosted task, we can keep a CPU boosted even when
   that is no longer required

This patch changes the order of the SchedTune accounting and SchedFreq
updates to ensure we always have an up-to-date view of which boosted
tasks are runnable on a CPU before updating its capacity.

Reported-by: Leo Yan
Signed-off-by: Patrick Bellasi
---
 kernel/sched/fair.c | 34 ++++++++++++++++++++++++++++------
 1 file changed, 28 insertions(+), 6 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index efa516dfd6bc..87a3d793f35b 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4255,6 +4255,25 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 
 #ifdef CONFIG_SMP
 
+	/*
+	 * Update SchedTune accounting.
+	 *
+	 * We do it before updating the CPU capacity to ensure the
+	 * boost value of the current task is accounted for in the
+	 * selection of the OPP.
+	 *
+	 * We do it also in the case where we enqueue a throttled task;
+	 * we could argue that a throttled task should not boost a CPU,
+	 * however:
+	 * a) properly implementing CPU boosting considering throttled
+	 *    tasks would add a lot of complexity to the solution
+	 * b) it is not easy to quantify the benefits introduced by
+	 *    such a more complex solution.
+	 * Thus, for the time being, we go for the simple solution and
+	 * boost also for throttled RQs.
+	 */
+	schedtune_enqueue_task(p, cpu_of(rq));
+
 	if (!se) {
 		walt_inc_cumulative_runnable_avg(rq, p);
 		if (!task_new && !rq->rd->overutilized &&
@@ -4274,9 +4293,6 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 			update_capacity_of(cpu_of(rq));
 	}
 
-	/* Update SchedTune accouting */
-	schedtune_enqueue_task(p, cpu_of(rq));
-
 #endif /* CONFIG_SMP */
 	hrtick_update(rq);
 }
@@ -4342,6 +4358,15 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 
 #ifdef CONFIG_SMP
 
+	/*
+	 * Update SchedTune accounting.
+	 *
+	 * We do it before updating the CPU capacity to ensure the
+	 * boost value of the current task is accounted for in the
+	 * selection of the OPP.
+	 */
+	schedtune_dequeue_task(p, cpu_of(rq));
+
 	if (!se) {
 		walt_dec_cumulative_runnable_avg(rq, p);
 
@@ -4361,9 +4386,6 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 		}
 	}
 
-	/* Update SchedTune accouting */
-	schedtune_dequeue_task(p, cpu_of(rq));
-
 #endif /* CONFIG_SMP */
 
 	hrtick_update(rq);
--
2.34.1
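
For illustration only, the small user-space program below sketches why the
ordering matters. It is not kernel code: toy_rq, toy_account_enqueue and
toy_update_capacity are made-up stand-ins for the boost-group accounting
and the capacity/OPP request, and the numbers are arbitrary. It only shows
that a capacity request computed before the accounting is refreshed misses
the boost of the task being enqueued.

/* Toy model of the ordering fixed by this patch (NOT kernel code). */
#include <stdio.h>

struct toy_rq {
	int max_boost;	/* stand-in for the max boost of active boost groups */
	int capacity;	/* stand-in for the capacity requested for the CPU */
};

/* Stand-in for schedtune_enqueue_task(): refresh boost-group accounting. */
static void toy_account_enqueue(struct toy_rq *rq, int task_boost)
{
	if (task_boost > rq->max_boost)
		rq->max_boost = task_boost;
}

/* Stand-in for update_capacity_of(): pick a capacity from current boost. */
static void toy_update_capacity(struct toy_rq *rq)
{
	rq->capacity = 100 + rq->max_boost;
}

int main(void)
{
	/* Old ordering: capacity chosen before accounting sees the task. */
	struct toy_rq before = { .max_boost = 0, .capacity = 0 };
	toy_update_capacity(&before);		/* boost not visible yet */
	toy_account_enqueue(&before, 25);
	printf("old order: capacity=%d (boost missed)\n", before.capacity);

	/* New ordering: accounting first, then capacity, as in this patch. */
	struct toy_rq after = { .max_boost = 0, .capacity = 0 };
	toy_account_enqueue(&after, 25);
	toy_update_capacity(&after);		/* boost is accounted */
	printf("new order: capacity=%d (boost applied)\n", after.capacity);

	return 0;
}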