sched: entity load-tracking load_avg_ratio
author Morten Rasmussen <Morten.Rasmussen@arm.com>
Fri, 14 Sep 2012 13:38:08 +0000 (14:38 +0100)
committer Jon Medhurst <tixy@linaro.org>
Wed, 17 Jul 2013 10:12:24 +0000 (11:12 +0100)
This patch adds load_avg_ratio to each task. The load_avg_ratio is a
variant of load_avg_contrib that is not scaled by the task's priority.
It is calculated as:

runnable_avg_sum * NICE_0_LOAD / (runnable_avg_period + 1).

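For illustration only, here is a small userspace sketch (not part of the
patch) of the effect: for the same runnable history, load_avg_contrib
scales with the task's weight, while load_avg_ratio always uses
NICE_0_LOAD. The sketch assumes NICE_0_LOAD is 1024 (its value when the
extra load resolution is disabled) and uses 335 as the weight of a
nice 5 task; the results in the comments are approximate.

/* Standalone sketch, not kernel code. */
#include <stdio.h>

#define NICE_0_LOAD 1024UL   /* assumed: weight of a nice 0 task */

static unsigned long avg(unsigned long runnable_avg_sum,
                         unsigned long runnable_avg_period,
                         unsigned long weight)
{
        return runnable_avg_sum * weight / (runnable_avg_period + 1);
}

int main(void)
{
        unsigned long sum = 23000, period = 46000;  /* ~50% runnable */

        /* contrib follows the task weight, ratio does not */
        printf("contrib (nice 0): %lu\n", avg(sum, period, 1024)); /* ~512 */
        printf("contrib (nice 5): %lu\n", avg(sum, period, 335));  /* ~167 */
        printf("ratio (any nice): %lu\n", avg(sum, period, NICE_0_LOAD)); /* ~512 */
        return 0;
}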
Signed-off-by: Morten Rasmussen <Morten.Rasmussen@arm.com>
include/linux/sched.h
kernel/sched/fair.c

index fe752ca19a8dfbc19033e11627209a08151bbe6b..207054a9867aede70f65ccc42b02ab3aa5e0b7cd 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -931,6 +931,7 @@ struct sched_avg {
        u64 last_runnable_update;
        s64 decay_count;
        unsigned long load_avg_contrib;
+       unsigned long load_avg_ratio;
        u32 usage_avg_sum;
 };
 
index 98979ddf5407bb1318e13c29ef761781afde2485..e6db20a013b019b76baaedffe233573117964f34 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1428,6 +1428,9 @@ static inline void __update_task_entity_contrib(struct sched_entity *se)
        contrib = se->avg.runnable_avg_sum * scale_load_down(se->load.weight);
        contrib /= (se->avg.runnable_avg_period + 1);
        se->avg.load_avg_contrib = scale_load(contrib);
+       contrib = se->avg.runnable_avg_sum * scale_load_down(NICE_0_LOAD);
+       contrib /= (se->avg.runnable_avg_period + 1);
+       se->avg.load_avg_ratio = scale_load(contrib);
 }
 
 /* Compute the current contribution to load_avg by se, return any delta */