From: Chris Redpath
Date: Mon, 22 Jul 2013 14:56:28 +0000 (+0100)
Subject: HMP: Access runqueue task clocks directly.
X-Git-Tag: firefly_0821_release~3680^2~16^2^2~43
X-Git-Url: http://demsky.eecs.uci.edu/git/?a=commitdiff_plain;h=0d520ee8d4e910d1400e1b21608aff3bbce7ad6f;p=firefly-linux-kernel-4.4.55.git

HMP: Access runqueue task clocks directly.

Avoids accesses through cfs_rq which go bad when the cpu_rq doesn't
have a cfs member.

Signed-off-by: Chris Redpath
Signed-off-by: Liviu Dudau
Signed-off-by: Jon Medhurst
---

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index afd76bf9433f..bfd27e89399a 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3769,8 +3769,8 @@ static inline unsigned int hmp_select_slower_cpu(struct task_struct *tsk,
 
 static inline void hmp_next_up_delay(struct sched_entity *se, int cpu)
 {
-	struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs;
-	u64 now = cfs_rq_clock_task(cfs_rq);
+	/* hack - always use clock from first online CPU */
+	u64 now = cpu_rq(cpumask_first(cpu_online_mask))->clock_task;
 	se->avg.hmp_last_up_migration = now;
 	se->avg.hmp_last_down_migration = 0;
 	cpu_rq(cpu)->avg.hmp_last_up_migration = now;
@@ -3779,8 +3779,8 @@ static inline void hmp_next_up_delay(struct sched_entity *se, int cpu)
 
 static inline void hmp_next_down_delay(struct sched_entity *se, int cpu)
 {
-	struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs;
-	u64 now = cfs_rq_clock_task(cfs_rq);
+	/* hack - always use clock from first online CPU */
+	u64 now = cpu_rq(cpumask_first(cpu_online_mask))->clock_task;
 	se->avg.hmp_last_down_migration = now;
 	se->avg.hmp_last_up_migration = 0;
 	cpu_rq(cpu)->avg.hmp_last_down_migration = now;
@@ -6481,7 +6481,6 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
 static unsigned int hmp_up_migration(int cpu, int *target_cpu, struct sched_entity *se)
 {
 	struct task_struct *p = task_of(se);
-	struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs;
 	u64 now;
 
 	if (target_cpu)
@@ -6499,7 +6498,8 @@ static unsigned int hmp_up_migration(int cpu, int *target_cpu, struct sched_enti
 		return 0;
 
 	/* Let the task load settle before doing another up migration */
-	now = cfs_rq_clock_task(cfs_rq);
+	/* hack - always use clock from first online CPU */
+	now = cpu_rq(cpumask_first(cpu_online_mask))->clock_task;
 	if (((now - se->avg.hmp_last_up_migration) >> 10)
 					< hmp_next_up_threshold)
 		return 0;
@@ -6522,7 +6522,6 @@ static unsigned int hmp_up_migration(int cpu, int *target_cpu, struct sched_enti
 static unsigned int hmp_down_migration(int cpu, struct sched_entity *se)
 {
 	struct task_struct *p = task_of(se);
-	struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs;
 	u64 now;
 
 	if (hmp_cpu_is_slowest(cpu))
@@ -6538,7 +6537,8 @@ static unsigned int hmp_down_migration(int cpu, struct sched_entity *se)
 #endif
 
 	/* Let the task load settle before doing another down migration */
-	now = cfs_rq_clock_task(cfs_rq);
+	/* hack - always use clock from first online CPU */
+	now = cpu_rq(cpumask_first(cpu_online_mask))->clock_task;
 	if (((now - se->avg.hmp_last_down_migration) >> 10)
 					< hmp_next_down_threshold)
 		return 0;
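
For illustration only, not part of the patch: the expression repeated at each call site above could be wrapped in one small helper so every HMP site samples the task clock the same way. This is a sketch under the assumption of kernel/sched/fair.c context (cpu_rq(), cpumask_first(), cpu_online_mask and rq->clock_task are the existing facilities the patch itself uses); the helper name hmp_rq_clock_task() is hypothetical.

/*
 * Hypothetical helper, sketched for kernel/sched/fair.c where
 * cpu_rq() and struct rq are visible via "sched.h".
 */
static inline u64 hmp_rq_clock_task(void)
{
	/* hack - always use clock from first online CPU, as in the patch */
	return cpu_rq(cpumask_first(cpu_online_mask))->clock_task;
}

A call site would then read simply: u64 now = hmp_rq_clock_task();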