static inline void hmp_next_up_delay(struct sched_entity *se, int cpu)
{
-	struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs;
-	u64 now = cfs_rq_clock_task(cfs_rq);
+	/* hack - always use clock from first online CPU */
+	u64 now = cpu_rq(cpumask_first(cpu_online_mask))->clock_task;
	se->avg.hmp_last_up_migration = now;
	se->avg.hmp_last_down_migration = 0;
	cpu_rq(cpu)->avg.hmp_last_up_migration = now;

static inline void hmp_next_down_delay(struct sched_entity *se, int cpu)
{
-	struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs;
-	u64 now = cfs_rq_clock_task(cfs_rq);
+	/* hack - always use clock from first online CPU */
+	u64 now = cpu_rq(cpumask_first(cpu_online_mask))->clock_task;
	se->avg.hmp_last_down_migration = now;
	se->avg.hmp_last_up_migration = 0;
	cpu_rq(cpu)->avg.hmp_last_down_migration = now;

static unsigned int hmp_up_migration(int cpu, int *target_cpu, struct sched_entity *se)
{
	struct task_struct *p = task_of(se);
-	struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs;
	u64 now;

	if (target_cpu)
		return 0;

	/* Let the task load settle before doing another up migration */
-	now = cfs_rq_clock_task(cfs_rq);
+	/* hack - always use clock from first online CPU */
+	now = cpu_rq(cpumask_first(cpu_online_mask))->clock_task;
	if (((now - se->avg.hmp_last_up_migration) >> 10)
				< hmp_next_up_threshold)
		return 0;

static unsigned int hmp_down_migration(int cpu, struct sched_entity *se)
{
	struct task_struct *p = task_of(se);
-	struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs;
	u64 now;

	if (hmp_cpu_is_slowest(cpu))
		return 0;

#endif

	/* Let the task load settle before doing another down migration */
-	now = cfs_rq_clock_task(cfs_rq);
+	/* hack - always use clock from first online CPU */
+	now = cpu_rq(cpumask_first(cpu_online_mask))->clock_task;
	if (((now - se->avg.hmp_last_down_migration) >> 10)
				< hmp_next_down_threshold)
		return 0;
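
For illustration, the settle-time pattern both halves of this patch rely on can be sketched as self-contained userspace C. This is only a sketch under stated assumptions: CLOCK_MONOTONIC stands in for the first online CPU's rq->clock_task, and ref_clock_ns(), struct task_stats, next_up_delay(), can_up_migrate() and SETTLE_THRESHOLD are names invented here, not kernel interfaces. It shows the same arithmetic as the hunks above: stamp a migration with one shared clock, then allow another migration only once the nanosecond delta, shifted right by 10 (counted in 1024 ns units), reaches the threshold.

/* Userspace sketch of the HMP settle-time check, using one shared clock
 * in place of the first online CPU's rq->clock_task. All names below are
 * illustrative only.
 */
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define SETTLE_THRESHOLD 4096	/* placeholder, in units of 1024 ns like the shift below */

struct task_stats {
	uint64_t hmp_last_up_migration;
	uint64_t hmp_last_down_migration;
};

/* Single reference clock: every stamp and every comparison uses this. */
static uint64_t ref_clock_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ULL + (uint64_t)ts.tv_nsec;
}

/* Mirrors hmp_next_up_delay(): stamp the up migration, clear the down stamp. */
static void next_up_delay(struct task_stats *st)
{
	st->hmp_last_up_migration = ref_clock_ns();
	st->hmp_last_down_migration = 0;
}

/* Mirrors the settle check: the shifted delta must reach the threshold. */
static int can_up_migrate(const struct task_stats *st)
{
	uint64_t now = ref_clock_ns();

	return ((now - st->hmp_last_up_migration) >> 10) >= SETTLE_THRESHOLD;
}

int main(void)
{
	struct task_stats st = { 0, 0 };

	next_up_delay(&st);
	printf("up migration allowed right away? %d\n", can_up_migrate(&st));
	return 0;
}

Taking every stamp and every comparison from a single clock keeps the now - last subtraction meaningful even when the stamp was written on one CPU and checked on another, which is presumably the point of routing all four call sites through cpumask_first(cpu_online_mask) instead of each runqueue's own cfs_rq clock.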