/* must hold runqueue lock for queue se is currently on */
static struct sched_entity *hmp_get_heaviest_task(
- struct sched_entity *se, int target_cpu)
+ struct sched_entity *se, int migrate_up)
{
int num_tasks = hmp_max_tasks;
struct sched_entity *max_se = se;
unsigned long int max_ratio = se->avg.load_avg_ratio;
const struct cpumask *hmp_target_mask = NULL;
- struct hmp_domain *hmp;
- if (hmp_cpu_is_fastest(cpu_of(se->cfs_rq->rq)))
- return max_se;
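+	/*
+	 * Up migration is only meaningful from a slower domain: bail out
+	 * early on a fastest-domain CPU, otherwise restrict candidates to
+	 * tasks allowed to run in the faster domain.
+	 */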
+ if (migrate_up) {
+ struct hmp_domain *hmp;
+ if (hmp_cpu_is_fastest(cpu_of(se->cfs_rq->rq)))
+ return max_se;
- hmp = hmp_faster_domain(cpu_of(se->cfs_rq->rq));
- hmp_target_mask = &hmp->cpus;
- if (target_cpu >= 0) {
- BUG_ON(!cpumask_test_cpu(target_cpu, hmp_target_mask));
- hmp_target_mask = cpumask_of(target_cpu);
+ hmp = hmp_faster_domain(cpu_of(se->cfs_rq->rq));
+ hmp_target_mask = &hmp->cpus;
}
/* The currently running task is not on the runqueue */
se = __pick_first_entity(cfs_rq_of(se));
while (num_tasks && se) {
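+		/*
+		 * Look for the heaviest qualifying task; while hmp_target_mask
+		 * is NULL no entity can displace the original se.
+		 */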
if (entity_is_task(se) &&
- se->avg.load_avg_ratio > max_ratio &&
- cpumask_intersects(hmp_target_mask,
- tsk_cpus_allowed(task_of(se)))) {
+ (se->avg.load_avg_ratio > max_ratio &&
+ hmp_target_mask &&
+ cpumask_intersects(hmp_target_mask,
+ tsk_cpus_allowed(task_of(se))))) {
max_se = se;
max_ratio = se->avg.load_avg_ratio;
}
		se = __pick_next_entity(se);
		num_tasks--;
	}
	return max_se;
}
@@ ... @@ hmp_force_up_migration
		orig = curr;
- curr = hmp_get_heaviest_task(curr, -1);
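+		/* 1 == migrate up: candidates are filtered by the faster domain's cpumask */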
+ curr = hmp_get_heaviest_task(curr, 1);
p = task_of(curr);
if (hmp_up_migration(cpu, &target_cpu, curr)) {
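+			/* flag target_cpu so it performs an idle pull of this task */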
cpu_rq(target_cpu)->wake_for_idle_pull = 1;
}
}
@@ ... @@ hmp_idle_pull
		orig = curr;
- curr = hmp_get_heaviest_task(curr, this_cpu);
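+		/* affinity is checked against the whole faster domain inside hmp_get_heaviest_task() */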
+ curr = hmp_get_heaviest_task(curr, 1);
/* check if heaviest eligible task on this
* CPU is heavier than previous task
*/
if (hmp_task_eligible_for_up_migration(curr) &&
- curr->avg.load_avg_ratio > ratio &&
- cpumask_test_cpu(this_cpu,
- tsk_cpus_allowed(task_of(curr)))) {
+ curr->avg.load_avg_ratio > ratio) {
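+			/* record the heaviest eligible task and its runqueue as the pull candidate */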
p = task_of(curr);
target = rq;
ratio = curr->avg.load_avg_ratio;