 	cpumask_clear(slow);
 }
 
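+/* cpumask of the slow HMP domain; filled in by arch_get_hmp_domains() */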
+struct cpumask hmp_slow_cpu_mask;
+
 void __init arch_get_hmp_domains(struct list_head *hmp_domains_list)
 {
 	struct cpumask hmp_fast_cpu_mask;
-	struct cpumask hmp_slow_cpu_mask;
 	struct hmp_domain *domain;
 
 	arch_get_fast_and_slow_cpus(&hmp_fast_cpu_mask, &hmp_slow_cpu_mask);
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ ... @@ static struct task_struct *find_process_by_pid(pid_t pid)
 	return pid ? find_task_by_vpid(pid) : current;
 }
 
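+/* defined by the arch code; see arch_get_hmp_domains() */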
+extern struct cpumask hmp_slow_cpu_mask;
+
 /* Actually do priority change: must hold rq lock. */
 static void
 __setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
@@ ... @@ __setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
 	p->normal_prio = normal_prio(p);
 	/* we are holding p->pi_lock already */
 	p->prio = rt_mutex_getprio(p);
-	if (rt_prio(p->prio))
+	if (rt_prio(p->prio)) {
 		p->sched_class = &rt_sched_class;
+#ifdef CONFIG_SCHED_HMP
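+		/*
+		 * Default rt tasks onto the slow CPUs: only tasks still
+		 * carrying the default all-CPU affinity are moved, so an
+		 * explicit sched_setaffinity() is left untouched.
+		 */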
+		if (cpumask_equal(&p->cpus_allowed, cpu_all_mask))
+			do_set_cpus_allowed(p, &hmp_slow_cpu_mask);
+#endif
-	else
+	} else
 		p->sched_class = &fair_sched_class;
 	set_load_weight(p);