	return min_cap * 1024 < task_util(p) * capacity_margin;
}

-static int select_energy_cpu_brute(struct task_struct *p, int prev_cpu)
+static int select_energy_cpu_brute(struct task_struct *p, int prev_cpu, int sync)
{
	int i;
	int min_diff = 0, energy_cpu = prev_cpu, spare_cpu = prev_cpu;
	unsigned long max_spare = 0;
	struct sched_domain *sd;

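+	/*
+	 * Sync wakeup hint: the waker signalled (WF_SYNC) that it is about
+	 * to sleep, so running the wakee on the waking CPU is usually the
+	 * cheapest placement and the brute-force energy search can be
+	 * skipped entirely.
+	 */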
+	if (sysctl_sched_sync_hint_enable && sync) {
+		int cpu = smp_processor_id();
+		cpumask_t search_cpus;
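+		/* Consider only CPUs that are both allowed for @p and online. */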
+		cpumask_and(&search_cpus, tsk_cpus_allowed(p), cpu_online_mask);
+		if (cpumask_test_cpu(cpu, &search_cpus))
+			return cpu;
+	}
+
	rcu_read_lock();
	sd = rcu_dereference(per_cpu(sd_ea, prev_cpu));
@@ ... @@ select_task_rq_fair(...)
		      && cpumask_test_cpu(cpu, tsk_cpus_allowed(p));

	if (energy_aware() && !(cpu_rq(prev_cpu)->rd->overutilized))
-		return select_energy_cpu_brute(p, prev_cpu);
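+		/* Forward the sync hint so the EAS path can honour it. */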
+		return select_energy_cpu_brute(p, prev_cpu, sync);

	rcu_read_lock();
	for_each_domain(cpu, tmp) {