return min_cap * 1024 < task_util(p) * capacity_margin;
}
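+/*
+ * select_energy_cpu_brute(): brute-force search for the most energy
+ * efficient CPU to place waking task @p on. Every allowed CPU in the
+ * span of the energy-aware sched_domain is evaluated with energy_diff(),
+ * and the CPU with the most spare capacity is remembered as a fallback.
+ */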
+static int select_energy_cpu_brute(struct task_struct *p, int prev_cpu)
+{
+	int i;
+	int min_diff = 0, energy_cpu = prev_cpu, spare_cpu = prev_cpu;
+	unsigned long max_spare = 0;
+	struct sched_domain *sd;
+
+	rcu_read_lock();
+
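+	/*
+	 * sd_ea points at the highest sched_domain level with energy model
+	 * data for prev_cpu; without it we cannot do energy-aware placement
+	 * and simply return prev_cpu below.
+	 */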
+	sd = rcu_dereference(per_cpu(sd_ea, prev_cpu));
+
+	if (!sd)
+		goto unlock;
+
+	for_each_cpu_and(i, tsk_cpus_allowed(p), sched_domain_span(sd)) {
+		int diff;
+		unsigned long spare;
+
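+		/*
+		 * Model moving the waking task's utilization from
+		 * prev_cpu to candidate CPU i.
+		 */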
+		struct energy_env eenv = {
+			.util_delta	= task_util(p),
+			.src_cpu	= prev_cpu,
+			.dst_cpu	= i,
+		};
+
+		spare = capacity_spare_wake(i, p);
+
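+		/* prev_cpu is the zero-diff baseline; no need to evaluate it. */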
+		if (i == prev_cpu)
+			continue;
+
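+		/*
+		 * Remember the CPU with the most spare capacity; it is used
+		 * as a fallback when no energy-saving candidate is found and
+		 * prev_cpu is overutilized.
+		 */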
+		if (spare > max_spare) {
+			max_spare = spare;
+			spare_cpu = i;
+		}
+
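+		/*
+		 * Skip CPUs without enough spare capacity for the task,
+		 * using the same fixed-point margin as wake_cap() above
+		 * (1024 == SCHED_CAPACITY_SCALE).
+		 */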
+		if (spare * 1024 < capacity_margin * task_util(p))
+			continue;
+
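+		/*
+		 * energy_diff() < 0 means placing the task on CPU i saves
+		 * energy; min_diff starts at 0 so only CPUs with strict
+		 * savings over prev_cpu are picked.
+		 */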
+		diff = energy_diff(&eenv);
+
+		if (diff < min_diff) {
+			min_diff = diff;
+			energy_cpu = i;
+		}
+	}
+
+unlock:
+	rcu_read_unlock();
+
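+	/*
+	 * Prefer prev_cpu when no energy savings were found and prev_cpu
+	 * still has headroom; otherwise pick the best energy candidate, or
+	 * the max-spare-capacity CPU when prev_cpu is overutilized.
+	 */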
+	if (energy_cpu == prev_cpu && !cpu_overutilized(prev_cpu))
+		return prev_cpu;
+
+	return energy_cpu != prev_cpu ? energy_cpu : spare_cpu;
+}
+
/*
* select_task_rq_fair: Select target runqueue for the waking task in domains
* that have the 'sd_flag' flag set. In practice, this is SD_BALANCE_WAKE,
want_affine = !wake_wide(p) && !wake_cap(p, cpu, prev_cpu)
&& cpumask_test_cpu(cpu, tsk_cpus_allowed(p));
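+	/*
+	 * When EAS is enabled and the system is not overutilized, place
+	 * the task purely on energy grounds and bypass the wake-affine
+	 * and slow paths below.
+	 */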
+	if (energy_aware() && !(cpu_rq(prev_cpu)->rd->overutilized))
+		return select_energy_cpu_brute(p, prev_cpu);
+
rcu_read_lock();
for_each_domain(cpu, tmp) {
if (!(tmp->flags & SD_LOAD_BALANCE))