 {
 	struct sched_domain *sd;
 	struct sched_group *sg;
-	int best_idle = -1;
-	int best_idle_cstate = -1;
-	int best_idle_capacity = INT_MAX;
+	int best_idle_cpu = -1;
+	int best_idle_cstate = INT_MAX;
+	unsigned long best_idle_capacity = ULONG_MAX;
 
 	if (!sysctl_sched_cstate_aware) {
 		if (idle_cpu(target))
@@ ... @@
 			if (sysctl_sched_cstate_aware) {
 				for_each_cpu_and(i, tsk_cpus_allowed(p), sched_group_cpus(sg)) {
-					struct rq *rq = cpu_rq(i);
-					int idle_idx = idle_get_state_idx(rq);
+					int idle_idx = idle_get_state_idx(cpu_rq(i));
 					unsigned long new_usage = boosted_task_util(p);
 					unsigned long capacity_orig = capacity_orig_of(i);
+
 					if (new_usage > capacity_orig || !idle_cpu(i))
 						goto next;
 
 					if (i == target && new_usage <= capacity_curr_of(target))
 						return target;
 
-					if (best_idle < 0 || (idle_idx < best_idle_cstate && capacity_orig <= best_idle_capacity)) {
-						best_idle = i;
+					if (idle_idx < best_idle_cstate &&
+					    capacity_orig <= best_idle_capacity) {
+						best_idle_cpu = i;
 						best_idle_cstate = idle_idx;
 						best_idle_capacity = capacity_orig;
 					}
@@ ... @@
 			sg = sg->next;
 		} while (sg != sd->groups);
 	}
-	if (best_idle > 0)
-		target = best_idle;
+
+	if (best_idle_cpu >= 0)
+		target = best_idle_cpu;
 
 done:
 	return target;
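
The functional change is in the sentinel handling: best_idle_cstate and best_idle_capacity now start at INT_MAX and ULONG_MAX, so the first eligible idle CPU always passes the comparison and the old "best_idle < 0 ||" escape clause can be dropped, and the final test becomes "best_idle_cpu >= 0" rather than "> 0", so CPU 0 is no longer rejected as a valid result. Below is a minimal, self-contained sketch of that selection rule; the per-CPU tables and pick_best_idle_cpu() are invented for illustration and are not the kernel's data structures or API.

/*
 * Standalone illustration (not kernel code): pick the idle CPU in the
 * shallowest idle state, only moving to a CPU whose original capacity
 * is no larger than the current best's.  Sentinels start at their
 * maximum values so the first eligible CPU always becomes the initial
 * best, and the caller checks ">= 0" so CPU 0 is a valid answer.
 */
#include <limits.h>
#include <stdio.h>

#define NR_CPUS 4

/* invented per-CPU idle-state indices; -1 means the CPU is not idle */
static const int idle_state_idx[NR_CPUS] = { 0, -1, 2, 1 };
/* invented original capacities: two little cores, two big cores */
static const unsigned long cpu_capacity_orig[NR_CPUS] = { 430, 430, 1024, 1024 };

static int pick_best_idle_cpu(void)
{
	int best_idle_cpu = -1;
	int best_idle_cstate = INT_MAX;
	unsigned long best_idle_capacity = ULONG_MAX;
	int i;

	for (i = 0; i < NR_CPUS; i++) {
		if (idle_state_idx[i] < 0)	/* busy CPU, skip it */
			continue;

		/*
		 * A shallower idle state wins, but never trade up to a
		 * higher-capacity CPU than the current best.
		 */
		if (idle_state_idx[i] < best_idle_cstate &&
		    cpu_capacity_orig[i] <= best_idle_capacity) {
			best_idle_cpu = i;
			best_idle_cstate = idle_state_idx[i];
			best_idle_capacity = cpu_capacity_orig[i];
		}
	}

	return best_idle_cpu;	/* -1 only if no CPU was idle at all */
}

int main(void)
{
	int cpu = pick_best_idle_cpu();

	/* ">= 0" keeps CPU 0 selectable; "> 0" would silently drop it */
	if (cpu >= 0)
		printf("best idle cpu: %d\n", cpu);
	else
		printf("no idle cpu found\n");
	return 0;
}

Because the sentinels begin at their maximum values, the first idle CPU found always becomes the initial best candidate, which is exactly what lets the hunk above remove the extra "best_idle < 0" test.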