static unsigned int hmp_up_migration(int cpu, int *target_cpu, struct sched_entity *se);
static unsigned int hmp_down_migration(int cpu, struct sched_entity *se);
static inline unsigned int hmp_domain_min_load(struct hmp_domain *hmpd,
- int *min_cpu);
+ int *min_cpu, struct cpumask *affinity);
/* Check if cpu is in fastest hmp_domain */
static inline unsigned int hmp_cpu_is_fastest(int cpu)
/*
* Selects a cpu in previous (faster) hmp_domain
- * Note that cpumask_any_and() returns the first cpu in the cpumask
*/
static inline unsigned int hmp_select_faster_cpu(struct task_struct *tsk,
int cpu)
{
int lowest_cpu=NR_CPUS;
- __always_unused int lowest_ratio = hmp_domain_min_load(hmp_faster_domain(cpu), &lowest_cpu);
- /*
- * If the lowest-loaded CPU in the domain is allowed by the task affinity
- * select that one, otherwise select one which is allowed
- */
- if(lowest_cpu != NR_CPUS && cpumask_test_cpu(lowest_cpu,tsk_cpus_allowed(tsk)))
- return lowest_cpu;
+ __always_unused int lowest_ratio;
+ struct hmp_domain *hmp;
+
+ /*
+ * When already in the fastest domain there is no faster one to
+ * move to, so pick the least-loaded CPU of the current domain
+ * instead of calling hmp_faster_domain() on the top domain.
+ */
+ if (hmp_cpu_is_fastest(cpu))
+ hmp = hmp_cpu_domain(cpu);
else
+ hmp = hmp_faster_domain(cpu);
+
+ /*
+ * hmp_domain_min_load() now takes the task's affinity mask, so
+ * lowest_cpu is either a CPU the task may run on or NR_CPUS when
+ * the mask and the domain do not intersect — the caller must
+ * check for NR_CPUS before using the result.
+ */
+ lowest_ratio = hmp_domain_min_load(hmp, &lowest_cpu,
+ tsk_cpus_allowed(tsk));
+
+ return lowest_cpu;
}
/*
else
hmp = hmp_slower_domain(cpu);
- lowest_ratio = hmp_domain_min_load(hmp, &lowest_cpu);
- /*
- * If the lowest-loaded CPU in the domain is allowed by the task affinity
- * select that one, otherwise select one which is allowed
- */
- if(lowest_cpu != NR_CPUS && cpumask_test_cpu(lowest_cpu,tsk_cpus_allowed(tsk)))
- return lowest_cpu;
- else
- return cpumask_any_and(&hmp_slower_domain(cpu)->cpus,
- tsk_cpus_allowed(tsk));
+ lowest_ratio = hmp_domain_min_load(hmp, &lowest_cpu,
+ tsk_cpus_allowed(tsk));
+
+ return lowest_cpu;
}
static inline void hmp_next_up_delay(struct sched_entity *se, int cpu)
}
late_initcall(hmp_attr_init);
#endif /* CONFIG_HMP_VARIABLE_SCALE */
-
+/*
+ * return the load of the lowest-loaded CPU in a given HMP domain
+ * min_cpu optionally points to an int to receive the CPU;
+ * affinity optionally points to a cpumask restricting the
+ * CPUs to be considered. Note:
+ * + min_cpu = NR_CPUS only if no CPUs are in the set of
+ * affinity && hmp_domain cpus
+ * + min_cpu will always otherwise equal one of the CPUs in
+ * the hmp domain
+ * + when more than one CPU has the same load, the one which
+ * is least-recently-disturbed by an HMP migration will be
+ * selected
+ * + if all CPUs are equally loaded or idle and the times are
+ * all the same, the first in the set will be used
+ * + if affinity is not set, cpu_online_mask is used
+ */
static inline unsigned int hmp_domain_min_load(struct hmp_domain *hmpd,
- int *min_cpu)
+ int *min_cpu, struct cpumask *affinity)
{
int cpu;
int min_cpu_runnable_temp = NR_CPUS;
unsigned long min_runnable_load = INT_MAX;
unsigned long contrib;
struct sched_avg *avg;
+ struct cpumask temp_cpumask;
+ /*
+ * only look at CPUs allowed if specified,
+ * otherwise look at all online CPUs in the
+ * right HMP domain
+ */
+ cpumask_and(&temp_cpumask, &hmpd->cpus, affinity ? affinity : cpu_online_mask);
- for_each_cpu_mask(cpu, hmpd->cpus) {
+ for_each_cpu_mask(cpu, temp_cpumask) {
avg = &cpu_rq(cpu)->avg;
/* used for both up and down migration */
curr_last_migration = avg->hmp_last_up_migration ?
return NR_CPUS;
/* Is there an idle CPU in the current domain */
- min_usage = hmp_domain_min_load(hmp_cpu_domain(cpu), NULL);
+ min_usage = hmp_domain_min_load(hmp_cpu_domain(cpu), NULL, NULL);
if (min_usage == 0) {
trace_sched_hmp_offload_abort(cpu, min_usage, "load");
return NR_CPUS;
}
/* Does the slower domain have any idle CPUs? */
- min_usage = hmp_domain_min_load(hmp_slower_domain(cpu), &dest_cpu);
- if (min_usage > 0) {
- trace_sched_hmp_offload_abort(cpu, min_usage, "slowdomain");
- return NR_CPUS;
- }
+ min_usage = hmp_domain_min_load(hmp_slower_domain(cpu), &dest_cpu,
+ tsk_cpus_allowed(task_of(se)));
- if (cpumask_test_cpu(dest_cpu, &hmp_slower_domain(cpu)->cpus)) {
+ if (min_usage == 0) {
trace_sched_hmp_offload_succeed(cpu, dest_cpu);
return dest_cpu;
- }
-
+ } else
+ trace_sched_hmp_offload_abort(cpu,min_usage,"slowdomain");
return NR_CPUS;
}
#endif /* CONFIG_SCHED_HMP */
#ifdef CONFIG_SCHED_HMP
/* always put non-kernel forking tasks on a big domain */
if (p->mm && (sd_flag & SD_BALANCE_FORK)) {
- if(hmp_cpu_is_fastest(prev_cpu)) {
- struct hmp_domain *hmpdom = list_entry(&hmp_cpu_domain(prev_cpu)->hmp_domains, struct hmp_domain, hmp_domains);
- __always_unused int lowest_ratio = hmp_domain_min_load(hmpdom, &new_cpu);
- if (new_cpu != NR_CPUS &&
- cpumask_test_cpu(new_cpu,
- tsk_cpus_allowed(p))) {
- hmp_next_up_delay(&p->se, new_cpu);
- return new_cpu;
- } else {
- new_cpu = cpumask_any_and(
- &hmp_faster_domain(cpu)->cpus,
- tsk_cpus_allowed(p));
- if (new_cpu < nr_cpu_ids) {
- hmp_next_up_delay(&p->se, new_cpu);
- return new_cpu;
- }
- }
- } else {
- new_cpu = hmp_select_faster_cpu(p, prev_cpu);
- if (new_cpu != NR_CPUS) {
- hmp_next_up_delay(&p->se, new_cpu);
- return new_cpu;
- }
+ new_cpu = hmp_select_faster_cpu(p, prev_cpu);
+ if (new_cpu != NR_CPUS) {
+ hmp_next_up_delay(&p->se, new_cpu);
+ return new_cpu;
}
+ /* failed to perform HMP fork balance, use normal balance */
+ new_cpu = cpu;
}
#endif
rcu_read_unlock();
#ifdef CONFIG_SCHED_HMP
+ prev_cpu = task_cpu(p);
+
if (hmp_up_migration(prev_cpu, &new_cpu, &p->se)) {
hmp_next_up_delay(&p->se, new_cpu);
trace_sched_hmp_migrate(p, new_cpu, HMP_MIGRATE_WAKEUP);
static unsigned int hmp_up_migration(int cpu, int *target_cpu, struct sched_entity *se)
{
struct task_struct *p = task_of(se);
+ int temp_target_cpu;
u64 now;
- if (target_cpu)
- *target_cpu = NR_CPUS;
-
if (hmp_cpu_is_fastest(cpu))
return 0;
* idle CPU or 1023 for any partly-busy one.
* Be explicit about requirement for an idle CPU.
*/
- if (hmp_domain_min_load(hmp_faster_domain(cpu), target_cpu) != 0)
- return 0;
-
- if (cpumask_intersects(&hmp_faster_domain(cpu)->cpus,
- tsk_cpus_allowed(p)))
+ if (hmp_domain_min_load(hmp_faster_domain(cpu), &temp_target_cpu,
+ tsk_cpus_allowed(p)) == 0 && temp_target_cpu != NR_CPUS) {
+ if(target_cpu)
+ *target_cpu = temp_target_cpu;
return 1;
-
+ }
return 0;
}