void init_cpu_topology(void);
void store_cpu_topology(unsigned int cpuid);
const struct cpumask *cpu_coregroup_mask(int cpu);
++++int cluster_to_logical_mask(unsigned int socket_id, cpumask_t *cluster_mask);
+++
++++ #ifdef CONFIG_DISABLE_CPU_SCHED_DOMAIN_BALANCE
++++ /* Common values for CPUs */
++++ #ifndef SD_CPU_INIT
++++ #define SD_CPU_INIT (struct sched_domain) { \
++++ .min_interval = 1, \
++++ .max_interval = 4, \
++++ .busy_factor = 64, \
++++ .imbalance_pct = 125, \
++++ .cache_nice_tries = 1, \
++++ .busy_idx = 2, \
++++ .idle_idx = 1, \
++++ .newidle_idx = 0, \
++++ .wake_idx = 0, \
++++ .forkexec_idx = 0, \
++++ \
++++ .flags = 0*SD_LOAD_BALANCE \
++++ | 1*SD_BALANCE_NEWIDLE \
++++ | 1*SD_BALANCE_EXEC \
++++ | 1*SD_BALANCE_FORK \
++++ | 0*SD_BALANCE_WAKE \
++++ | 1*SD_WAKE_AFFINE \
++++ | 0*SD_SHARE_CPUPOWER \
++++ | 0*SD_SHARE_PKG_RESOURCES \
++++ | 0*SD_SERIALIZE \
++++ , \
++++ .last_balance = jiffies, \
++++ .balance_interval = 1, \
++++ }
++++ #endif
++++ #endif /* CONFIG_DISABLE_CPU_SCHED_DOMAIN_BALANCE */
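/*
 * Illustrative reading of the initializer above (editorial note, not part of
 * the patch): the 0*FLAG / 1*FLAG idiom keeps every flag name visible while
 * encoding whether it is set, so .flags reduces to
 *
 *	SD_BALANCE_NEWIDLE | SD_BALANCE_EXEC | SD_BALANCE_FORK | SD_WAKE_AFFINE
 *
 * SD_LOAD_BALANCE is multiplied by 0 and therefore left clear, so domains
 * built from this initializer are skipped by periodic load balancing, which
 * is the point of CONFIG_DISABLE_CPU_SCHED_DOMAIN_BALANCE.
 */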
+
#else

static inline void init_cpu_topology(void) { }
static inline void store_cpu_topology(unsigned int cpuid) { }
++++static inline int cluster_to_logical_mask(unsigned int socket_id,
++++	cpumask_t *cluster_mask) { return -EINVAL; }

#endif
	printk(KERN_INFO "CPU%u: thread %d, cpu %d, socket %d, mpidr %x\n",
		cpuid, cpu_topology[cpuid].thread_id,
		cpu_topology[cpuid].core_id,
		cpu_topology[cpuid].socket_id, mpidr);
}
++++
++++ #ifdef CONFIG_SCHED_HMP
++++
++++ static const char * const little_cores[] = {
++++ "arm,cortex-a7",
++++ NULL,
++++ };
++++
++++ static bool is_little_cpu(struct device_node *cn)
++++ {
++++ const char * const *lc;
++++ for (lc = little_cores; *lc; lc++)
++++ if (of_device_is_compatible(cn, *lc))
++++ return true;
++++ return false;
++++ }
++++
++++ void __init arch_get_fast_and_slow_cpus(struct cpumask *fast,
++++ struct cpumask *slow)
++++ {
++++ struct device_node *cn = NULL;
++++ int cpu;
++++
++++ cpumask_clear(fast);
++++ cpumask_clear(slow);
++++
++++ /*
++++ * Use the config options if they are given. This helps testing
++++ * HMP scheduling on systems without a big.LITTLE architecture.
++++ */
++++ if (strlen(CONFIG_HMP_FAST_CPU_MASK) && strlen(CONFIG_HMP_SLOW_CPU_MASK)) {
++++ if (cpulist_parse(CONFIG_HMP_FAST_CPU_MASK, fast))
++++ WARN(1, "Failed to parse HMP fast cpu mask!\n");
++++ if (cpulist_parse(CONFIG_HMP_SLOW_CPU_MASK, slow))
++++ WARN(1, "Failed to parse HMP slow cpu mask!\n");
++++ return;
++++ }
++++
++++ /*
++++ * Else, parse device tree for little cores.
++++ */
++++ while ((cn = of_find_node_by_type(cn, "cpu"))) {
++++
++++ const u32 *mpidr;
++++ int len;
++++
++++ mpidr = of_get_property(cn, "reg", &len);
++++ if (!mpidr || len != 4) {
++++ pr_err("* %s missing reg property\n", cn->full_name);
++++ continue;
++++ }
++++
++++ cpu = get_logical_index(be32_to_cpup(mpidr));
++++ if (cpu == -EINVAL) {
++++ pr_err("couldn't get logical index for mpidr %x\n",
++++ be32_to_cpup(mpidr));
++++ break;
++++ }
++++
++++ if (is_little_cpu(cn))
++++ cpumask_set_cpu(cpu, slow);
++++ else
++++ cpumask_set_cpu(cpu, fast);
++++ }
++++
++++ if (!cpumask_empty(fast) && !cpumask_empty(slow))
++++ return;
++++
++++ /*
++++ * We didn't find both big and little cores so let's call all cores
++++ * fast as this will keep the system running, with all cores being
++++ * treated equal.
++++ */
++++ cpumask_setall(fast);
++++ cpumask_clear(slow);
++++ }
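/*
 * Illustrative example (hypothetical device tree node, not from the patch):
 * a cpu node that the loop above would classify as "slow". The "reg" value
 * is the MPIDR passed to get_logical_index(); the compatible string is
 * matched against little_cores[] by is_little_cpu().
 *
 *	cpu@100 {
 *		device_type = "cpu";
 *		compatible = "arm,cortex-a7";
 *		reg = <0x100>;
 *	};
 *
 * Alternatively, CONFIG_HMP_FAST_CPU_MASK / CONFIG_HMP_SLOW_CPU_MASK take
 * cpulist strings such as "0-1" and "2-3", which cpulist_parse() converts
 * into the fast and slow masks directly.
 */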
++++
++++ struct cpumask hmp_slow_cpu_mask;
++++
++++ void __init arch_get_hmp_domains(struct list_head *hmp_domains_list)
++++ {
++++ struct cpumask hmp_fast_cpu_mask;
++++ struct hmp_domain *domain;
++++
++++ arch_get_fast_and_slow_cpus(&hmp_fast_cpu_mask, &hmp_slow_cpu_mask);
++++
++++ /*
++++ * Initialize hmp_domains
++++ * Must be ordered with respect to compute capacity.
++++ * Fastest domain at head of list.
++++ */
++++ if (!cpumask_empty(&hmp_slow_cpu_mask)) {
++++ domain = kmalloc(sizeof(struct hmp_domain), GFP_KERNEL);
++++ cpumask_copy(&domain->possible_cpus, &hmp_slow_cpu_mask);
++++ cpumask_and(&domain->cpus, cpu_online_mask, &domain->possible_cpus);
++++ list_add(&domain->hmp_domains, hmp_domains_list);
++++ }
++++ domain = kmalloc(sizeof(struct hmp_domain), GFP_KERNEL);
++++ cpumask_copy(&domain->possible_cpus, &hmp_fast_cpu_mask);
++++ cpumask_and(&domain->cpus, cpu_online_mask, &domain->possible_cpus);
++++ list_add(&domain->hmp_domains, hmp_domains_list);
++++ }
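/*
 * Illustrative usage sketch (hypothetical caller, not part of the patch):
 * because list_add() inserts at the head and the slow domain is added first,
 * the fast domain ends up at the head of the list, satisfying the "fastest
 * domain at head of list" ordering required above.
 */
static void __init example_walk_hmp_domains(struct list_head *hmp_domains_list)
{
	struct hmp_domain *domain;

	list_for_each_entry(domain, hmp_domains_list, hmp_domains)
		pr_info("hmp domain starting at cpu %u\n",
			cpumask_first(&domain->possible_cpus));
}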
++++ #endif /* CONFIG_SCHED_HMP */
++++
++++
++++/*
++++ * cluster_to_logical_mask - return cpu logical mask of CPUs in a cluster
++++ * @socket_id: cluster HW identifier
++++ * @cluster_mask: the cpumask location to be initialized, modified by the
++++ * function only if return value == 0
++++ *
++++ * Return:
++++ *
++++ * 0 on success
++++ * -EINVAL if cluster_mask is NULL or there is no record matching socket_id
++++ */
++++int cluster_to_logical_mask(unsigned int socket_id, cpumask_t *cluster_mask)
++++{
++++ int cpu;
++++
++++ if (!cluster_mask)
++++ return -EINVAL;
++++
++++ for_each_online_cpu(cpu)
++++ if (socket_id == topology_physical_package_id(cpu)) {
++++ cpumask_copy(cluster_mask, topology_core_cpumask(cpu));
++++ return 0;
++++ }
++++
++++ return -EINVAL;
++++}
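/*
 * Illustrative usage sketch (hypothetical caller, not part of the patch):
 * cluster_mask is only written when the function returns 0, so callers can
 * pass an uninitialized mask and rely on the return value.
 */
static int __init example_report_cluster0(void)
{
	cpumask_t mask;
	int err;

	err = cluster_to_logical_mask(0, &mask);
	if (err)
		return err;

	pr_info("cluster 0 spans %u logical cpus\n", cpumask_weight(&mask));
	return 0;
}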
++++
/*
* init_cpu_topology is called at boot when only one cpu is running
 * which prevents simultaneous write access to cpu_topology array