/*
 * arch/arm64/kernel/topology.c
 *
 * Copyright (C) 2011,2013,2014 Linaro Limited.
 *
 * Based on the arm32 version written by Vincent Guittot in turn based on
 * arch/sh/kernel/topology.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/node.h>
#include <linux/nodemask.h>
#include <linux/of.h>
#include <linux/sched.h>
#include <linux/sched_energy.h>

#include <asm/cputype.h>
#include <asm/topology.h>

static DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;

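/*
 * scale_cpu_capacity() folds two factors together: the static,
 * microarchitectural capacity of the CPU stored in cpu_scale (normalised
 * so the biggest CPU in the system is SCHED_CAPACITY_SCALE, i.e. 1024)
 * and, when CONFIG_CPU_FREQ is set, a scale factor reflecting any cpufreq
 * policy cap on the maximum frequency. As an illustrative example, a
 * 1024-capacity CPU whose policy max is half its hardware max yields
 * 1024 * 512 >> 10 = 512.
 */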
unsigned long scale_cpu_capacity(struct sched_domain *sd, int cpu)
{
#ifdef CONFIG_CPU_FREQ
	unsigned long max_freq_scale = cpufreq_scale_max_freq_capacity(cpu);

	return per_cpu(cpu_scale, cpu) * max_freq_scale >> SCHED_CAPACITY_SHIFT;
#else
	return per_cpu(cpu_scale, cpu);
#endif
}

static void set_capacity_scale(unsigned int cpu, unsigned long capacity)
{
	per_cpu(cpu_scale, cpu) = capacity;
}

static int __init get_cpu_for_node(struct device_node *node)
{
	struct device_node *cpu_node;
	int cpu;

	cpu_node = of_parse_phandle(node, "cpu", 0);
	if (!cpu_node)
		return -1;

	for_each_possible_cpu(cpu) {
		if (of_get_cpu_node(cpu, NULL) == cpu_node) {
			of_node_put(cpu_node);
			return cpu;
		}
	}

	pr_crit("Unable to find CPU node for %s\n", cpu_node->full_name);

	of_node_put(cpu_node);
	return -1;
}

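/*
 * A cpu-map "core" node either carries thread<N> subnodes (SMT parts) or
 * points at a CPU directly through a "cpu" phandle. parse_core() fills in
 * cpu_topology[] for both layouts and rejects cores that mix the two.
 */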
static int __init parse_core(struct device_node *core, int cluster_id,
			     int core_id)
{
	char name[10];
	bool leaf = true;
	int i = 0;
	int cpu;
	struct device_node *t;

	do {
		snprintf(name, sizeof(name), "thread%d", i);
		t = of_get_child_by_name(core, name);
		if (t) {
			leaf = false;
			cpu = get_cpu_for_node(t);
			if (cpu >= 0) {
				cpu_topology[cpu].cluster_id = cluster_id;
				cpu_topology[cpu].core_id = core_id;
				cpu_topology[cpu].thread_id = i;
			} else {
				pr_err("%s: Can't get CPU for thread\n",
				       t->full_name);
				of_node_put(t);
				return -EINVAL;
			}
			of_node_put(t);
		}
		i++;
	} while (t);

	cpu = get_cpu_for_node(core);
	if (cpu >= 0) {
		if (!leaf) {
			pr_err("%s: Core has both threads and CPU\n",
			       core->full_name);
			return -EINVAL;
		}

		cpu_topology[cpu].cluster_id = cluster_id;
		cpu_topology[cpu].core_id = core_id;
	} else if (leaf) {
		pr_err("%s: Can't get CPU for leaf core\n", core->full_name);
		return -EINVAL;
	}

	return 0;
}

static int __init parse_cluster(struct device_node *cluster, int depth)
{
	char name[10];
	bool leaf = true;
	bool has_cores = false;
	struct device_node *c;
	static int cluster_id __initdata;
	int core_id = 0;
	int i, ret;

	/*
	 * First check for child clusters; we currently ignore any
	 * information about the nesting of clusters and present the
	 * scheduler with a flat list of them.
	 */
	i = 0;
	do {
		snprintf(name, sizeof(name), "cluster%d", i);
		c = of_get_child_by_name(cluster, name);
		if (c) {
			leaf = false;
			ret = parse_cluster(c, depth + 1);
			of_node_put(c);
			if (ret != 0)
				return ret;
		}
		i++;
	} while (c);

	/* Now check for cores */
	i = 0;
	do {
		snprintf(name, sizeof(name), "core%d", i);
		c = of_get_child_by_name(cluster, name);
		if (c) {
			has_cores = true;

			if (depth == 0) {
				pr_err("%s: cpu-map children should be clusters\n",
				       c->full_name);
				of_node_put(c);
				return -EINVAL;
			}

			if (leaf) {
				ret = parse_core(c, cluster_id, core_id++);
			} else {
				pr_err("%s: Non-leaf cluster with core %s\n",
				       cluster->full_name, name);
				ret = -EINVAL;
			}

			of_node_put(c);
			if (ret != 0)
				return ret;
		}
		i++;
	} while (c);

	if (leaf && !has_cores)
		pr_warn("%s: empty cluster\n", cluster->full_name);

	if (leaf)
		cluster_id++;

	return 0;
}

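/*
 * Illustrative cpu-map layout consumed by parse_cluster()/parse_core()
 * above and looked up by parse_dt_topology() below. Node and label names
 * here are examples, not taken from any particular platform:
 *
 *	cpus {
 *		cpu-map {
 *			cluster0 {
 *				core0 { cpu = <&CPU0>; };
 *				core1 { cpu = <&CPU1>; };
 *			};
 *			cluster1 {
 *				core0 { cpu = <&CPU2>; };
 *				core1 { cpu = <&CPU3>; };
 *			};
 *		};
 *	};
 */
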
static int __init parse_dt_topology(void)
{
	struct device_node *cn, *map;
	int ret = 0;
	int cpu;

	cn = of_find_node_by_path("/cpus");
	if (!cn) {
		pr_err("No CPU information found in DT\n");
		return 0;
	}

	/*
	 * When topology is provided cpu-map is essentially a root
	 * cluster with restricted subnodes.
	 */
	map = of_get_child_by_name(cn, "cpu-map");
	if (!map)
		goto out;

	ret = parse_cluster(map, 0);
	if (ret != 0)
		goto out_map;

	/*
	 * Check that all cores are in the topology; the SMP code will
	 * only mark cores described in the DT as possible.
	 */
	for_each_possible_cpu(cpu)
		if (cpu_topology[cpu].cluster_id == -1)
			ret = -EINVAL;

out_map:
	of_node_put(map);
out:
	of_node_put(cn);
	return ret;
}

/*
 * cpu topology table
 */
struct cpu_topology cpu_topology[NR_CPUS];
EXPORT_SYMBOL_GPL(cpu_topology);

/* sd energy functions */
static inline
const struct sched_group_energy * const cpu_cluster_energy(int cpu)
{
	struct sched_group_energy *sge = sge_array[cpu][SD_LEVEL1];

	if (!sge) {
		pr_warn("Invalid sched_group_energy for Cluster%d\n", cpu);
		return NULL;
	}

	return sge;
}

static inline
const struct sched_group_energy * const cpu_core_energy(int cpu)
{
	struct sched_group_energy *sge = sge_array[cpu][SD_LEVEL0];

	if (!sge) {
		pr_warn("Invalid sched_group_energy for CPU%d\n", cpu);
		return NULL;
	}

	return sge;
}

const struct cpumask *cpu_coregroup_mask(int cpu)
{
	return &cpu_topology[cpu].core_sibling;
}

static inline int cpu_corepower_flags(void)
{
	return SD_SHARE_PKG_RESOURCES | SD_SHARE_POWERDOMAIN |
	       SD_SHARE_CAP_STATES;
}

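/*
 * Scheduler topology levels, innermost first: with CONFIG_SCHED_MC, MC
 * groups the cores of one cluster (the core_sibling mask) and DIE spans
 * all CPUs. The energy callbacks attach the per-level energy model used
 * by energy-aware scheduling, and the terminating { NULL, } entry is
 * what stops the scheduler's walk of this table.
 */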
static struct sched_domain_topology_level arm64_topology[] = {
#ifdef CONFIG_SCHED_MC
	{ cpu_coregroup_mask, cpu_corepower_flags, cpu_core_energy, SD_INIT_NAME(MC) },
#endif
	{ cpu_cpu_mask, NULL, cpu_cluster_energy, SD_INIT_NAME(DIE) },
	{ NULL, },
};

static void update_cpu_capacity(unsigned int cpu)
{
	unsigned long capacity = SCHED_CAPACITY_SCALE;

	if (cpu_core_energy(cpu)) {
		int max_cap_idx = cpu_core_energy(cpu)->nr_cap_states - 1;

		capacity = cpu_core_energy(cpu)->cap_states[max_cap_idx].cap;
	}

	set_capacity_scale(cpu, capacity);

	pr_info("CPU%d: update cpu_capacity %lu\n",
		cpu, arch_scale_cpu_capacity(NULL, cpu));
}

static void update_siblings_masks(unsigned int cpuid)
{
	struct cpu_topology *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
	int cpu;

	/* update core and thread sibling masks */
	for_each_possible_cpu(cpu) {
		cpu_topo = &cpu_topology[cpu];

		if (cpuid_topo->cluster_id != cpu_topo->cluster_id)
			continue;

		cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
		if (cpu != cpuid)
			cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);

		if (cpuid_topo->core_id != cpu_topo->core_id)
			continue;

		cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
		if (cpu != cpuid)
			cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
	}
}

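/*
 * Worked example of the MPIDR decoding below on a non-multithreaded part
 * (MT bit clear): Aff0 = 1, Aff1 = 1, Aff2 = Aff3 = 0 maps to core 1 of
 * cluster 1, with thread_id left at -1.
 */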
void store_cpu_topology(unsigned int cpuid)
{
	struct cpu_topology *cpuid_topo = &cpu_topology[cpuid];
	u64 mpidr;

	if (cpuid_topo->cluster_id != -1)
		goto topology_populated;

	mpidr = read_cpuid_mpidr();

	/* Uniprocessor systems can rely on default topology values */
	if (mpidr & MPIDR_UP_BITMASK)
		return;

	/* Create cpu topology mapping based on MPIDR. */
	if (mpidr & MPIDR_MT_BITMASK) {
		/* Multiprocessor system : Multi-threads per core */
		cpuid_topo->thread_id  = MPIDR_AFFINITY_LEVEL(mpidr, 0);
		cpuid_topo->core_id    = MPIDR_AFFINITY_LEVEL(mpidr, 1);
		cpuid_topo->cluster_id = MPIDR_AFFINITY_LEVEL(mpidr, 2) |
					 MPIDR_AFFINITY_LEVEL(mpidr, 3) << 8;
	} else {
		/* Multiprocessor system : Single-thread per core */
		cpuid_topo->thread_id  = -1;
		cpuid_topo->core_id    = MPIDR_AFFINITY_LEVEL(mpidr, 0);
		cpuid_topo->cluster_id = MPIDR_AFFINITY_LEVEL(mpidr, 1) |
					 MPIDR_AFFINITY_LEVEL(mpidr, 2) << 8 |
					 MPIDR_AFFINITY_LEVEL(mpidr, 3) << 16;
	}

	pr_debug("CPU%u: cluster %d core %d thread %d mpidr %#016llx\n",
		 cpuid, cpuid_topo->cluster_id, cpuid_topo->core_id,
		 cpuid_topo->thread_id, mpidr);

topology_populated:
	update_siblings_masks(cpuid);
	update_cpu_capacity(cpuid);
}

static void __init reset_cpu_topology(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		struct cpu_topology *cpu_topo = &cpu_topology[cpu];

		cpu_topo->thread_id = -1;
		cpu_topo->core_id = 0;
		cpu_topo->cluster_id = -1;

		cpumask_clear(&cpu_topo->core_sibling);
		cpumask_set_cpu(cpu, &cpu_topo->core_sibling);
		cpumask_clear(&cpu_topo->thread_sibling);
		cpumask_set_cpu(cpu, &cpu_topo->thread_sibling);
	}
}

void __init init_cpu_topology(void)
{
	reset_cpu_topology();

	/*
	 * Discard anything that was parsed if we hit an error so we
	 * don't use partial information.
	 */
	if (of_have_populated_dt() && parse_dt_topology())
		reset_cpu_topology();

	set_sched_topology(arm64_topology);

	init_sched_energy_costs();
}