/*
 * arch/arm/mach-vexpress/tc2_pm.c - TC2 power management support
 *
 * Created by:	Nicolas Pitre, October 2012
 * Copyright:	(C) 2012-2013  Linaro Limited
 *
 * Some portions of this file were originally written by Achin Gupta
 * Copyright:	(C) 2012  ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/delay.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/irqchip/arm-gic.h>

#include <asm/mcpm.h>
#include <asm/proc-fns.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/cp15.h>

#include <linux/arm-cci.h>

#include "spc.h"

/* SCC conf registers */
#define RESET_CTRL		0x018
#define RESET_A15_NCORERESET(cpu)	(1 << (2 + (cpu)))
#define RESET_A7_NCORERESET(cpu)	(1 << (16 + (cpu)))

#define A15_CONF		0x400
#define A7_CONF			0x500
#define SYS_INFO		0x700
#define SPC_BASE		0xb00

static void __iomem *scc;

/*
 * We can't use regular spinlocks. In the switcher case, it is possible
 * for an outbound CPU to call power_down() after its inbound counterpart
 * is already live using the same logical CPU number which trips lockdep
 * debugging.
 */
static arch_spinlock_t tc2_pm_lock = __ARCH_SPIN_LOCK_UNLOCKED;

#define TC2_CLUSTERS			2
#define TC2_MAX_CPUS_PER_CLUSTER	3

static unsigned int tc2_nr_cpus[TC2_CLUSTERS];

/* Keep per-cpu usage count to cope with unordered up/down requests */
static int tc2_pm_use_count[TC2_MAX_CPUS_PER_CLUSTER][TC2_CLUSTERS];

#define tc2_cluster_unused(cluster) \
	(!tc2_pm_use_count[0][cluster] && \
	 !tc2_pm_use_count[1][cluster] && \
	 !tc2_pm_use_count[2][cluster])

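/*
 * Note: the three explicit slots tested above mirror
 * TC2_MAX_CPUS_PER_CLUSTER; the macro would need updating if that
 * constant ever changed.
 */
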
static int tc2_pm_power_up(unsigned int cpu, unsigned int cluster)
{
	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	if (cluster >= TC2_CLUSTERS || cpu >= tc2_nr_cpus[cluster])
		return -EINVAL;

	/*
	 * Since this is called with IRQs enabled, and no arch_spin_lock_irq
	 * variant exists, we need to disable IRQs manually here.
	 */
	local_irq_disable();
	arch_spin_lock(&tc2_pm_lock);

	if (tc2_cluster_unused(cluster))
		ve_spc_powerdown(cluster, false);

	tc2_pm_use_count[cpu][cluster]++;
	if (tc2_pm_use_count[cpu][cluster] == 1) {
		ve_spc_set_resume_addr(cluster, cpu,
				       virt_to_phys(mcpm_entry_point));
		ve_spc_cpu_wakeup_irq(cluster, cpu, true);
	} else if (tc2_pm_use_count[cpu][cluster] != 2) {
		/*
		 * The only possible values are:
		 * 0 = CPU down
		 * 1 = CPU (still) up
		 * 2 = CPU requested to be up before it had a chance
		 *     to actually make itself down.
		 * Any other value is a bug.
		 */
		BUG();
	}

	arch_spin_unlock(&tc2_pm_lock);
	local_irq_enable();

	return 0;
}

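/*
 * tc2_pm_down() implements the outbound side of the MCPM state machine:
 * mark this CPU as going down, drop its use count, disable the GIC CPU
 * interface, flush the cache (louis for a lone CPU, all levels plus CCI
 * teardown when the whole cluster goes down), and finally WFI so the
 * power controller can cut power.
 */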
static void tc2_pm_down(u64 residency)
{
	unsigned int mpidr, cpu, cluster;
	bool last_man = false, skip_wfi = false;

	mpidr = read_cpuid_mpidr();
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	BUG_ON(cluster >= TC2_CLUSTERS || cpu >= TC2_MAX_CPUS_PER_CLUSTER);

	__mcpm_cpu_going_down(cpu, cluster);

	arch_spin_lock(&tc2_pm_lock);
	BUG_ON(__mcpm_cluster_state(cluster) != CLUSTER_UP);
	tc2_pm_use_count[cpu][cluster]--;
	if (tc2_pm_use_count[cpu][cluster] == 0) {
		ve_spc_cpu_wakeup_irq(cluster, cpu, true);
		if (tc2_cluster_unused(cluster)) {
			ve_spc_powerdown(cluster, true);
			ve_spc_global_wakeup_irq(true);
			last_man = true;
		}
	} else if (tc2_pm_use_count[cpu][cluster] == 1) {
		/*
		 * A power_up request went ahead of us.
		 * Even if we do not want to shut this CPU down,
		 * the caller expects a certain state as if the WFI
		 * was aborted.  So let's continue with cache cleaning.
		 */
		skip_wfi = true;
	} else
		BUG();

	/*
	 * If the CPU is committed to power down, make sure
	 * the power controller will be in charge of waking it
	 * up upon IRQ, i.e. IRQ lines are cut from the GIC CPU IF
	 * to the CPU by disabling the GIC CPU IF, to prevent wfi
	 * from completing execution behind the power controller's back.
	 */
	if (!skip_wfi)
		gic_cpu_if_down();

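	/*
	 * Only the last man may tear the cluster down.
	 * __mcpm_outbound_enter_critical() arbitrates against a racing
	 * inbound CPU; if it fails, the cluster must stay up and we only
	 * clean our own cache level in the else branch below.
	 */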
	if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
		arch_spin_unlock(&tc2_pm_lock);

		if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A15) {
			/*
			 * On the Cortex-A15 we need to disable
			 * L2 prefetching before flushing the cache.
			 */
			asm volatile(
			"mcr	p15, 1, %0, c15, c0, 3 \n\t"
			"isb	\n\t"
			"dsb	"
			: : "r" (0x400) );
		}

		v7_exit_coherency_flush(all);

		cci_disable_port_by_cpu(mpidr);

		__mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
	} else {
		/*
		 * If last man then undo any setup done previously.
		 */
		if (last_man) {
			ve_spc_powerdown(cluster, false);
			ve_spc_global_wakeup_irq(false);
		}

		arch_spin_unlock(&tc2_pm_lock);

		v7_exit_coherency_flush(louis);
	}

	__mcpm_cpu_down(cpu, cluster);

	/* Now we are prepared for power-down, do it: */
	if (!skip_wfi)
		wfi();

	/* Not dead at this point?  Let our caller cope. */
}

static void tc2_pm_power_down(void)
{
	tc2_pm_down(0);
}

static int tc2_core_in_reset(unsigned int cpu, unsigned int cluster)
{
	u32 mask = cluster ?
		  RESET_A7_NCORERESET(cpu)
		: RESET_A15_NCORERESET(cpu);

	return !(readl_relaxed(scc + RESET_CTRL) & mask);
}

#define POLL_MSEC 10
#define TIMEOUT_MSEC 1000

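/*
 * The SPC completes a powerdown asynchronously, so the waiter below
 * polls the reset/WFI status at POLL_MSEC intervals rather than
 * expecting an interrupt, and gives up after TIMEOUT_MSEC.
 */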
static int tc2_pm_wait_for_powerdown(unsigned int cpu, unsigned int cluster)
{
	unsigned tries;

	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	BUG_ON(cluster >= TC2_CLUSTERS || cpu >= TC2_MAX_CPUS_PER_CLUSTER);

	for (tries = 0; tries < TIMEOUT_MSEC / POLL_MSEC; ++tries) {
		/*
		 * Only examine the hardware state if the target CPU has
		 * caught up at least as far as tc2_pm_down():
		 */
		if (ACCESS_ONCE(tc2_pm_use_count[cpu][cluster]) == 0) {
			pr_debug("%s(cpu=%u, cluster=%u): RESET_CTRL = 0x%08X\n",
				 __func__, cpu, cluster,
				 readl_relaxed(scc + RESET_CTRL));

			/*
			 * We need the CPU to reach WFI, but the power
			 * controller may put the cluster in reset and
			 * power it off as soon as that happens, before
			 * we have a chance to see STANDBYWFI.
			 *
			 * So we need to check for both conditions:
			 */
			if (tc2_core_in_reset(cpu, cluster) ||
			    ve_spc_cpu_in_wfi(cpu, cluster))
				return 0; /* success: the CPU is halted */
		}

		/* Otherwise, wait and retry: */
		msleep(POLL_MSEC);
	}

	return -ETIMEDOUT; /* timeout */
}

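/*
 * Suspend differs from a plain power_down only in that a resume address
 * is programmed first, so the CPU re-enters the kernel through
 * mcpm_entry_point when its wakeup IRQ arrives.
 */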
static void tc2_pm_suspend(u64 residency)
{
	unsigned int mpidr, cpu, cluster;

	mpidr = read_cpuid_mpidr();
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	ve_spc_set_resume_addr(cluster, cpu, virt_to_phys(mcpm_entry_point));
	tc2_pm_down(residency);
}

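/*
 * Runs on a CPU that has just come (back) up: cancel any cluster-level
 * powerdown still pending, reinstate the use count after a resume from
 * suspend, and undo the wakeup IRQ routing and resume address that were
 * set on the way down.
 */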
static void tc2_pm_powered_up(void)
{
	unsigned int mpidr, cpu, cluster;
	unsigned long flags;

	mpidr = read_cpuid_mpidr();
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	BUG_ON(cluster >= TC2_CLUSTERS || cpu >= TC2_MAX_CPUS_PER_CLUSTER);

	local_irq_save(flags);
	arch_spin_lock(&tc2_pm_lock);

	if (tc2_cluster_unused(cluster)) {
		ve_spc_powerdown(cluster, false);
		ve_spc_global_wakeup_irq(false);
	}

	if (!tc2_pm_use_count[cpu][cluster])
		tc2_pm_use_count[cpu][cluster] = 1;

	ve_spc_cpu_wakeup_irq(cluster, cpu, false);
	ve_spc_set_resume_addr(cluster, cpu, 0);

	arch_spin_unlock(&tc2_pm_lock);
	local_irq_restore(flags);
}

static const struct mcpm_platform_ops tc2_pm_power_ops = {
	.power_up		= tc2_pm_power_up,
	.power_down		= tc2_pm_power_down,
	.wait_for_powerdown	= tc2_pm_wait_for_powerdown,
	.suspend		= tc2_pm_suspend,
	.powered_up		= tc2_pm_powered_up,
};

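/*
 * These ops are handed to the MCPM core by mcpm_platform_register() in
 * tc2_pm_init() below; MCPM then uses them as the low-level backend for
 * CPU bringup, hotplug and idle/suspend powerdown.
 */
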
static bool __init tc2_pm_usage_count_init(void)
{
	unsigned int mpidr, cpu, cluster;

	mpidr = read_cpuid_mpidr();
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	if (cluster >= TC2_CLUSTERS || cpu >= tc2_nr_cpus[cluster]) {
		pr_err("%s: boot CPU is out of bounds!\n", __func__);
		return false;
	}
	tc2_pm_use_count[cpu][cluster] = 1;
	return true;
}

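/*
 * tc2_pm_power_up_setup() below is entered from the MCPM low-level
 * entry code with the MMU and caches still off, hence the __naked,
 * pure-assembly body: r0 holds the affinity level being brought up,
 * and only level 1 (the cluster) needs its CCI snoop port enabled.
 */
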
/*
 * Enable cluster-level coherency, in preparation for turning on the MMU.
 */
static void __naked tc2_pm_power_up_setup(unsigned int affinity_level)
{
	asm volatile (" \n"
"	cmp	r0, #1 \n"
"	bxne	lr \n"
"	b	cci_enable_port_for_self ");
}

static void __init tc2_cache_off(void)
{
	pr_info("TC2: disabling cache during MCPM loopback test\n");
	if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A15) {
		/* disable L2 prefetching on the Cortex-A15 */
		asm volatile(
		"mcr	p15, 1, %0, c15, c0, 3 \n\t"
		"isb	\n\t"
		"dsb	"
		: : "r" (0x400) );
	}
	v7_exit_coherency_flush(all);
	cci_disable_port_by_cpu(read_cpuid_mpidr());
}

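/*
 * tc2_cache_off() is the cache-disable hook passed to mcpm_loopback()
 * in tc2_pm_init() below: it mimics the cache/CCI teardown of a real
 * powerdown so the loopback can verify that tc2_pm_power_up_setup()
 * is able to re-enable the CCI on the way back in.
 */
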
static int __init tc2_pm_init(void)
{
	int ret, irq;
	u32 a15_cluster_id, a7_cluster_id, sys_info;
	struct device_node *np;

	/*
	 * The power management-related features are hidden behind
	 * SCC registers. We need to extract runtime information like
	 * cluster ids and the number of CPUs actually available in
	 * each cluster.
	 */
	np = of_find_compatible_node(NULL, NULL,
				     "arm,vexpress-scc,v2p-ca15_a7");
	scc = of_iomap(np, 0);
	if (!scc)
		return -ENODEV;

	a15_cluster_id = readl_relaxed(scc + A15_CONF) & 0xf;
	a7_cluster_id = readl_relaxed(scc + A7_CONF) & 0xf;
	if (a15_cluster_id >= TC2_CLUSTERS || a7_cluster_id >= TC2_CLUSTERS)
		return -EINVAL;

	sys_info = readl_relaxed(scc + SYS_INFO);
	tc2_nr_cpus[a15_cluster_id] = (sys_info >> 16) & 0xf;
	tc2_nr_cpus[a7_cluster_id] = (sys_info >> 20) & 0xf;

	irq = irq_of_parse_and_map(np, 0);

	/*
	 * A subset of the SCC registers is also used to communicate
	 * with the SPC (power controller). We need to be able to
	 * drive it very early in the boot process to power up
	 * processors, so we initialize the SPC driver here.
	 */
	ret = ve_spc_init(scc + SPC_BASE, a15_cluster_id, irq);
	if (ret)
		return ret;

	if (!cci_probed())
		return -ENODEV;

	if (!tc2_pm_usage_count_init())
		return -EINVAL;

	ret = mcpm_platform_register(&tc2_pm_power_ops);
	if (!ret) {
		mcpm_sync_init(tc2_pm_power_up_setup);
		/* test if we can (re)enable the CCI on our own */
		BUG_ON(mcpm_loopback(tc2_cache_off) != 0);
		pr_info("TC2 power management initialized\n");
	}

	return ret;
}

early_initcall(tc2_pm_init);