/*
 * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Cloned from linux/arch/arm/mach-vexpress/platsmp.c
 *
 * Copyright (C) 2002 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
15 #include <linux/init.h>
16 #include <linux/errno.h>
17 #include <linux/delay.h>
18 #include <linux/device.h>
19 #include <linux/jiffies.h>
20 #include <linux/smp.h>
22 #include <linux/of_address.h>
24 #include <asm/cacheflush.h>
26 #include <asm/smp_plat.h>
27 #include <asm/smp_scu.h>
28 #include <asm/firmware.h>
35 extern void exynos4_secondary_startup(void);
38 * Set or clear the USE_DELAYED_RESET_ASSERTION option, set on Exynos4 SoCs
39 * during hot-(un)plugging CPUx.
41 * The feature can be cleared safely during first boot of secondary CPU.
43 * Exynos4 SoCs require setting USE_DELAYED_RESET_ASSERTION during powering
44 * down a CPU so the CPU idle clock down feature could properly detect global
45 * idle state when CPUx is off.
47 static void exynos_set_delayed_reset_assertion(u32 core_id, bool enable)
49 if (soc_is_exynos4()) {
52 tmp = pmu_raw_readl(EXYNOS_ARM_CORE_OPTION(core_id));
54 tmp |= S5P_USE_DELAYED_RESET_ASSERTION;
56 tmp &= ~(S5P_USE_DELAYED_RESET_ASSERTION);
57 pmu_raw_writel(tmp, EXYNOS_ARM_CORE_OPTION(core_id));
61 #ifdef CONFIG_HOTPLUG_CPU
62 static inline void cpu_leave_lowpower(u32 core_id)
67 "mrc p15, 0, %0, c1, c0, 0\n"
69 " mcr p15, 0, %0, c1, c0, 0\n"
70 " mrc p15, 0, %0, c1, c0, 1\n"
72 " mcr p15, 0, %0, c1, c0, 1\n"
74 : "Ir" (CR_C), "Ir" (0x40)
77 exynos_set_delayed_reset_assertion(core_id, false);
80 static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
82 u32 mpidr = cpu_logical_map(cpu);
83 u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
87 /* Turn the CPU off on next WFI instruction. */
88 exynos_cpu_power_down(core_id);
91 * Exynos4 SoCs require setting
92 * USE_DELAYED_RESET_ASSERTION so the CPU idle
93 * clock down feature could properly detect
94 * global idle state when CPUx is off.
96 exynos_set_delayed_reset_assertion(core_id, true);
100 if (pen_release == core_id) {
102 * OK, proper wakeup, we're done
108 * Getting here, means that we have come out of WFI without
109 * having been woken up - this shouldn't happen
111 * Just note it happening - when we're woken, we can report
117 #endif /* CONFIG_HOTPLUG_CPU */
/*
 * exynos_cpu_power_down : power down the specified cpu
 * @cpu : the cpu to power down
 *
 * Power down the specified cpu. The sequence must be finished by a
 * call to cpu_do_idle()
 */
void exynos_cpu_power_down(int cpu)
{
	pmu_raw_writel(0, EXYNOS_ARM_CORE_CONFIGURATION(cpu));
}
133 * exynos_cpu_power_up : power up the specified cpu
134 * @cpu : the cpu to power up
136 * Power up the specified cpu
138 void exynos_cpu_power_up(int cpu)
140 pmu_raw_writel(S5P_CORE_LOCAL_PWR_EN,
141 EXYNOS_ARM_CORE_CONFIGURATION(cpu));
145 * exynos_cpu_power_state : returns the power state of the cpu
146 * @cpu : the cpu to retrieve the power state from
149 int exynos_cpu_power_state(int cpu)
151 return (pmu_raw_readl(EXYNOS_ARM_CORE_STATUS(cpu)) &
152 S5P_CORE_LOCAL_PWR_EN);
/*
 * exynos_cluster_power_down : power down the specified cluster
 * @cluster : the cluster to power down
 */
void exynos_cluster_power_down(int cluster)
{
	pmu_raw_writel(0, EXYNOS_COMMON_CONFIGURATION(cluster));
}
165 * exynos_cluster_power_up : power up the specified cluster
166 * @cluster : the cluster to power up
168 void exynos_cluster_power_up(int cluster)
170 pmu_raw_writel(S5P_CORE_LOCAL_PWR_EN,
171 EXYNOS_COMMON_CONFIGURATION(cluster));
175 * exynos_cluster_power_state : returns the power state of the cluster
176 * @cluster : the cluster to retrieve the power state from
179 int exynos_cluster_power_state(int cluster)
181 return (pmu_raw_readl(EXYNOS_COMMON_STATUS(cluster)) &
182 S5P_CORE_LOCAL_PWR_EN);
185 static inline void __iomem *cpu_boot_reg_base(void)
187 if (soc_is_exynos4210() && samsung_rev() == EXYNOS4210_REV_1_1)
188 return pmu_base_addr + S5P_INFORM5;
189 return sysram_base_addr;
192 static inline void __iomem *cpu_boot_reg(int cpu)
194 void __iomem *boot_reg;
196 boot_reg = cpu_boot_reg_base();
198 return ERR_PTR(-ENODEV);
199 if (soc_is_exynos4412())
201 else if (soc_is_exynos5420() || soc_is_exynos5800())
207 * Write pen_release in a way that is guaranteed to be visible to all
208 * observers, irrespective of whether they're taking part in coherency
209 * or not. This is necessary for the hotplug code to work reliably.
211 static void write_pen_release(int val)
215 sync_cache_w(&pen_release);
218 static void __iomem *scu_base_addr(void)
220 return (void __iomem *)(S5P_VA_SCU);
/* Serializes the boot handshake between the boot CPU and a secondary. */
static DEFINE_SPINLOCK(boot_lock);
225 static void exynos_secondary_init(unsigned int cpu)
228 * let the primary processor know we're out of the
229 * pen, then head off into the C entry point
231 write_pen_release(-1);
234 * Synchronise with the boot thread.
236 spin_lock(&boot_lock);
237 spin_unlock(&boot_lock);
240 static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
242 unsigned long timeout;
243 u32 mpidr = cpu_logical_map(cpu);
244 u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
248 * Set synchronisation state between this boot processor
249 * and the secondary one
251 spin_lock(&boot_lock);
254 * The secondary processor is waiting to be released from
255 * the holding pen - release it, then wait for it to flag
256 * that it has been released by resetting pen_release.
258 * Note that "pen_release" is the hardware CPU core ID, whereas
259 * "cpu" is Linux's internal ID.
261 write_pen_release(core_id);
263 if (!exynos_cpu_power_state(core_id)) {
264 exynos_cpu_power_up(core_id);
267 /* wait max 10 ms until cpu1 is on */
268 while (exynos_cpu_power_state(core_id)
269 != S5P_CORE_LOCAL_PWR_EN) {
277 printk(KERN_ERR "cpu1 power enable failed");
278 spin_unlock(&boot_lock);
283 * Send the secondary CPU a soft interrupt, thereby causing
284 * the boot monitor to read the system wide flags register,
285 * and branch to the address found there.
288 timeout = jiffies + (1 * HZ);
289 while (time_before(jiffies, timeout)) {
290 unsigned long boot_addr;
294 boot_addr = virt_to_phys(exynos4_secondary_startup);
297 * Try to set boot address using firmware first
298 * and fall back to boot register if it fails.
300 ret = call_firmware_op(set_cpu_boot_addr, core_id, boot_addr);
301 if (ret && ret != -ENOSYS)
303 if (ret == -ENOSYS) {
304 void __iomem *boot_reg = cpu_boot_reg(core_id);
306 if (IS_ERR(boot_reg)) {
307 ret = PTR_ERR(boot_reg);
310 __raw_writel(boot_addr, boot_reg);
313 call_firmware_op(cpu_boot, core_id);
315 arch_send_wakeup_ipi_mask(cpumask_of(cpu));
317 if (pen_release == -1)
323 /* No harm if this is called during first boot of secondary CPU */
324 exynos_set_delayed_reset_assertion(core_id, false);
327 * now the secondary core is starting up let it run its
328 * calibrations, then wait for it to finish
331 spin_unlock(&boot_lock);
333 return pen_release != -1 ? ret : 0;
337 * Initialise the CPU possible map early - this describes the CPUs
338 * which may be present or become present in the system.
341 static void __init exynos_smp_init_cpus(void)
343 void __iomem *scu_base = scu_base_addr();
344 unsigned int i, ncores;
346 if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9)
347 ncores = scu_base ? scu_get_core_count(scu_base) : 1;
350 * CPU Nodes are passed thru DT and set_cpu_possible
351 * is set by "arm_dt_init_cpu_maps".
356 if (ncores > nr_cpu_ids) {
357 pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
362 for (i = 0; i < ncores; i++)
363 set_cpu_possible(i, true);
366 static void __init exynos_smp_prepare_cpus(unsigned int max_cpus)
370 exynos_sysram_init();
372 if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9)
373 scu_enable(scu_base_addr());
376 * Write the address of secondary startup into the
377 * system-wide flags register. The boot monitor waits
378 * until it receives a soft interrupt, and then the
379 * secondary CPU branches to this address.
381 * Try using firmware operation first and fall back to
382 * boot register if it fails.
384 for (i = 1; i < max_cpus; ++i) {
385 unsigned long boot_addr;
390 mpidr = cpu_logical_map(i);
391 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
392 boot_addr = virt_to_phys(exynos4_secondary_startup);
394 ret = call_firmware_op(set_cpu_boot_addr, core_id, boot_addr);
395 if (ret && ret != -ENOSYS)
397 if (ret == -ENOSYS) {
398 void __iomem *boot_reg = cpu_boot_reg(core_id);
400 if (IS_ERR(boot_reg))
402 __raw_writel(boot_addr, boot_reg);
407 #ifdef CONFIG_HOTPLUG_CPU
409 * platform-specific code to shutdown a CPU
411 * Called with IRQs disabled
413 static void exynos_cpu_die(unsigned int cpu)
416 u32 mpidr = cpu_logical_map(cpu);
417 u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
419 v7_exit_coherency_flush(louis);
421 platform_do_lowpower(cpu, &spurious);
424 * bring this CPU back into the world of cache
425 * coherency, and then restore interrupts
427 cpu_leave_lowpower(core_id);
430 pr_warn("CPU%u: %u spurious wakeup calls\n", cpu, spurious);
432 #endif /* CONFIG_HOTPLUG_CPU */
434 struct smp_operations exynos_smp_ops __initdata = {
435 .smp_init_cpus = exynos_smp_init_cpus,
436 .smp_prepare_cpus = exynos_smp_prepare_cpus,
437 .smp_secondary_init = exynos_secondary_init,
438 .smp_boot_secondary = exynos_boot_secondary,
439 #ifdef CONFIG_HOTPLUG_CPU
440 .cpu_die = exynos_cpu_die,