/*
 *  linux/arch/arm/kernel/smp.c
 *
 *  Copyright (C) 2002 ARM Limited, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/ftrace.h>

#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/percpu.h>
#include <linux/clockchips.h>
#include <linux/completion.h>

#include <linux/atomic.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
#include <asm/localtimer.h>

/*
 * as from 2.5, kernels no longer have an init_tasks structure
 * so we need some other way of telling a new secondary core
 * where to place its SVC stack
 */
struct secondary_data secondary_data;

enum ipi_msg_type {
	IPI_TIMER = 2,
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CALL_FUNC_SINGLE,
	IPI_CPU_STOP,
};
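
/*
 * do_IPI() below indexes its per-CPU statistics by (ipinr - IPI_TIMER),
 * so NR_IPI in <asm/hardirq.h> is expected to match the number of
 * entries in this enum.
 */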

int __cpuinit __cpu_up(unsigned int cpu)
{
	struct cpuinfo_arm *ci = &per_cpu(cpu_data, cpu);
	struct task_struct *idle = ci->idle;
	pgd_t *pgd;
	int ret;

	/*
	 * Spawn a new process manually, if not already done.
	 * Grab a pointer to its task struct so we can mess with it
	 */
	if (!idle) {
		idle = fork_idle(cpu);
		if (IS_ERR(idle)) {
			printk(KERN_ERR "CPU%u: fork() failed\n", cpu);
			return PTR_ERR(idle);
		}
		ci->idle = idle;
	} else {
		/*
		 * Since this idle thread is being re-used, call
		 * init_idle() to reinitialize the thread structure.
		 */
		init_idle(idle, cpu);
	}

	/*
	 * Allocate initial page tables to allow the new CPU to
	 * enable the MMU safely.  This essentially means a set
	 * of our "standard" page tables, with the addition of
	 * a 1:1 mapping for the physical address of the kernel.
	 */
	pgd = pgd_alloc(&init_mm);
	if (!pgd)
		return -ENOMEM;

	if (PHYS_OFFSET != PAGE_OFFSET) {
#ifndef CONFIG_HOTPLUG_CPU
		identity_mapping_add(pgd, __pa(__init_begin), __pa(__init_end));
#endif
		identity_mapping_add(pgd, __pa(_stext), __pa(_etext));
		identity_mapping_add(pgd, __pa(_sdata), __pa(_edata));
	}

	/*
	 * We need to tell the secondary core where to find
	 * its stack and the page tables.
	 */
	secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
	secondary_data.pgdir = virt_to_phys(pgd);
	secondary_data.swapper_pg_dir = virt_to_phys(swapper_pg_dir);
	__cpuc_flush_dcache_area(&secondary_data, sizeof(secondary_data));
	outer_clean_range(__pa(&secondary_data), __pa(&secondary_data + 1));
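	/*
	 * The flushes above push secondary_data out of the L1 and outer
	 * caches: the secondary core starts with the MMU and caches off
	 * and reads secondary_data with uncached accesses.
	 */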

	/*
	 * Now bring the CPU into our world.
	 */
	ret = boot_secondary(cpu, idle);
	if (ret == 0) {
		unsigned long timeout;

		/*
		 * CPU was successfully started, wait for it
		 * to come online or time out.
		 */
		timeout = jiffies + HZ;
		while (time_before(jiffies, timeout)) {
			if (cpu_online(cpu))
				break;

			udelay(10);
			barrier();
		}

		if (!cpu_online(cpu)) {
			pr_crit("CPU%u: failed to come online\n", cpu);
			ret = -EIO;
		}
	} else {
		pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
	}

	secondary_data.stack = NULL;
	secondary_data.pgdir = 0;

	if (PHYS_OFFSET != PAGE_OFFSET) {
#ifndef CONFIG_HOTPLUG_CPU
		identity_mapping_del(pgd, __pa(__init_begin), __pa(__init_end));
#endif
		identity_mapping_del(pgd, __pa(_stext), __pa(_etext));
		identity_mapping_del(pgd, __pa(_sdata), __pa(_edata));
	}
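
	/*
	 * By now a successfully booted secondary runs on swapper_pg_dir
	 * (see secondary_start_kernel()), so the temporary page tables
	 * and their 1:1 mappings can be torn down and freed.
	 */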
	pgd_free(&init_mm, pgd);

	return ret;
}

#ifdef CONFIG_HOTPLUG_CPU
static void percpu_timer_stop(void);

/*
 * __cpu_disable runs on the processor to be shut down.
 */
int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	struct task_struct *p;
	int ret;

	ret = platform_cpu_disable(cpu);
	if (ret)
		return ret;

	/*
	 * Take this CPU offline.  Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);

	/*
	 * OK - migrate IRQs away from this CPU
	 */
	migrate_irqs();

	/*
	 * Stop the local timer for this CPU.
	 */
	percpu_timer_stop();

	/*
	 * Flush user cache and TLB mappings, and then remove this CPU
	 * from the vm mask set of all processes.
	 */
	flush_cache_all();
	local_flush_tlb_all();

	read_lock(&tasklist_lock);
	for_each_process(p) {
		if (p->mm)
			cpumask_clear_cpu(cpu, mm_cpumask(p->mm));
	}
	read_unlock(&tasklist_lock);

	return 0;
}
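
/*
 * cpu_died is completed by the dying CPU in cpu_die() and waited on
 * in __cpu_die() by the CPU that requested the shutdown.
 */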
static DECLARE_COMPLETION(cpu_died);

/*
 * called on the thread which is asking for a CPU to be shutdown -
 * waits until shutdown has completed, or it is timed out.
 */
void __cpu_die(unsigned int cpu)
{
	if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) {
		pr_err("CPU%u: cpu didn't die\n", cpu);
		return;
	}
	printk(KERN_NOTICE "CPU%u: shutdown\n", cpu);

	if (!platform_cpu_kill(cpu))
		printk("CPU%u: unable to kill\n", cpu);
}

/*
 * Called from the idle thread for the CPU which has been shutdown.
 *
 * Note that we disable IRQs here, but do not re-enable them
 * before returning to the caller. This is also the behaviour
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
void __ref cpu_die(void)
{
	unsigned int cpu = smp_processor_id();

	idle_task_exit();

	local_irq_disable();
	mb();

	/* Tell __cpu_die() that this CPU is now safe to dispose of */
	complete(&cpu_died);

	/*
	 * actual CPU shutdown procedure is at least platform (if not
	 * CPU) specific.
	 */
	platform_cpu_die(cpu);

	/*
	 * Do not return to the idle loop - jump back to the secondary
	 * cpu initialisation.  There's some initialisation which needs
	 * to be repeated to undo the effects of taking the CPU offline.
	 */
	__asm__("mov	sp, %0\n"
	"	mov	fp, #0\n"
	"	b	secondary_start_kernel"
		:
		: "r" (task_stack_page(current) + THREAD_SIZE - 8));
}
#endif /* CONFIG_HOTPLUG_CPU */

/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
static void __cpuinit smp_store_cpu_info(unsigned int cpuid)
{
	struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);

	cpu_info->loops_per_jiffy = loops_per_jiffy;
}

/*
 * This is the secondary CPU boot entry.  We're using this CPU's
 * idle thread stack, but a set of temporary page tables.
 */
asmlinkage void __cpuinit secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();

	printk("CPU%u: Booted secondary processor\n", cpu);

	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */
	atomic_inc(&mm->mm_count);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));
	cpu_switch_mm(mm->pgd, mm);
	enter_lazy_tlb(mm, current);
	local_flush_tlb_all();
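
	/*
	 * From this point on, this CPU runs on init_mm's page tables;
	 * the temporary 1:1 tables passed in via secondary_data are no
	 * longer in use here.
	 */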
	cpu_init();
	preempt_disable();
	trace_hardirqs_off();

	/*
	 * Give the platform a chance to do its own initialisation.
	 */
	platform_secondary_init(cpu);

	notify_cpu_starting(cpu);

	calibrate_delay();

	smp_store_cpu_info(cpu);

	/*
	 * OK, now it's safe to let the boot CPU continue.  Wait for
	 * the CPU migration code to notice that the CPU is online
	 * before we continue.
	 */
	set_cpu_online(cpu, true);

	/*
	 * Setup the percpu timer for this CPU.
	 */
	percpu_timer_setup();

	while (!cpu_active(cpu))
		cpu_relax();

	/*
	 * cpu_active bit is set, so it's safe to enable interrupts
	 * now.
	 */
	local_irq_enable();
	local_fiq_enable();

	/*
	 * OK, it's off to the idle thread for us
	 */
	cpu_idle();
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	int cpu;
	unsigned long bogosum = 0;

	for_each_online_cpu(cpu)
		bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;

	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n",
	       num_online_cpus(),
	       bogosum / (500000/HZ),
	       (bogosum / (5000/HZ)) % 100);
}

void __init smp_prepare_boot_cpu(void)
{
	unsigned int cpu = smp_processor_id();

	per_cpu(cpu_data, cpu).idle = current;
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int ncores = num_possible_cpus();

	smp_store_cpu_info(smp_processor_id());

	/*
	 * are we trying to boot more cores than exist?
	 */
	if (max_cpus > ncores)
		max_cpus = ncores;
	if (ncores > 1 && max_cpus) {
		/*
		 * Enable the local timer or broadcast device for the
		 * boot CPU, but only if we have more than one CPU.
		 */
		percpu_timer_setup();

		/*
		 * Initialise the present map, which describes the set of CPUs
		 * actually populated at the present time. A platform should
		 * re-initialize the map in platform_smp_prepare_cpus() if
		 * present != possible (e.g. physical hotplug).
		 */
		init_cpu_present(&cpu_possible_map);

		/*
		 * Initialise the SCU if there is more than one CPU
		 * and let them know where to start.
		 */
		platform_smp_prepare_cpus(max_cpus);
	}
}

static void (*smp_cross_call)(const struct cpumask *, unsigned int);

void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
	smp_cross_call = fn;
}
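
/*
 * Illustration only (not part of this file): a GIC-based platform
 * would typically register its IPI trigger during SMP bring-up with
 * something like
 *
 *	set_smp_cross_call(gic_raise_softirq);
 *
 * where gic_raise_softirq(const struct cpumask *, unsigned int) is the
 * GIC driver's software-interrupt hook; other interrupt controllers
 * provide their own function with this signature.
 */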

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
}

static const char *ipi_types[NR_IPI] = {
#define S(x,s)	[x - IPI_TIMER] = s
	S(IPI_TIMER, "Timer broadcast interrupts"),
	S(IPI_RESCHEDULE, "Rescheduling interrupts"),
	S(IPI_CALL_FUNC, "Function call interrupts"),
	S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"),
	S(IPI_CPU_STOP, "CPU stop interrupts"),
};
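
/*
 * The S() macro shifts each entry down by IPI_TIMER so the array is
 * indexed from 0 even though the IPI numbers themselves start at 2;
 * do_IPI() applies the same (ipinr - IPI_TIMER) shift when counting.
 */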

void show_ipi_list(struct seq_file *p, int prec)
{
	unsigned int cpu, i;

	for (i = 0; i < NR_IPI; i++) {
		seq_printf(p, "%*s%u: ", prec - 1, "IPI", i);

		for_each_present_cpu(cpu)
			seq_printf(p, "%10u ",
				   __get_irq_stat(cpu, ipi_irqs[i]));

		seq_printf(p, " %s\n", ipi_types[i]);
	}
}

u64 smp_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = 0;
	int i;

	for (i = 0; i < NR_IPI; i++)
		sum += __get_irq_stat(cpu, ipi_irqs[i]);

#ifdef CONFIG_LOCAL_TIMERS
	sum += __get_irq_stat(cpu, local_timer_irqs);
#endif

	return sum;
}

/*
 * Timer (local or broadcast) support
 */
static DEFINE_PER_CPU(struct clock_event_device, percpu_clockevent);

static void ipi_timer(void)
{
	struct clock_event_device *evt = &__get_cpu_var(percpu_clockevent);
	irq_enter();
	evt->event_handler(evt);
	irq_exit();
}
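
/*
 * ipi_timer() brackets the event handler with irq_enter()/irq_exit()
 * because it is called from the low-level IPI and local-timer paths,
 * which bypass the usual IRQ accounting.
 */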

#ifdef CONFIG_LOCAL_TIMERS
asmlinkage void __exception_irq_entry do_local_timer(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	int cpu = smp_processor_id();

	if (local_timer_ack()) {
		__inc_irq_stat(cpu, local_timer_irqs);
		ipi_timer();
	}

	set_irq_regs(old_regs);
}

void show_local_irqs(struct seq_file *p, int prec)
{
	unsigned int cpu;

	seq_printf(p, "%*s: ", prec, "LOC");

	for_each_present_cpu(cpu)
		seq_printf(p, "%10u ", __get_irq_stat(cpu, local_timer_irqs));

	seq_printf(p, " Local timer interrupts\n");
}
#endif

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
static void smp_timer_broadcast(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_TIMER);
}
#else
#define smp_timer_broadcast	NULL
#endif
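
/*
 * Without CONFIG_GENERIC_CLOCKEVENTS_BROADCAST ticks cannot be relayed
 * over IPI, so the broadcast hook degenerates to NULL and
 * percpu_timer_setup() below installs no broadcast function.
 */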

static void broadcast_timer_set_mode(enum clock_event_mode mode,
	struct clock_event_device *evt)
{
}

static void __cpuinit broadcast_timer_setup(struct clock_event_device *evt)
{
	evt->name	= "dummy_timer";
	evt->features	= CLOCK_EVT_FEAT_ONESHOT |
			  CLOCK_EVT_FEAT_PERIODIC |
			  CLOCK_EVT_FEAT_DUMMY;
	evt->rating	= 400;
	evt->mult	= 1;
	evt->set_mode	= broadcast_timer_set_mode;

	clockevents_register_device(evt);
}
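
/*
 * The dummy device programs no hardware: CLOCK_EVT_FEAT_DUMMY marks it
 * as a placeholder whose ticks arrive via the IPI_TIMER broadcast path
 * above rather than from a real per-CPU timer.
 */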

void __cpuinit percpu_timer_setup(void)
{
	unsigned int cpu = smp_processor_id();
	struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);

	evt->cpumask = cpumask_of(cpu);
	evt->broadcast = smp_timer_broadcast;

	if (local_timer_setup(evt))
		broadcast_timer_setup(evt);
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * The generic clock events code purposely does not stop the local timer
 * on CPU_DEAD/CPU_DEAD_FROZEN hotplug events, so we have to do it
 * ourselves here.
 */
static void percpu_timer_stop(void)
{
	unsigned int cpu = smp_processor_id();
	struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);

	evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt);
}
#endif

static DEFINE_SPINLOCK(stop_lock);

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(unsigned int cpu)
{
	if (system_state == SYSTEM_BOOTING ||
	    system_state == SYSTEM_RUNNING) {
		spin_lock(&stop_lock);
		printk(KERN_CRIT "CPU%u: stopping\n", cpu);
		dump_stack();
		spin_unlock(&stop_lock);
	}

	set_cpu_online(cpu, false);

	local_fiq_disable();
	local_irq_disable();

	while (1)
		cpu_relax();
}

/*
 * Main handler for inter-processor interrupts
 */
asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs)
{
	unsigned int cpu = smp_processor_id();
	struct pt_regs *old_regs = set_irq_regs(regs);

	if (ipinr >= IPI_TIMER && ipinr < IPI_TIMER + NR_IPI)
		__inc_irq_stat(cpu, ipi_irqs[ipinr - IPI_TIMER]);

	switch (ipinr) {
	case IPI_TIMER:
		ipi_timer();
		break;

	case IPI_RESCHEDULE:
		scheduler_ipi();
		break;

	case IPI_CALL_FUNC:
		generic_smp_call_function_interrupt();
		break;

	case IPI_CALL_FUNC_SINGLE:
		generic_smp_call_function_single_interrupt();
		break;

	case IPI_CPU_STOP:
		ipi_cpu_stop(cpu);
		break;

	default:
		printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n",
		       cpu, ipinr);
		break;
	}
	set_irq_regs(old_regs);
}

void smp_send_reschedule(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}

void smp_send_stop(void)
{
	unsigned long timeout;

	if (num_online_cpus() > 1) {
		cpumask_t mask = cpu_online_map;
		cpu_clear(smp_processor_id(), mask);

		smp_cross_call(&mask, IPI_CPU_STOP);
	}

	/* Wait up to one second for other CPUs to stop */
	timeout = USEC_PER_SEC;
	while (num_online_cpus() > 1 && timeout--)
		udelay(1);

	if (num_online_cpus() > 1)
		pr_warning("SMP: failed to stop secondary CPUs\n");
}

/*
 * not supported here
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}