/*
 *  linux/arch/arm/kernel/smp.c
 *
 *  Copyright (C) 2002 ARM Limited, All Rights Reserved.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/percpu.h>
#include <linux/clockchips.h>

#include <asm/atomic.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
#include <asm/localtimer.h>
#include <asm/smp_plat.h>

/*
 * As from 2.5, kernels no longer have an init_tasks structure, so we
 * need some other way of telling a new secondary core where to place
 * its SVC stack.
 */
struct secondary_data secondary_data;

/*
 * Structures for inter-processor calls
 * - a collection of single-bit IPI messages.
 */
struct ipi_data {
        spinlock_t lock;
        unsigned long ipi_count;
        unsigned long bits;
};

static DEFINE_PER_CPU(struct ipi_data, ipi_data) = {
        .lock   = SPIN_LOCK_UNLOCKED,
};

enum ipi_msg_type {
        IPI_TIMER,
        IPI_RESCHEDULE,
        IPI_CALL_FUNC,
        IPI_CALL_FUNC_SINGLE,
        IPI_CPU_STOP,
};
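
/*
 * Note: each IPI type occupies one bit in ipi_data.bits.  A sender
 * sets the target CPU's bit under ipi->lock and then raises a hardware
 * interrupt via smp_cross_call(); the receiver snapshots and clears
 * the whole mask in do_IPI() below.  A reschedule request, for
 * example, is recorded as ipi->bits |= 1 << IPI_RESCHEDULE, i.e. bit 1.
 */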

int __cpuinit __cpu_up(unsigned int cpu)
{
        struct cpuinfo_arm *ci = &per_cpu(cpu_data, cpu);
        struct task_struct *idle = ci->idle;
        pgd_t *pgd;
        pmd_t *pmd;
        int ret;

        /*
         * Spawn a new process manually, if not already done.
         * Grab a pointer to its task struct so we can mess with it.
         */
        if (!idle) {
                idle = fork_idle(cpu);
                if (IS_ERR(idle)) {
                        printk(KERN_ERR "CPU%u: fork() failed\n", cpu);
                        return PTR_ERR(idle);
                }
                ci->idle = idle;
        }

        /*
         * Allocate initial page tables to allow the new CPU to
         * enable the MMU safely.  This essentially means a set
         * of our "standard" page tables, with the addition of
         * a 1:1 mapping for the physical address of the kernel.
         */
        pgd = pgd_alloc(&init_mm);
        pmd = pmd_offset(pgd + pgd_index(PHYS_OFFSET), PHYS_OFFSET);
        *pmd = __pmd((PHYS_OFFSET & PGDIR_MASK) |
                     PMD_TYPE_SECT | PMD_SECT_AP_WRITE);
        flush_pmd_entry(pmd);
        outer_clean_range(__pa(pmd), __pa(pmd + 1));
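
        /*
         * Note: the 1:1 section mapping matters because the secondary
         * enables its MMU while still executing from physical addresses;
         * the identity entry keeps instruction fetches valid at the
         * moment translation is switched on.
         */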

        /*
         * We need to tell the secondary core where to find
         * its stack and the page tables.
         */
        secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
        secondary_data.pgdir = virt_to_phys(pgd);
        __cpuc_flush_dcache_area(&secondary_data, sizeof(secondary_data));
        outer_clean_range(__pa(&secondary_data), __pa(&secondary_data + 1));
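
        /*
         * Note: the flushes above push secondary_data out to RAM.  The
         * new core starts with its MMU and caches disabled, so it can
         * only see data that has been cleaned past the L1 and outer
         * cache.
         */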

        /*
         * Now bring the CPU into our world.
         */
        ret = boot_secondary(cpu, idle);
        if (ret == 0) {
                unsigned long timeout;

                /*
                 * CPU was successfully started, wait for it
                 * to come online or time out.
                 */
                timeout = jiffies + HZ;
                while (time_before(jiffies, timeout)) {
                        if (cpu_online(cpu))
                                break;

                        udelay(10);
                        barrier();
                }

                if (!cpu_online(cpu))
                        ret = -EIO;
        }

        secondary_data.stack = NULL;
        secondary_data.pgdir = 0;

        *pmd = __pmd(0);
        clean_pmd_entry(pmd);
        pgd_free(&init_mm, pgd);

        if (ret) {
                printk(KERN_CRIT "CPU%u: processor failed to boot\n", cpu);

                /*
                 * FIXME: We need to clean up the new idle thread. --rmk
                 */
        }

        return ret;
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * __cpu_disable runs on the processor to be shutdown.
 */
int __cpu_disable(void)
{
        unsigned int cpu = smp_processor_id();
        struct task_struct *p;
        int ret;

        ret = mach_cpu_disable(cpu);
        if (ret)
                return ret;

        /*
         * Take this CPU offline.  Once we clear this, we can't return,
         * and we must not schedule until we're ready to give up the cpu.
         */
        set_cpu_online(cpu, false);

        /*
         * OK - migrate IRQs away from this CPU
         */
        migrate_irqs();

        /*
         * Stop the local timer for this CPU.
         */
        local_timer_stop();

        /*
         * Flush user cache and TLB mappings, and then remove this CPU
         * from the vm mask set of all processes.
         */
        flush_cache_all();
        local_flush_tlb_all();

        read_lock(&tasklist_lock);
        for_each_process(p) {
                if (p->mm)
                        cpumask_clear_cpu(cpu, mm_cpumask(p->mm));
        }
        read_unlock(&tasklist_lock);

        return 0;
}

/*
 * called on the thread which is asking for a CPU to be shutdown -
 * waits until shutdown has completed, or it is timed out.
 */
void __cpu_die(unsigned int cpu)
{
        if (!platform_cpu_kill(cpu))
                printk("CPU%u: unable to kill\n", cpu);
}

/*
 * Called from the idle thread for the CPU which has been shutdown.
 *
 * Note that we disable IRQs here, but do not re-enable them
 * before returning to the caller.  This is also the behaviour
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
void __ref cpu_die(void)
{
        unsigned int cpu = smp_processor_id();

        local_irq_disable();
        idle_task_exit();

        /*
         * actual CPU shutdown procedure is at least platform (if not
         * CPU) specific.
         */
        platform_cpu_die(cpu);

        /*
         * Do not return to the idle loop - jump back to the secondary
         * cpu initialisation.  There's some initialisation which needs
         * to be repeated to undo the effects of taking the CPU offline.
         */
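
        /*
         * Note: the inline asm below resets SP to the top of this CPU's
         * idle thread stack (THREAD_SIZE - 8, matching the
         * THREAD_START_SP offset used in __cpu_up()) before branching
         * back into the secondary boot path.
         */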
        __asm__("mov    sp, %0\n"
        "       b       secondary_start_kernel"
                :
                : "r" (task_stack_page(current) + THREAD_SIZE - 8));
}
#endif /* CONFIG_HOTPLUG_CPU */

/*
 * This is the secondary CPU boot entry.  We're using this CPU's
 * idle thread stack, but a set of temporary page tables.
 */
asmlinkage void __cpuinit secondary_start_kernel(void)
{
        struct mm_struct *mm = &init_mm;
        unsigned int cpu = smp_processor_id();

        printk("CPU%u: Booted secondary processor\n", cpu);

        /*
         * All kernel threads share the same mm context; grab a
         * reference and switch to it.
         */
        atomic_inc(&mm->mm_users);
        atomic_inc(&mm->mm_count);
        current->active_mm = mm;
        cpumask_set_cpu(cpu, mm_cpumask(mm));
        cpu_switch_mm(mm->pgd, mm);
        enter_lazy_tlb(mm, current);
        local_flush_tlb_all();
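
        /*
         * Note: from here on this CPU runs on the real init_mm page
         * tables; the flush above drops any TLB entries left over from
         * the temporary 1:1 tables set up in __cpu_up().
         */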

        cpu_init();
        preempt_disable();

        /*
         * Give the platform a chance to do its own initialisation.
         */
        platform_secondary_init(cpu);

        /*
         * Enable local interrupts.
         */
        notify_cpu_starting(cpu);
        local_irq_enable();
        local_fiq_enable();

        /*
         * Setup the percpu timer for this CPU.
         */
        percpu_timer_setup();

        calibrate_delay();

        smp_store_cpu_info(cpu);

        /*
         * OK, now it's safe to let the boot CPU continue.
         */
        set_cpu_online(cpu, true);
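        /*
         * Note: marking ourselves online here is what the timeout loop
         * in __cpu_up() on the boot CPU is polling for.
         */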

        /*
         * OK, it's off to the idle thread for us.
         */
        cpu_idle();
}

/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
void __cpuinit smp_store_cpu_info(unsigned int cpuid)
{
        struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);

        cpu_info->loops_per_jiffy = loops_per_jiffy;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
        int cpu;
        unsigned long bogosum = 0;

        for_each_online_cpu(cpu)
                bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;
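
        /*
         * Note: BogoMIPS = loops_per_jiffy * HZ / 500000, so dividing
         * the sum by (500000/HZ) yields the integer part, and
         * (bogosum / (5000/HZ)) % 100 the two decimal places.  E.g.
         * with HZ=100 and two CPUs at loops_per_jiffy = 997376 each,
         * bogosum = 1994752 and this prints "(398.95 BogoMIPS)".
         */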
        printk(KERN_INFO "SMP: Total of %d processors activated "
               "(%lu.%02lu BogoMIPS).\n",
               num_online_cpus(),
               bogosum / (500000/HZ),
               (bogosum / (5000/HZ)) % 100);
}

void __init smp_prepare_boot_cpu(void)
{
        unsigned int cpu = smp_processor_id();

        per_cpu(cpu_data, cpu).idle = current;
}

static void send_ipi_message(const struct cpumask *mask, enum ipi_msg_type msg)
{
        unsigned long flags;
        unsigned int cpu;

        local_irq_save(flags);

        for_each_cpu(cpu, mask) {
                struct ipi_data *ipi = &per_cpu(ipi_data, cpu);

                spin_lock(&ipi->lock);
                ipi->bits |= 1 << msg;
                spin_unlock(&ipi->lock);
        }
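
        /*
         * Note: each message bit is published under ipi->lock before
         * the hardware IPI below is raised, so a CPU that enters
         * do_IPI() should observe the message that woke it.
         */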

        /*
         * Call the platform specific cross-CPU call function.
         */
        smp_cross_call(mask);

        local_irq_restore(flags);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
        send_ipi_message(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
        send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
}

void show_ipi_list(struct seq_file *p)
{
        unsigned int cpu;

        seq_puts(p, "IPI:");

        for_each_present_cpu(cpu)
                seq_printf(p, " %10lu", per_cpu(ipi_data, cpu).ipi_count);

        seq_putc(p, '\n');
}

void show_local_irqs(struct seq_file *p)
{
        unsigned int cpu;

        seq_printf(p, "LOC: ");

        for_each_present_cpu(cpu)
                seq_printf(p, "%10u ", irq_stat[cpu].local_timer_irqs);

        seq_putc(p, '\n');
}

/*
 * Timer (local or broadcast) support
 */
static DEFINE_PER_CPU(struct clock_event_device, percpu_clockevent);

static void ipi_timer(void)
{
        struct clock_event_device *evt = &__get_cpu_var(percpu_clockevent);
        irq_enter();
        evt->event_handler(evt);
        irq_exit();
}

#ifdef CONFIG_LOCAL_TIMERS
asmlinkage void __exception do_local_timer(struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);
        int cpu = smp_processor_id();

        if (local_timer_ack()) {
                irq_stat[cpu].local_timer_irqs++;
                ipi_timer();
        }

        set_irq_regs(old_regs);
}
#endif

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
static void smp_timer_broadcast(const struct cpumask *mask)
{
        send_ipi_message(mask, IPI_TIMER);
}

static void broadcast_timer_set_mode(enum clock_event_mode mode,
        struct clock_event_device *evt)
{
}

static void local_timer_setup(struct clock_event_device *evt)
{
        evt->name       = "dummy_timer";
        evt->features   = CLOCK_EVT_FEAT_ONESHOT |
                          CLOCK_EVT_FEAT_PERIODIC |
                          CLOCK_EVT_FEAT_DUMMY;
        evt->rating     = 400;
        evt->mult       = 1;
        evt->set_mode   = broadcast_timer_set_mode;
        evt->broadcast  = smp_timer_broadcast;
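
        /*
         * Note: this clockevent drives no hardware.  CLOCK_EVT_FEAT_DUMMY
         * tells the clockevents core to service it via the broadcast
         * mechanism, and ->broadcast delivers the tick to other CPUs by
         * sending IPI_TIMER through smp_timer_broadcast().
         */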
        clockevents_register_device(evt);
}
#endif

void __cpuinit percpu_timer_setup(void)
{
        unsigned int cpu = smp_processor_id();
        struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);

        evt->cpumask = cpumask_of(cpu);

        local_timer_setup(evt);
}

static DEFINE_SPINLOCK(stop_lock);

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(unsigned int cpu)
{
        spin_lock(&stop_lock);
        printk(KERN_CRIT "CPU%u: stopping\n", cpu);
        dump_stack();
        spin_unlock(&stop_lock);

        set_cpu_online(cpu, false);

        local_fiq_disable();
        local_irq_disable();

        while (1)
                cpu_relax();
}

/*
 * Main handler for inter-processor interrupts
 *
 * For ARM, the ipimask now only identifies a single
 * category of IPI (Bit 1 IPIs have been replaced by a
 * different mechanism):
 *
 *  Bit 0 - Inter-processor function call
 */
asmlinkage void __exception do_IPI(struct pt_regs *regs)
{
        unsigned int cpu = smp_processor_id();
        struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
        struct pt_regs *old_regs = set_irq_regs(regs);

        ipi->ipi_count++;

        for (;;) {
                unsigned long msgs;

                spin_lock(&ipi->lock);
                msgs = ipi->bits;
                ipi->bits = 0;
                spin_unlock(&ipi->lock);

                if (!msgs)
                        break;

                do {
                        unsigned nextmsg;

                        nextmsg = msgs & -msgs;
                        msgs &= ~nextmsg;
                        nextmsg = ffz(~nextmsg);
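
                        /*
                         * Note: msgs & -msgs isolates the lowest set
                         * bit, and ffz(~x) converts it to its index.
                         * E.g. for msgs = 0b1010: 0b1010 & -0b1010 =
                         * 0b0010, and ffz(~0b0010) = 1, i.e.
                         * IPI_RESCHEDULE.
                         */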
                        switch (nextmsg) {
                        case IPI_TIMER:
                                ipi_timer();
                                break;

                        case IPI_RESCHEDULE:
                                /*
                                 * nothing more to do - everything is
                                 * done on the interrupt return path
                                 */
                                break;

                        case IPI_CALL_FUNC:
                                generic_smp_call_function_interrupt();
                                break;

                        case IPI_CALL_FUNC_SINGLE:
                                generic_smp_call_function_single_interrupt();
                                break;

                        case IPI_CPU_STOP:
                                ipi_cpu_stop(cpu);
                                break;

                        default:
                                printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n",
                                       cpu, nextmsg);
                                break;
                        }
                } while (msgs);
        }

        set_irq_regs(old_regs);
}

void smp_send_reschedule(int cpu)
{
        send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
}

void smp_send_stop(void)
{
        cpumask_t mask = cpu_online_map;
        cpu_clear(smp_processor_id(), mask);
        send_ipi_message(&mask, IPI_CPU_STOP);
}

/*
 * not supported here
 */
int setup_profiling_timer(unsigned int multiplier)
{
        return -EINVAL;
}
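
/*
 * Run func on every CPU in mask, including the calling CPU if it is in
 * the mask; preemption is disabled so the local CPU cannot change
 * while we test for it.
 */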
static void
on_each_cpu_mask(void (*func)(void *), void *info, int wait,
                const struct cpumask *mask)
{
        preempt_disable();

        smp_call_function_many(mask, func, info, wait);
        if (cpumask_test_cpu(smp_processor_id(), mask))
                func(info);

        preempt_enable();
}

/**********************************************************************/

/*
 * TLB operations
 */
struct tlb_args {
        struct vm_area_struct *ta_vma;
        unsigned long ta_start;
        unsigned long ta_end;
};
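
/*
 * Note: the cross-CPU call machinery hands a single void * to its
 * handler, so the vma/start/end arguments are bundled into one
 * tlb_args struct on the caller's stack.
 */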

static inline void ipi_flush_tlb_all(void *ignored)
{
        local_flush_tlb_all();
}

static inline void ipi_flush_tlb_mm(void *arg)
{
        struct mm_struct *mm = (struct mm_struct *)arg;

        local_flush_tlb_mm(mm);
}

static inline void ipi_flush_tlb_page(void *arg)
{
        struct tlb_args *ta = (struct tlb_args *)arg;

        local_flush_tlb_page(ta->ta_vma, ta->ta_start);
}

static inline void ipi_flush_tlb_kernel_page(void *arg)
{
        struct tlb_args *ta = (struct tlb_args *)arg;

        local_flush_tlb_kernel_page(ta->ta_start);
}

static inline void ipi_flush_tlb_range(void *arg)
{
        struct tlb_args *ta = (struct tlb_args *)arg;

        local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
}

static inline void ipi_flush_tlb_kernel_range(void *arg)
{
        struct tlb_args *ta = (struct tlb_args *)arg;

        local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
}
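
/*
 * The entry points below share one pattern: when this core's TLB
 * maintenance operations are not broadcast in hardware,
 * tlb_ops_need_broadcast() is true and the flush is propagated to the
 * other CPUs by IPI via the handlers above; otherwise a local
 * operation suffices.
 */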
void flush_tlb_all(void)
{
        if (tlb_ops_need_broadcast())
                on_each_cpu(ipi_flush_tlb_all, NULL, 1);
        else
                local_flush_tlb_all();
}

void flush_tlb_mm(struct mm_struct *mm)
{
        if (tlb_ops_need_broadcast())
                on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, mm_cpumask(mm));
        else
                local_flush_tlb_mm(mm);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
        if (tlb_ops_need_broadcast()) {
                struct tlb_args ta;
                ta.ta_vma = vma;
                ta.ta_start = uaddr;
                on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, mm_cpumask(vma->vm_mm));
        } else
                local_flush_tlb_page(vma, uaddr);
}

void flush_tlb_kernel_page(unsigned long kaddr)
{
        if (tlb_ops_need_broadcast()) {
                struct tlb_args ta;
                ta.ta_start = kaddr;
                on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1);
        } else
                local_flush_tlb_kernel_page(kaddr);
}

void flush_tlb_range(struct vm_area_struct *vma,
                     unsigned long start, unsigned long end)
{
        if (tlb_ops_need_broadcast()) {
                struct tlb_args ta;
                ta.ta_vma = vma;
                ta.ta_start = start;
                ta.ta_end = end;
                on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, mm_cpumask(vma->vm_mm));
        } else
                local_flush_tlb_range(vma, start, end);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        if (tlb_ops_need_broadcast()) {
                struct tlb_args ta;
                ta.ta_start = start;
                ta.ta_end = end;
                on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
        } else
                local_flush_tlb_kernel_range(start, end);
}