/*
 *  linux/arch/arm/kernel/smp.c
 *
 *  Copyright (C) 2002 ARM Limited, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/percpu.h>
#include <linux/clockchips.h>
#include <asm/atomic.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
#include <asm/localtimer.h>
/*
 * As of 2.5, kernels no longer have an init_tasks structure, so we
 * need some other way of telling a new secondary core where to place
 * its SVC stack.
 */
struct secondary_data secondary_data;
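
/*
 * For reference, the handoff block above is defined in <asm/smp.h>; a
 * minimal sketch matching the fields assigned in __cpu_up() below
 * (a sketch only - consult the header for the authoritative layout):
 *
 *	struct secondary_data {
 *		unsigned long pgdir;
 *		void *stack;
 *	};
 */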
/*
 * structures for inter-processor calls
 * - A collection of single bit ipi messages.
 */
struct ipi_data {
	spinlock_t lock;
	unsigned long ipi_count;
	unsigned long bits;
};

static DEFINE_PER_CPU(struct ipi_data, ipi_data) = {
	.lock	= SPIN_LOCK_UNLOCKED,
};
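
/*
 * IPI message numbers for the single-bit messages above.  All of these
 * names are used later in this file; the ordering below is an
 * assumption based on the mainline file of this era.
 */
enum ipi_msg_type {
	IPI_TIMER,
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CALL_FUNC_SINGLE,
	IPI_CPU_STOP,
};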
int __cpuinit __cpu_up(unsigned int cpu)
{
	struct cpuinfo_arm *ci = &per_cpu(cpu_data, cpu);
	struct task_struct *idle = ci->idle;
	pgd_t *pgd;
	pmd_t *pmd;
	int ret;

	/*
	 * Spawn a new process manually, if not already done.
	 * Grab a pointer to its task struct so we can mess with it
	 */
	if (!idle) {
		idle = fork_idle(cpu);
		if (IS_ERR(idle)) {
			printk(KERN_ERR "CPU%u: fork() failed\n", cpu);
			return PTR_ERR(idle);
		}
		ci->idle = idle;
	}

	/*
	 * Allocate initial page tables to allow the new CPU to
	 * enable the MMU safely.  This essentially means a set
	 * of our "standard" page tables, with the addition of
	 * a 1:1 mapping for the physical address of the kernel.
	 */
	pgd = pgd_alloc(&init_mm);
	pmd = pmd_offset(pgd + pgd_index(PHYS_OFFSET), PHYS_OFFSET);
	*pmd = __pmd((PHYS_OFFSET & PGDIR_MASK) |
		     PMD_TYPE_SECT | PMD_SECT_AP_WRITE);
	flush_pmd_entry(pmd);

	/*
	 * We need to tell the secondary core where to find
	 * its stack and the page tables.
	 */
	secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
	secondary_data.pgdir = virt_to_phys(pgd);

	/*
	 * Now bring the CPU into our world.
	 */
	ret = boot_secondary(cpu, idle);
	if (ret == 0) {
		unsigned long timeout;

		/*
		 * CPU was successfully started, wait for it
		 * to come online or time out.
		 */
		timeout = jiffies + HZ;
		while (time_before(jiffies, timeout)) {
			if (cpu_online(cpu))
				break;

			udelay(10);
			barrier();
		}

		if (!cpu_online(cpu))
			ret = -EIO;
	}

	secondary_data.stack = NULL;
	secondary_data.pgdir = 0;

	*pmd = __pmd(0);
	clean_pmd_entry(pmd);
	pgd_free(&init_mm, pgd);

	if (ret) {
		printk(KERN_CRIT "CPU%u: processor failed to boot\n", cpu);

		/*
		 * FIXME: We need to clean up the new idle thread. --rmk
		 */
	}

	return ret;
}
#ifdef CONFIG_HOTPLUG_CPU
/*
 * __cpu_disable runs on the processor to be shut down.
 */
int __cpuexit __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	struct task_struct *p;
	int ret;

	ret = mach_cpu_disable(cpu);
	if (ret)
		return ret;

	/*
	 * Take this CPU offline.  Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);

	/*
	 * OK - migrate IRQs away from this CPU
	 */
	migrate_irqs();

	/*
	 * Stop the local timer for this CPU.
	 */
	local_timer_stop();

	/*
	 * Flush user cache and TLB mappings, and then remove this CPU
	 * from the vm mask set of all processes.
	 */
	flush_cache_all();
	local_flush_tlb_all();

	read_lock(&tasklist_lock);
	for_each_process(p) {
		if (p->mm)
			cpu_clear(cpu, p->mm->cpu_vm_mask);
	}
	read_unlock(&tasklist_lock);

	return 0;
}
/*
 * Called on the thread which is asking for a CPU to be shut down -
 * waits until shutdown has completed, or it is timed out.
 */
void __cpuexit __cpu_die(unsigned int cpu)
{
	if (!platform_cpu_kill(cpu))
		printk("CPU%u: unable to kill\n", cpu);
}
/*
 * Called from the idle thread for the CPU which has been shut down.
 *
 * Note that we disable IRQs here, but do not re-enable them
 * before returning to the caller.  This is also the behaviour
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
void __cpuexit cpu_die(void)
{
	unsigned int cpu = smp_processor_id();

	local_irq_disable();
	idle_task_exit();

	/*
	 * actual CPU shutdown procedure is at least platform (if not
	 * CPU) specific.
	 */
	platform_cpu_die(cpu);

	/*
	 * Do not return to the idle loop - jump back to the secondary
	 * cpu initialisation.  There's some initialisation which needs
	 * to be repeated to undo the effects of taking the CPU offline.
	 *
	 * The new stack pointer is the top of the idle thread's stack;
	 * the 8 bytes subtracted from THREAD_SIZE keep sp 8-byte
	 * aligned, as the AAPCS requires at call boundaries.
	 */
	__asm__("mov	sp, %0\n"
	"	b	secondary_start_kernel"
		:
		: "r" (task_stack_page(current) + THREAD_SIZE - 8));
}
#endif /* CONFIG_HOTPLUG_CPU */
/*
 * This is the secondary CPU boot entry.  We're using this CPU's
 * idle thread stack, but a set of temporary page tables.
 */
asmlinkage void __cpuinit secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();

	printk("CPU%u: Booted secondary processor\n", cpu);

	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */
	atomic_inc(&mm->mm_users);
	atomic_inc(&mm->mm_count);
	current->active_mm = mm;
	cpu_set(cpu, mm->cpu_vm_mask);
	cpu_switch_mm(mm->pgd, mm);
	enter_lazy_tlb(mm, current);
	local_flush_tlb_all();

	cpu_init();
	preempt_disable();

	/*
	 * Give the platform a chance to do its own initialisation.
	 */
	platform_secondary_init(cpu);

	/*
	 * Enable local interrupts.
	 */
	notify_cpu_starting(cpu);
	local_irq_enable();
	local_fiq_enable();

	/*
	 * Setup the percpu timer for this CPU.
	 */
	percpu_timer_setup();

	calibrate_delay();

	smp_store_cpu_info(cpu);

	/*
	 * OK, now it's safe to let the boot CPU continue
	 */
	set_cpu_online(cpu, true);

	/*
	 * OK, it's off to the idle thread for us
	 */
	cpu_idle();
}
/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
void __cpuinit smp_store_cpu_info(unsigned int cpuid)
{
	struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);

	cpu_info->loops_per_jiffy = loops_per_jiffy;
}
void __init smp_cpus_done(unsigned int max_cpus)
{
	int cpu;
	unsigned long bogosum = 0;

	for_each_online_cpu(cpu)
		bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;

	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n",
	       num_online_cpus(),
	       bogosum / (500000/HZ),
	       (bogosum / (5000/HZ)) % 100);
}
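
/*
 * A worked example of the BogoMIPS arithmetic above: loops_per_jiffy
 * counts delay loops per tick, so bogosum / (500000/HZ) equals
 * bogosum * HZ / 500000, the integer BogoMIPS value.  With HZ = 100
 * and bogosum = 2000000 that is 2000000/5000 = 400, and the second
 * expression gives (2000000/50) % 100 = 0, printing "400.00".
 */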
void __init smp_prepare_boot_cpu(void)
{
	unsigned int cpu = smp_processor_id();

	per_cpu(cpu_data, cpu).idle = current;
}
static void send_ipi_message(const struct cpumask *mask, enum ipi_msg_type msg)
{
	unsigned long flags;
	unsigned int cpu;

	local_irq_save(flags);

	for_each_cpu(cpu, mask) {
		struct ipi_data *ipi = &per_cpu(ipi_data, cpu);

		spin_lock(&ipi->lock);
		ipi->bits |= 1 << msg;
		spin_unlock(&ipi->lock);
	}

	/*
	 * Call the platform specific cross-CPU call function.
	 */
	smp_cross_call(mask);

	local_irq_restore(flags);
}
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	send_ipi_message(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
}
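
/*
 * Usage sketch (the callers live in the generic kernel, not in this
 * file): smp_call_function_single(cpu, fn, info, wait) queues fn for
 * the target CPU and then calls arch_send_call_function_single_ipi()
 * above; the target runs fn when do_IPI() dispatches
 * IPI_CALL_FUNC_SINGLE to generic_smp_call_function_single_interrupt().
 */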
void show_ipi_list(struct seq_file *p)
{
	unsigned int cpu;

	seq_puts(p, "IPI:");

	for_each_present_cpu(cpu)
		seq_printf(p, " %10lu", per_cpu(ipi_data, cpu).ipi_count);

	seq_putc(p, '\n');
}
void show_local_irqs(struct seq_file *p)
{
	unsigned int cpu;

	seq_printf(p, "LOC: ");

	for_each_present_cpu(cpu)
		seq_printf(p, "%10u ", irq_stat[cpu].local_timer_irqs);

	seq_putc(p, '\n');
}
/*
 * Timer (local or broadcast) support
 */
static DEFINE_PER_CPU(struct clock_event_device, percpu_clockevent);

static void ipi_timer(void)
{
	struct clock_event_device *evt = &__get_cpu_var(percpu_clockevent);
	irq_enter();
	evt->event_handler(evt);
	irq_exit();
}
#ifdef CONFIG_LOCAL_TIMERS
asmlinkage void __exception do_local_timer(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	int cpu = smp_processor_id();

	if (local_timer_ack()) {
		irq_stat[cpu].local_timer_irqs++;
		ipi_timer();
	}

	set_irq_regs(old_regs);
}
#endif
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
static void smp_timer_broadcast(const struct cpumask *mask)
{
	send_ipi_message(mask, IPI_TIMER);
}

static void broadcast_timer_set_mode(enum clock_event_mode mode,
	struct clock_event_device *evt)
{
}

static void local_timer_setup(struct clock_event_device *evt)
{
	evt->name	= "dummy_timer";
	evt->features	= CLOCK_EVT_FEAT_ONESHOT |
			  CLOCK_EVT_FEAT_PERIODIC |
			  CLOCK_EVT_FEAT_DUMMY;
	evt->rating	= 400;
	evt->mult	= 1;
	evt->set_mode	= broadcast_timer_set_mode;
	evt->broadcast	= smp_timer_broadcast;

	clockevents_register_device(evt);
}
#endif
void __cpuinit percpu_timer_setup(void)
{
	unsigned int cpu = smp_processor_id();
	struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);

	evt->cpumask = cpumask_of(cpu);

	local_timer_setup(evt);
}
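
/*
 * Note on the two local_timer_setup() flavours: with CONFIG_LOCAL_TIMERS
 * the platform is expected to provide local_timer_setup() (and the
 * local_timer_ack() used by do_local_timer() above) for a real per-CPU
 * timer; otherwise the dummy broadcast clockevent defined above is
 * registered instead.
 */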
static DEFINE_SPINLOCK(stop_lock);

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(unsigned int cpu)
{
	spin_lock(&stop_lock);
	printk(KERN_CRIT "CPU%u: stopping\n", cpu);
	dump_stack();
	spin_unlock(&stop_lock);

	set_cpu_online(cpu, false);

	local_fiq_disable();
	local_irq_disable();

	while (1)
		cpu_relax();
}
/*
 * Main handler for inter-processor interrupts
 *
 * For ARM, the ipimask now only identifies a single
 * category of IPI (Bit 1 IPIs have been replaced by a
 * different mechanism):
 *
 *  Bit 0 - Inter-processor function call
 */
asmlinkage void __exception do_IPI(struct pt_regs *regs)
{
	unsigned int cpu = smp_processor_id();
	struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
	struct pt_regs *old_regs = set_irq_regs(regs);

	ipi->ipi_count++;

	for (;;) {
		unsigned long msgs;

		spin_lock(&ipi->lock);
		msgs = ipi->bits;
		ipi->bits = 0;
		spin_unlock(&ipi->lock);

		if (!msgs)
			break;

		do {
			unsigned nextmsg;

			nextmsg = msgs & -msgs;
			msgs &= ~nextmsg;
			nextmsg = ffz(~nextmsg);
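			/*
			 * The three lines above peel off the lowest
			 * pending message: msgs & -msgs isolates the
			 * lowest set bit, and ffz(~x) converts that
			 * single-bit value to its bit index.  E.g.
			 * msgs = 0x6 gives nextmsg = 0x2, then index 1
			 * (IPI_RESCHEDULE), leaving msgs = 0x4 for the
			 * next iteration.
			 */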
			switch (nextmsg) {
			case IPI_TIMER:
				ipi_timer();
				break;

			case IPI_RESCHEDULE:
				/*
				 * nothing more to do - everything is
				 * done on the interrupt return path
				 */
				break;

			case IPI_CALL_FUNC:
				generic_smp_call_function_interrupt();
				break;

			case IPI_CALL_FUNC_SINGLE:
				generic_smp_call_function_single_interrupt();
				break;

			case IPI_CPU_STOP:
				ipi_cpu_stop(cpu);
				break;

			default:
				printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n",
				       cpu, nextmsg);
				break;
			}
		} while (msgs);
	}

	set_irq_regs(old_regs);
}
void smp_send_reschedule(int cpu)
{
	send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
}
void smp_send_stop(void)
{
	cpumask_t mask = cpu_online_map;
	cpu_clear(smp_processor_id(), mask);
	send_ipi_message(&mask, IPI_CPU_STOP);
}
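
/*
 * Every CPU in the mask takes the IPI_CPU_STOP message in do_IPI() and
 * lands in ipi_cpu_stop() above, marking itself offline and spinning
 * with IRQs and FIQs disabled; only the calling CPU keeps running.
 */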
/*
 * not supported here
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}
static void
on_each_cpu_mask(void (*func)(void *), void *info, int wait,
	const struct cpumask *mask)
{
	preempt_disable();

	smp_call_function_many(mask, func, info, wait);

	/*
	 * smp_call_function_many() skips the calling CPU, so run
	 * func locally if this CPU is included in the mask.
	 */
	if (cpumask_test_cpu(smp_processor_id(), mask))
		func(info);

	preempt_enable();
}
/**********************************************************************/

/*
 * TLB operations
 */
struct tlb_args {
	struct vm_area_struct *ta_vma;
	unsigned long ta_start;
	unsigned long ta_end;
};
static inline void ipi_flush_tlb_all(void *ignored)
{
	local_flush_tlb_all();
}

static inline void ipi_flush_tlb_mm(void *arg)
{
	struct mm_struct *mm = (struct mm_struct *)arg;

	local_flush_tlb_mm(mm);
}

static inline void ipi_flush_tlb_page(void *arg)
{
	struct tlb_args *ta = (struct tlb_args *)arg;

	local_flush_tlb_page(ta->ta_vma, ta->ta_start);
}

static inline void ipi_flush_tlb_kernel_page(void *arg)
{
	struct tlb_args *ta = (struct tlb_args *)arg;

	local_flush_tlb_kernel_page(ta->ta_start);
}

static inline void ipi_flush_tlb_range(void *arg)
{
	struct tlb_args *ta = (struct tlb_args *)arg;

	local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
}

static inline void ipi_flush_tlb_kernel_range(void *arg)
{
	struct tlb_args *ta = (struct tlb_args *)arg;

	local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
}
void flush_tlb_all(void)
{
	on_each_cpu(ipi_flush_tlb_all, NULL, 1);
}
void flush_tlb_mm(struct mm_struct *mm)
{
	on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, &mm->cpu_vm_mask);
}
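
/*
 * Design note: targeting mm->cpu_vm_mask rather than every online CPU
 * limits the shootdown IPI to CPUs which have run this address space
 * and may therefore hold stale TLB entries for it; unrelated CPUs are
 * never interrupted.
 */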
void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
	struct tlb_args ta;

	ta.ta_vma = vma;
	ta.ta_start = uaddr;

	on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, &vma->vm_mm->cpu_vm_mask);
}
void flush_tlb_kernel_page(unsigned long kaddr)
{
	struct tlb_args ta;

	ta.ta_start = kaddr;

	on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1);
}
void flush_tlb_range(struct vm_area_struct *vma,
		     unsigned long start, unsigned long end)
{
	struct tlb_args ta;

	ta.ta_vma = vma;
	ta.ta_start = start;
	ta.ta_end = end;

	on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, &vma->vm_mm->cpu_vm_mask);
}
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct tlb_args ta;

	ta.ta_start = start;
	ta.ta_end = end;

	on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
}