/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2000, 2001 Kanoj Sarcar
 * Copyright (C) 2000, 2001 Ralf Baechle
 * Copyright (C) 2000, 2001 Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2003 Broadcom Corporation
 */
#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/module.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/ftrace.h>

#include <linux/atomic.h>
#include <asm/cpu.h>
#include <asm/processor.h>
#include <asm/idle.h>
#include <asm/r4k-timer.h>
#include <asm/mmu_context.h>
#include <asm/time.h>
#include <asm/setup.h>

volatile cpumask_t cpu_callin_map;      /* Bitmask of started secondaries */

int __cpu_number_map[NR_CPUS];          /* Map physical to logical */
EXPORT_SYMBOL(__cpu_number_map);

int __cpu_logical_map[NR_CPUS];         /* Map logical to physical */
EXPORT_SYMBOL(__cpu_logical_map);

/* Number of TCs (or siblings in Intel speak) per CPU core */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

/* representing the TCs (or siblings in Intel speak) of each logical CPU */
cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_sibling_map);

/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;

cpumask_t cpu_coherent_mask;

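/*
 * Record the siblings (TCs sharing a physical core) of @cpu in its
 * sibling map, and @cpu in theirs.  With only one TC per core, each
 * CPU is simply its own sole sibling.
 */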
static inline void set_cpu_sibling_map(int cpu)
{
        int i;

        cpu_set(cpu, cpu_sibling_setup_map);

        if (smp_num_siblings > 1) {
                for_each_cpu_mask(i, cpu_sibling_setup_map) {
                        if (cpu_data[cpu].core == cpu_data[i].core) {
                                cpu_set(i, cpu_sibling_map[cpu]);
                                cpu_set(cpu, cpu_sibling_map[i]);
                        }
                }
        } else
                cpu_set(cpu, cpu_sibling_map[cpu]);
}

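/*
 * Platform-specific SMP ops, registered once by platform setup code
 * and used for secondary CPU bring-up and IPI delivery.
 */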
struct plat_smp_ops *mp_ops;
EXPORT_SYMBOL(mp_ops);

void register_smp_ops(struct plat_smp_ops *ops)
{
        if (mp_ops)
                printk(KERN_WARNING "Overriding previously set SMP ops\n");

        mp_ops = ops;
}

/*
 * First C code run on the secondary CPUs after being started up by
 * the master.
 */
asmlinkage void start_secondary(void)
{
        unsigned int cpu;

        cpu_probe();
        cpu_report();
        per_cpu_trap_init(false);
        mips_clockevent_init();
        mp_ops->init_secondary();

        /*
         * XXX parity protection should be folded in here when it's converted
         * to an option instead of something based on .cputype
         */
        calibrate_delay();
        preempt_disable();
        cpu = smp_processor_id();
        cpu_data[cpu].udelay_val = loops_per_jiffy;

        cpu_set(cpu, cpu_coherent_mask);
        notify_cpu_starting(cpu);
        set_cpu_online(cpu, true);
        set_cpu_sibling_map(cpu);
        cpu_set(cpu, cpu_callin_map);

        synchronise_count_slave(cpu);

        /*
         * irq will be enabled in ->smp_finish(), enabling it too early
         * is dangerous.
         */
        WARN_ON_ONCE(!irqs_disabled());
        mp_ops->smp_finish();

        cpu_startup_entry(CPUHP_ONLINE);
}

/*
 * Call into both interrupt handlers, as we share the IPI for them
 */
void __irq_entry smp_call_function_interrupt(void)
{
        irq_enter();
        generic_smp_call_function_interrupt();
        irq_exit();
}

static void stop_this_cpu(void *dummy)
{
        /*
         * Remove this CPU:
         */
        set_cpu_online(smp_processor_id(), false);
        for (;;) {
                if (cpu_wait)
                        (*cpu_wait)();          /* Wait if available. */
        }
}

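/*
 * Ask every other CPU to take itself offline and spin; don't wait
 * for acknowledgement, since a stopped CPU can no longer answer.
 */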
void smp_send_stop(void)
{
        smp_call_function(stop_this_cpu, NULL, 0);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

/* called from main before smp_init() */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
        init_new_context(current, &init_mm);
        current_thread_info()->cpu = 0;
        mp_ops->prepare_cpus(max_cpus);
        set_cpu_sibling_map(0);
#ifndef CONFIG_HOTPLUG_CPU
        init_cpu_present(cpu_possible_mask);
#endif
        cpumask_copy(&cpu_coherent_mask, cpu_possible_mask);
}

/* preload SMP state for boot cpu */
void smp_prepare_boot_cpu(void)
{
        set_cpu_possible(0, true);
        set_cpu_online(0, true);
        cpu_set(0, cpu_callin_map);
}

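/*
 * Bring up a single secondary: ask the platform to start it, then
 * spin until it announces itself in cpu_callin_map (set from
 * start_secondary()) before synchronising its cycle counter.
 */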
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
        mp_ops->boot_secondary(cpu, tidle);

        /*
         * Trust is futile.  We should really have timeouts ...
         */
        while (!cpu_isset(cpu, cpu_callin_map))
                udelay(100);

        synchronise_count_master(cpu);
        return 0;
}

/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
        return 0;
}

static void flush_tlb_all_ipi(void *info)
{
        local_flush_tlb_all();
}

void flush_tlb_all(void)
{
        on_each_cpu(flush_tlb_all_ipi, NULL, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
        local_flush_tlb_mm((struct mm_struct *)mm);
}

/*
 * Special Variant of smp_call_function for use by TLB functions:
 *
 *  o No return value
 *  o collapses to normal function call on UP kernels
 *  o collapses to normal function call on systems with a single shared
 *    primary cache.
 */
static inline void smp_on_other_tlbs(void (*func) (void *info), void *info)
{
        smp_call_function(func, info, 1);
}

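/* As above, but also run the function locally on the calling CPU. */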
static inline void smp_on_each_tlb(void (*func) (void *info), void *info)
{
        preempt_disable();

        smp_on_other_tlbs(func, info);
        func(info);

        preempt_enable();
}

/*
 * The following tlb flush calls are invoked when old translations are
 * being torn down, or pte attributes are changing. For single threaded
 * address spaces, a new context is obtained on the current cpu, and tlb
 * context on other cpus are invalidated to force a new context allocation
 * at switch_mm time, should the mm ever be used on other cpus. For
 * multithreaded address spaces, intercpu interrupts have to be sent.
 * Another case where intercpu interrupts are required is when the target
 * mm might be active on another cpu (eg debuggers doing the flushes on
 * behalf of debugees, kswapd stealing pages from another process etc).
 * Kanoj 07/00.
 */

void flush_tlb_mm(struct mm_struct *mm)
{
        preempt_disable();

        if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
                smp_on_other_tlbs(flush_tlb_mm_ipi, mm);
        } else {
                unsigned int cpu;

                for_each_online_cpu(cpu) {
                        if (cpu != smp_processor_id() && cpu_context(cpu, mm))
                                cpu_context(cpu, mm) = 0;
                }
        }
        local_flush_tlb_mm(mm);

        preempt_enable();
}

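/*
 * Argument block passed by pointer to the TLB IPI handlers; addr1 and
 * addr2 hold the start and end of a range, or just a single address
 * in addr1 for the page flush variant.
 */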
struct flush_tlb_data {
        struct vm_area_struct *vma;
        unsigned long addr1;
        unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
        struct flush_tlb_data *fd = info;

        local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;

        preempt_disable();
        if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
                struct flush_tlb_data fd = {
                        .vma = vma,
                        .addr1 = start,
                        .addr2 = end,
                };

                smp_on_other_tlbs(flush_tlb_range_ipi, &fd);
        } else {
                unsigned int cpu;

                for_each_online_cpu(cpu) {
                        if (cpu != smp_processor_id() && cpu_context(cpu, mm))
                                cpu_context(cpu, mm) = 0;
                }
        }
        local_flush_tlb_range(vma, start, end);
        preempt_enable();
}

static void flush_tlb_kernel_range_ipi(void *info)
{
        struct flush_tlb_data *fd = info;

        local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

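/*
 * Kernel mappings have no mm context that can be lazily invalidated,
 * so the range must be flushed on every CPU, the caller included.
 */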
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        struct flush_tlb_data fd = {
                .addr1 = start,
                .addr2 = end,
        };

        on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1);
}

static void flush_tlb_page_ipi(void *info)
{
        struct flush_tlb_data *fd = info;

        local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
        preempt_disable();
        if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) {
                struct flush_tlb_data fd = {
                        .vma = vma,
                        .addr1 = page,
                };

                smp_on_other_tlbs(flush_tlb_page_ipi, &fd);
        } else {
                unsigned int cpu;

                for_each_online_cpu(cpu) {
                        if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm))
                                cpu_context(cpu, vma->vm_mm) = 0;
                }
        }
        local_flush_tlb_page(vma, page);
        preempt_enable();
}

static void flush_tlb_one_ipi(void *info)
{
        unsigned long vaddr = (unsigned long) info;

        local_flush_tlb_one(vaddr);
}

void flush_tlb_one(unsigned long vaddr)
{
        smp_on_each_tlb(flush_tlb_one_ipi, (void *) vaddr);
}

EXPORT_SYMBOL(flush_tlb_page);
EXPORT_SYMBOL(flush_tlb_one);

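/*
 * For crash dumps: publish the dump callback, then kick every other
 * online CPU with an SMP_DUMP IPI so each can save its own state.
 */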
#if defined(CONFIG_KEXEC)
void (*dump_ipi_function_ptr)(void *) = NULL;
void dump_send_ipi(void (*dump_ipi_callback)(void *))
{
        int i;
        int cpu = smp_processor_id();

        dump_ipi_function_ptr = dump_ipi_callback;
        smp_mb();
        for_each_online_cpu(i)
                if (i != cpu)
                        mp_ops->send_ipi_single(i, SMP_DUMP);
}
EXPORT_SYMBOL(dump_send_ipi);
#endif

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST

static DEFINE_PER_CPU(atomic_t, tick_broadcast_count);
static DEFINE_PER_CPU(struct call_single_data, tick_broadcast_csd);

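/*
 * The per-cpu count coalesces broadcast requests: only the 0 -> 1
 * transition queues the csd, so a CPU is never sent a second tick
 * IPI while one is still in flight; the callee resets the count.
 */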
void tick_broadcast(const struct cpumask *mask)
{
        atomic_t *count;
        struct call_single_data *csd;
        int cpu;

        for_each_cpu(cpu, mask) {
                count = &per_cpu(tick_broadcast_count, cpu);
                csd = &per_cpu(tick_broadcast_csd, cpu);

                if (atomic_inc_return(count) == 1)
                        smp_call_function_single_async(cpu, csd);
        }
}

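/* Runs on the target CPU: deliver the tick and re-arm the counter. */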
static void tick_broadcast_callee(void *info)
{
        int cpu = smp_processor_id();
        tick_receive_broadcast();
        atomic_set(&per_cpu(tick_broadcast_count, cpu), 0);
}

static int __init tick_broadcast_init(void)
{
        struct call_single_data *csd;
        int cpu;

        for (cpu = 0; cpu < NR_CPUS; cpu++) {
                csd = &per_cpu(tick_broadcast_csd, cpu);
                csd->func = tick_broadcast_callee;
        }

        return 0;
}
early_initcall(tick_broadcast_init);

#endif /* CONFIG_GENERIC_CLOCKEVENTS_BROADCAST */