/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2000, 2001 Kanoj Sarcar
 * Copyright (C) 2000, 2001 Ralf Baechle
 * Copyright (C) 2000, 2001 Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2003 Broadcom Corporation
 */

#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/module.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/err.h>

#include <asm/atomic.h>
#include <asm/cpu.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>

#ifdef CONFIG_MIPS_MT_SMTC
#include <asm/mipsmtregs.h>
#endif /* CONFIG_MIPS_MT_SMTC */

cpumask_t phys_cpu_present_map;		/* Bitmask of available CPUs */
volatile cpumask_t cpu_callin_map;	/* Bitmask of started secondaries */
cpumask_t cpu_online_map;		/* Bitmask of currently online CPUs */
int __cpu_number_map[NR_CPUS];		/* Map physical to logical */
int __cpu_logical_map[NR_CPUS];		/* Map logical to physical */

EXPORT_SYMBOL(phys_cpu_present_map);
EXPORT_SYMBOL(cpu_online_map);

extern void __init calibrate_delay(void);
extern void cpu_idle(void);

/*
 * First C code run on the secondary CPUs after being started up by
 * the master.
 */
asmlinkage __cpuinit void start_secondary(void)
{
	unsigned int cpu;

#ifdef CONFIG_MIPS_MT_SMTC
	/* Only do cpu_probe for first TC of CPU */
	if ((read_c0_tcbind() & TCBIND_CURTC) == 0)
#endif /* CONFIG_MIPS_MT_SMTC */
	cpu_probe();
	cpu_report();
	per_cpu_trap_init();
	prom_init_secondary();

	/*
	 * XXX parity protection should be folded in here when it's converted
	 * to an option instead of something based on .cputype
	 */
	calibrate_delay();
	preempt_disable();
	cpu = smp_processor_id();
	cpu_data[cpu].udelay_val = loops_per_jiffy;

	prom_smp_finish();
	cpu_set(cpu, cpu_callin_map);

	cpu_idle();
}

DEFINE_SPINLOCK(smp_call_lock);

struct call_data_struct *call_data;

/*
 * Run a function on all other CPUs.
 *  <func>      The function to run. This must be fast and non-blocking.
 *  <info>      An arbitrary pointer to pass to the function.
 *  <retry>     If true, keep retrying until ready.
 *  <wait>      If true, wait until function has completed on other CPUs.
 *  [RETURNS]   0 on success, else a negative status code.
 *
 * Does not return until remote CPUs are nearly ready to execute <func>,
 * are executing it, or have already executed it.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler:
 *
 * CPU A                               CPU B
 * Disable interrupts
 *                                     smp_call_function()
 *                                     Take call_lock
 *                                     Send IPIs
 *                                     Wait for all cpus to acknowledge IPI
 *                                     CPU A has not responded, spin waiting
 *                                     for cpu A to respond, holding call_lock
 * smp_call_function()
 * Spin waiting for call_lock
 * Deadlock                            Deadlock
 */
int smp_call_function (void (*func) (void *info), void *info, int retry,
								int wait)
{
	struct call_data_struct data;
	int i, cpus = num_online_cpus() - 1;
	int cpu = smp_processor_id();

	/* Can die spectacularly if this CPU isn't yet marked online */
	BUG_ON(!cpu_online(cpu));

	if (!cpus)
		return 0;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	spin_lock(&smp_call_lock);
	call_data = &data;
	mb();

	/* Send a message to all other CPUs and wait for them to respond */
	for_each_online_cpu(i)
		if (i != cpu)
			core_send_ipi(i, SMP_CALL_FUNCTION);

	/* Wait for response */
	/* FIXME: lock-up detection, backtrace on lock-up */
	while (atomic_read(&data.started) != cpus)
		barrier();

	if (wait)
		while (atomic_read(&data.finished) != cpus)
			barrier();
	call_data = NULL;
	spin_unlock(&smp_call_lock);

	return 0;
}

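/*
 * Usage sketch (illustrative only, not part of the original file; the
 * names example_tick and hits are hypothetical): run a fast,
 * non-blocking function on every other online CPU and wait for it to
 * finish everywhere.  Per the rules above, call with interrupts
 * enabled and never from interrupt context:
 *
 *	static atomic_t hits = ATOMIC_INIT(0);
 *
 *	static void example_tick(void *info)
 *	{
 *		atomic_inc((atomic_t *) info);
 *	}
 *
 *	smp_call_function(example_tick, &hits, 1, 1);	(retry=1, wait=1)
 *	example_tick(&hits);	(the caller gets no IPI; run it locally)
 */
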
void smp_call_function_interrupt(void)
{
	void (*func) (void *info) = call_data->func;
	void *info = call_data->info;
	int wait = call_data->wait;

	/*
	 * Notify initiating CPU that I've grabbed the data and am
	 * about to execute the function.
	 */
	mb();
	atomic_inc(&call_data->started);

	/*
	 * At this point the info structure may be out of scope unless wait==1.
	 */
	irq_enter();
	(*func)(info);
	irq_exit();

	if (wait) {
		mb();
		atomic_inc(&call_data->finished);
	}
}

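/*
 * The started/finished handshake between smp_call_function() and the
 * handler above, in the style of the deadlock diagram earlier (an
 * illustrative summary added for clarity, not from the original source):
 *
 * initiating CPU                      target CPU
 * call_data = &data; mb()
 * core_send_ipi(i, SMP_CALL_FUNCTION)
 *                                     loads func/info/wait via call_data
 *                                     mb(); atomic_inc(&started)
 * sees started == cpus; <data> may
 * now go out of scope unless wait==1
 *                                     (*func)(info)
 *                                     if wait: mb(); atomic_inc(&finished)
 * if wait: sees finished == cpus
 */
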
static void stop_this_cpu(void *dummy)
{
	/*
	 * Remove this CPU:
	 */
	cpu_clear(smp_processor_id(), cpu_online_map);
	local_irq_enable();	/* May need to service _machine_restart IPI */
	for (;;);		/* Wait if available. */
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 1, 0);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	prom_cpus_done();
}

/* called from main before smp_init() */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	init_new_context(current, &init_mm);
	current_thread_info()->cpu = 0;
	plat_prepare_cpus(max_cpus);
#ifndef CONFIG_HOTPLUG_CPU
	cpu_present_map = cpu_possible_map;
#endif
}

/* preload SMP state for boot cpu */
void __devinit smp_prepare_boot_cpu(void)
{
	/*
	 * This assumes that bootup is always handled by the processor
	 * with the logical and physical number 0.
	 */
	__cpu_number_map[0] = 0;
	__cpu_logical_map[0] = 0;
	cpu_set(0, phys_cpu_present_map);
	cpu_set(0, cpu_online_map);
	cpu_set(0, cpu_callin_map);
}

/*
 * Called once for each "cpu_possible(cpu)".  Needs to spin up the cpu
 * and keep control until "cpu_online(cpu)" is set.  Note: cpu is
 * physical, not logical.
 */
int __cpuinit __cpu_up(unsigned int cpu)
{
	struct task_struct *idle;

	/*
	 * Processor goes to start_secondary(), sets online flag
	 * The following code is purely to make sure
	 * Linux can schedule processes on this slave.
	 */
	idle = fork_idle(cpu);
	if (IS_ERR(idle))
		panic(KERN_ERR "Fork failed for CPU %d", cpu);

	prom_boot_secondary(cpu, idle);

	/*
	 * Trust is futile.  We should really have timeouts ...
	 */
	while (!cpu_isset(cpu, cpu_callin_map))
		udelay(100);

	cpu_set(cpu, cpu_online_map);

	return 0;
}

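/*
 * How __cpu_up() ties in with start_secondary() above (an illustrative
 * summary added for clarity, not from the original source):
 *
 * master CPU                          secondary CPU
 * __cpu_up(cpu)
 *   idle = fork_idle(cpu)
 *   prom_boot_secondary(cpu, idle)
 *                                     start_secondary()
 *                                       probe, calibrate, ...
 *                                       cpu_set(cpu, cpu_callin_map)
 *   sees cpu in cpu_callin_map
 *   cpu_set(cpu, cpu_online_map)
 *                                       cpu_idle()
 */
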
/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

static void flush_tlb_all_ipi(void *info)
{
	local_flush_tlb_all();
}

void flush_tlb_all(void)
{
	on_each_cpu(flush_tlb_all_ipi, NULL, 1, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
	local_flush_tlb_mm((struct mm_struct *)mm);
}

/*
 * Special variant of smp_call_function for use by TLB functions:
 *
 *  o No return value
 *  o collapses to normal function call on UP kernels
 *  o collapses to normal function call on systems with a single shared
 *    primary cache.
 *  o CONFIG_MIPS_MT_SMTC currently implies there is only one physical core.
 */
static inline void smp_on_other_tlbs(void (*func) (void *info), void *info)
{
#ifndef CONFIG_MIPS_MT_SMTC
	smp_call_function(func, info, 1, 1);
#endif
}

static inline void smp_on_each_tlb(void (*func) (void *info), void *info)
{
	preempt_disable();

	smp_on_other_tlbs(func, info);
	func(info);

	preempt_enable();
}

/*
 * The following tlb flush calls are invoked when old translations are
 * being torn down, or pte attributes are changing. For single threaded
 * address spaces, a new context is obtained on the current cpu, and the
 * tlb context on other cpus is invalidated to force a new context
 * allocation at switch_mm time, should the mm ever be used on other
 * cpus. For multithreaded address spaces, intercpu interrupts have to
 * be sent. Another case where intercpu interrupts are required is when
 * the target mm might be active on another cpu (e.g. debuggers doing
 * the flushes on behalf of debuggees, kswapd stealing pages from
 * another process etc).
 * Kanoj 07/00.
 */
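/*
 * That policy, distilled into a pseudo-C sketch (added for clarity;
 * illustrative only, not from the original source).  Every flush_tlb_*
 * function below follows this shape:
 *
 *	if (mm_users != 1 || current->mm != mm)
 *		smp_on_other_tlbs(flush_ipi, args);	IPI the other cpus
 *	else
 *		for each online cpu i other than this one
 *			cpu_context(i, mm) = 0;		lazy: new ASID at switch_mm
 *	local_flush_tlb_...(...);			always flush locally
 */
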
void flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		smp_on_other_tlbs(flush_tlb_mm_ipi, (void *)mm);
	} else {
		int i;

		for (i = 0; i < num_online_cpus(); i++)
			if (smp_processor_id() != i)
				cpu_context(i, mm) = 0;
	}
	local_flush_tlb_mm(mm);

	preempt_enable();
}

struct flush_tlb_data {
	struct vm_area_struct *vma;
	unsigned long addr1;
	unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();
	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		struct flush_tlb_data fd;

		fd.vma = vma;
		fd.addr1 = start;
		fd.addr2 = end;
		smp_on_other_tlbs(flush_tlb_range_ipi, (void *)&fd);
	} else {
		int i;

		for (i = 0; i < num_online_cpus(); i++)
			if (smp_processor_id() != i)
				cpu_context(i, mm) = 0;
	}
	local_flush_tlb_range(vma, start, end);
	preempt_enable();
}

static void flush_tlb_kernel_range_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct flush_tlb_data fd;

	fd.addr1 = start;
	fd.addr2 = end;
	on_each_cpu(flush_tlb_kernel_range_ipi, (void *)&fd, 1, 1);
}

static void flush_tlb_page_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	preempt_disable();
	if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) {
		struct flush_tlb_data fd;

		fd.vma = vma;
		fd.addr1 = page;
		smp_on_other_tlbs(flush_tlb_page_ipi, (void *)&fd);
	} else {
		int i;

		for (i = 0; i < num_online_cpus(); i++)
			if (smp_processor_id() != i)
				cpu_context(i, vma->vm_mm) = 0;
	}
	local_flush_tlb_page(vma, page);
	preempt_enable();
}

static void flush_tlb_one_ipi(void *info)
{
	unsigned long vaddr = (unsigned long) info;

	local_flush_tlb_one(vaddr);
}

void flush_tlb_one(unsigned long vaddr)
{
	smp_on_each_tlb(flush_tlb_one_ipi, (void *) vaddr);
}

EXPORT_SYMBOL(flush_tlb_page);
EXPORT_SYMBOL(flush_tlb_one);