/*
 * Generic helpers for smp ipi calls
 *
 * (C) Jens Axboe <jens.axboe@oracle.com> 2008
 */
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/cpu.h>
static DEFINE_PER_CPU(struct call_single_queue, call_single_queue);

static struct {
	struct list_head	queue;
	spinlock_t		lock;
} call_function __cacheline_aligned_in_smp =
	{
		.queue		= LIST_HEAD_INIT(call_function.queue),
		.lock		= __SPIN_LOCK_UNLOCKED(call_function.lock),
	};

enum {
	CSD_FLAG_LOCK		= 0x01,	/* csd is in use, see csd_lock()/csd_unlock() */
};

struct call_function_data {
	struct call_single_data	csd;
	spinlock_t		lock;
	unsigned int		refs;
	cpumask_var_t		cpumask;
};

struct call_single_queue {
	struct list_head	list;
	spinlock_t		lock;
};

static DEFINE_PER_CPU(struct call_function_data, cfd_data) = {
	.lock			= __SPIN_LOCK_UNLOCKED(cfd_data.lock),
};
static int
hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;
	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		if (!alloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
				cpu_to_node(cpu)))
			return NOTIFY_BAD;
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		free_cpumask_var(cfd->cpumask);
		break;
#endif
	}

	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata hotplug_cfd_notifier = {
	.notifier_call		= hotplug_cfd,
};
static int __cpuinit init_call_single_data(void)
{
	void *cpu = (void *)(long)smp_processor_id();
	int i;

	for_each_possible_cpu(i) {
		struct call_single_queue *q = &per_cpu(call_single_queue, i);

		spin_lock_init(&q->lock);
		INIT_LIST_HEAD(&q->list);
	}

	/* Set up the boot CPU's cfd_data and register for hotplug events. */
	hotplug_cfd(&hotplug_cfd_notifier, CPU_UP_PREPARE, cpu);
	register_cpu_notifier(&hotplug_cfd_notifier);

	return 0;
}
early_initcall(init_call_single_data);
/*
 * csd_lock/csd_unlock used to serialize access to per-cpu csd resources
 *
 * For non-synchronous ipi calls the csd can still be in use by the
 * previous function call. For multi-cpu calls it's even more interesting
 * as we'll have to ensure no other cpu is observing our csd.
 */
static void csd_lock_wait(struct call_single_data *data)
{
	/* Spin until the current owner clears CSD_FLAG_LOCK in csd_unlock(). */
	while (data->flags & CSD_FLAG_LOCK)
		cpu_relax();
}

static void csd_lock(struct call_single_data *data)
{
	csd_lock_wait(data);
	data->flags = CSD_FLAG_LOCK;

	/*
	 * prevent CPU from reordering the above assignment
	 * to ->flags with any subsequent assignments to other
	 * fields of the specified call_single_data structure:
	 */
	smp_mb();
}

static void csd_unlock(struct call_single_data *data)
{
	WARN_ON(!(data->flags & CSD_FLAG_LOCK));

	/*
	 * ensure we're all done before releasing data:
	 */
	smp_mb();

	data->flags &= ~CSD_FLAG_LOCK;
}
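/*
 * Illustrative sketch (not part of the original file): the lifecycle of a
 * locked csd. The owner takes the lock bit before publishing the csd to
 * another CPU; whichever CPU runs the callback releases it, and the next
 * user of the same csd spins in csd_lock_wait() until then. The names
 * example_func(), example_csd, example_owner_cpu() and example_remote_cpu()
 * are hypothetical.
 */
#if 0
static void example_func(void *info)
{
}

static struct call_single_data example_csd;

static void example_owner_cpu(void)
{
	csd_lock(&example_csd);			/* waits out any previous user */
	example_csd.func = example_func;	/* safe to fill in after csd_lock() */
	example_csd.info = NULL;
	/* ... hand example_csd to another CPU, e.g. via generic_exec_single() ... */
}

static void example_remote_cpu(void)
{
	example_csd.func(example_csd.info);
	csd_unlock(&example_csd);		/* the owner may now reuse the csd */
}
#endif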
/*
 * Insert a previously allocated call_single_data element
 * for execution on the given CPU. data must already have
 * ->func, ->info, and ->flags set.
 */
static
void generic_exec_single(int cpu, struct call_single_data *data, int wait)
{
	struct call_single_queue *dst = &per_cpu(call_single_queue, cpu);
	unsigned long flags;
	int ipi;

	spin_lock_irqsave(&dst->lock, flags);
	ipi = list_empty(&dst->list);
	list_add_tail(&data->list, &dst->list);
	spin_unlock_irqrestore(&dst->lock, flags);

	/*
	 * The list addition should be visible to the IPI handler, which
	 * locks the list to pull the entry off it, before we send the IPI;
	 * the normal cache coherency rules implied by spinlocks ensure this.
	 *
	 * If IPIs can go out of order with respect to the cache coherency
	 * protocol on an architecture, sufficient synchronisation should be
	 * added to arch code to make it appear to obey cache coherency WRT
	 * locking and barrier primitives. Generic code isn't really equipped
	 * to do the right thing...
	 */
	if (ipi)
		arch_send_call_function_single_ipi(cpu);

	if (wait)
		csd_lock_wait(data);
}
/*
 * Invoked by arch to handle an IPI for call function. Must be called with
 * interrupts disabled.
 */
void generic_smp_call_function_interrupt(void)
{
	struct call_function_data *data;
	int cpu = get_cpu();

	/*
	 * Ensure entry is visible on call_function_queue after we have
	 * entered the IPI. See comment in smp_call_function_many.
	 * If we don't have this, then we may miss an entry on the list
	 * and never get another IPI to process it.
	 */
	smp_mb();

	/*
	 * It's ok to use list_for_each_rcu() here even though we may
	 * delete 'pos', since list_del_rcu() doesn't clear ->next
	 */
	list_for_each_entry_rcu(data, &call_function.queue, csd.list) {
		int refs;

		spin_lock(&data->lock);
		if (!cpumask_test_cpu(cpu, data->cpumask)) {
			spin_unlock(&data->lock);
			continue;
		}
		cpumask_clear_cpu(cpu, data->cpumask);
		spin_unlock(&data->lock);

		data->csd.func(data->csd.info);

		spin_lock(&data->lock);
		WARN_ON(data->refs == 0);
		refs = --data->refs;
		if (!refs) {
			spin_lock(&call_function.lock);
			list_del_rcu(&data->csd.list);
			spin_unlock(&call_function.lock);
		}
		spin_unlock(&data->lock);

		if (refs)
			continue;

		csd_unlock(&data->csd);
	}

	put_cpu();
}
/*
 * Invoked by arch to handle an IPI for call function single. Must be
 * called from the arch with interrupts disabled.
 */
void generic_smp_call_function_single_interrupt(void)
{
	struct call_single_queue *q = &__get_cpu_var(call_single_queue);
	unsigned int data_flags;
	LIST_HEAD(list);

	spin_lock(&q->lock);
	list_replace_init(&q->list, &list);
	spin_unlock(&q->lock);

	while (!list_empty(&list)) {
		struct call_single_data *data;

		data = list_entry(list.next, struct call_single_data, list);
		list_del(&data->list);

		/*
		 * 'data' can be invalid after this call if flags == 0
		 * (when called through generic_exec_single()),
		 * so save them away before making the call:
		 */
		data_flags = data->flags;

		data->func(data->info);

		/*
		 * Unlocked CSDs are valid through generic_exec_single():
		 */
		if (data_flags & CSD_FLAG_LOCK)
			csd_unlock(data);
	}
}
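/*
 * Illustrative sketch (not part of the original file): how an architecture
 * is expected to wire its IPI vectors to the two generic handlers above.
 * The entry points example_call_function_ipi() and example_call_single_ipi()
 * are hypothetical; the real ones live in arch code.
 */
#if 0
void example_call_function_ipi(void)
{
	/* the arch entry point runs with interrupts disabled, as required above */
	irq_enter();
	generic_smp_call_function_interrupt();
	irq_exit();
}

void example_call_single_ipi(void)
{
	irq_enter();
	generic_smp_call_function_single_interrupt();
	irq_exit();
}
#endif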
static DEFINE_PER_CPU(struct call_single_data, csd_data);
/**
 * smp_call_function_single - Run a function on a specific CPU
 * @cpu: The CPU to run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code. Note that @wait
 * will be implicitly turned on in case of allocation failures, since
 * we fall back to on-stack allocation.
 */
int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
			     int wait)
{
	struct call_single_data d = {
		.flags = 0,
	};
	unsigned long flags;
	int this_cpu;
	int err = 0;

	/*
	 * prevent preemption and reschedule on another processor,
	 * as well as CPU removal
	 */
	this_cpu = get_cpu();

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	if (cpu == this_cpu) {
		/* Run @func locally, with interrupts off to mirror IPI context. */
		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	} else {
		if ((unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) {
			struct call_single_data *data = &d;

			if (!wait)
				data = &__get_cpu_var(csd_data);

			csd_lock(data);

			data->func = func;
			data->info = info;
			generic_exec_single(cpu, data, wait);
		} else {
			err = -ENXIO;	/* CPU not online */
		}
	}

	put_cpu();

	return err;
}
EXPORT_SYMBOL(smp_call_function_single);
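/*
 * Illustrative sketch (not part of the original file): a caller using
 * smp_call_function_single() to run a fast, non-blocking callback on one
 * CPU and wait for it. example_read_counter() and example_usage() are
 * hypothetical.
 */
#if 0
static void example_read_counter(void *info)
{
	unsigned long *val = info;

	*val = 42;	/* runs on the target CPU, in interrupt context */
}

static int example_usage(int target_cpu)
{
	unsigned long val = 0;

	/* wait == 1: returns only after example_read_counter() has run */
	return smp_call_function_single(target_cpu, example_read_counter,
					&val, 1);
}
#endif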
/**
 * __smp_call_function_single(): Run a function on another CPU
 * @cpu: The CPU to run on.
 * @data: Pre-allocated and setup data structure
 *
 * Like smp_call_function_single(), but allow caller to pass in a
 * pre-allocated data structure. Useful for embedding @data inside
 * other structures, for instance.
 */
void __smp_call_function_single(int cpu, struct call_single_data *data,
				int wait)
{
	csd_lock(data);

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(wait && irqs_disabled());

	generic_exec_single(cpu, data, wait);
}
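/*
 * Illustrative sketch (not part of the original file): embedding the
 * call_single_data inside a caller-owned structure, as the comment above
 * suggests. struct example_work, example_cb() and example_kick() are
 * hypothetical.
 */
#if 0
struct example_work {
	struct call_single_data	csd;
	int			pending;
};

static void example_cb(void *info)
{
	struct example_work *w = info;

	w->pending = 0;
}

static void example_kick(struct example_work *w, int cpu)
{
	w->pending = 1;
	w->csd.func = example_cb;
	w->csd.info = w;
	/* wait == 0: w must stay valid until example_cb() has run on @cpu */
	__smp_call_function_single(cpu, &w->csd, 0);
}
#endif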
/* Deprecated: shim for archs using old arch_send_call_function_ipi API. */
#ifndef arch_send_call_function_ipi_mask
# define arch_send_call_function_ipi_mask(maskp) \
	 arch_send_call_function_ipi(*(maskp))
#endif
/**
 * smp_call_function_many(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned. Note that @wait
 * will be implicitly turned on in case of allocation failures, since
 * we fall back to on-stack allocation.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. Preemption
 * must be disabled when calling this function.
 */
void smp_call_function_many(const struct cpumask *mask,
			    void (*func)(void *), void *info, bool wait)
{
	struct call_function_data *data;
	unsigned long flags;
	int cpu, next_cpu, this_cpu = smp_processor_id();

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	/* So, what's a CPU they want? Ignoring this one. */
	cpu = cpumask_first_and(mask, cpu_online_mask);
	if (cpu == this_cpu)
		cpu = cpumask_next_and(cpu, mask, cpu_online_mask);

	/* No online cpus? We're done. */
	if (cpu >= nr_cpu_ids)
		return;

	/* Do we have another CPU which isn't us? */
	next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
	if (next_cpu == this_cpu)
		next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);

	/* Fastpath: do that cpu by itself. */
	if (next_cpu >= nr_cpu_ids) {
		smp_call_function_single(cpu, func, info, wait);
		return;
	}

	data = &__get_cpu_var(cfd_data);
	csd_lock(&data->csd);

	spin_lock_irqsave(&data->lock, flags);
	data->csd.func = func;
	data->csd.info = info;
	cpumask_and(data->cpumask, mask, cpu_online_mask);
	cpumask_clear_cpu(this_cpu, data->cpumask);
	data->refs = cpumask_weight(data->cpumask);

	spin_lock(&call_function.lock);
	/*
	 * Place entry at the _HEAD_ of the list, so that any cpu still
	 * observing the entry in generic_smp_call_function_interrupt()
	 * will not miss any other list entries:
	 */
	list_add_rcu(&data->csd.list, &call_function.queue);
	spin_unlock(&call_function.lock);
	spin_unlock_irqrestore(&data->lock, flags);

	/*
	 * Make the list addition visible before sending the ipi.
	 * (IPIs must obey or appear to obey normal Linux cache
	 * coherency rules -- see comment in generic_exec_single).
	 */
	smp_mb();

	/* Send a message to all CPUs in the map */
	arch_send_call_function_ipi_mask(data->cpumask);

	/* Optionally wait for the CPUs to complete */
	if (wait)
		csd_lock_wait(&data->csd);
}
EXPORT_SYMBOL(smp_call_function_many);
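/*
 * Illustrative sketch (not part of the original file): invoking
 * smp_call_function_many() on an explicit cpumask. example_drain() and
 * example_drain_cpus() are hypothetical; preemption must be disabled
 * around the call, as documented above.
 */
#if 0
static void example_drain(void *info)
{
	/* fast, non-blocking work, run on each online CPU in the mask */
}

static void example_drain_cpus(const struct cpumask *cpus)
{
	preempt_disable();
	smp_call_function_many(cpus, example_drain, NULL, true);
	preempt_enable();
}
#endif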
/**
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * Returns 0.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func. In case of allocation
 * failure, @wait will be implicitly turned on.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function(void (*func)(void *), void *info, int wait)
{
	preempt_disable();
	smp_call_function_many(cpu_online_mask, func, info, wait);
	preempt_enable();

	return 0;
}
EXPORT_SYMBOL(smp_call_function);
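/*
 * Illustrative sketch (not part of the original file): running a callback
 * on every other online CPU and waiting for completion. example_sync() and
 * example_sync_all() are hypothetical; this must be called from process
 * context with interrupts enabled.
 */
#if 0
static void example_sync(void *info)
{
	/* e.g. flush a per-cpu cache */
}

static void example_sync_all(void)
{
	smp_call_function(example_sync, NULL, 1);
	/* smp_call_function() skips the calling CPU, so run it locally too */
	example_sync(NULL);
}
#endif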
/* Hold off call-function list manipulation, e.g. while a CPU is brought online. */
void ipi_call_lock(void)
{
	spin_lock(&call_function.lock);
}

void ipi_call_unlock(void)
{
	spin_unlock(&call_function.lock);
}

void ipi_call_lock_irq(void)
{
	spin_lock_irq(&call_function.lock);
}

void ipi_call_unlock_irq(void)
{
	spin_unlock_irq(&call_function.lock);
}