2 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
3 * Internal non-public definitions that provide either classic
4 * or preemptible semantics.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20 * Copyright Red Hat, 2009
21 * Copyright IBM Corporation, 2009
23 * Author: Ingo Molnar <mingo@elte.hu>
24 * Paul E. McKenney <paulmck@linux.vnet.ibm.com>
27 #include <linux/delay.h>
28 #include <linux/gfp.h>
29 #include <linux/oom.h>
30 #include <linux/smpboot.h>
32 #define RCU_KTHREAD_PRIO 1
34 #ifdef CONFIG_RCU_BOOST
35 #define RCU_BOOST_PRIO CONFIG_RCU_BOOST_PRIO
37 #define RCU_BOOST_PRIO RCU_KTHREAD_PRIO
40 #ifdef CONFIG_RCU_NOCB_CPU
41 static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */
42 static bool have_rcu_nocb_mask; /* Was rcu_nocb_mask allocated? */
static bool __read_mostly rcu_nocb_poll; /* Offload kthreads are to poll. */
44 static char __initdata nocb_buf[NR_CPUS * 5];
45 #endif /* #ifdef CONFIG_RCU_NOCB_CPU */
48 * Check the RCU kernel configuration parameters and print informative
49 * messages about anything out of the ordinary. If you like #ifdef, you
50 * will love this function.
52 static void __init rcu_bootup_announce_oddness(void)
54 #ifdef CONFIG_RCU_TRACE
55 printk(KERN_INFO "\tRCU debugfs-based tracing is enabled.\n");
57 #if (defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 64) || (!defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 32)
printk(KERN_INFO "\tCONFIG_RCU_FANOUT set to non-default value of %d\n",
       CONFIG_RCU_FANOUT);
61 #ifdef CONFIG_RCU_FANOUT_EXACT
62 printk(KERN_INFO "\tHierarchical RCU autobalancing is disabled.\n");
64 #ifdef CONFIG_RCU_FAST_NO_HZ
66 "\tRCU dyntick-idle grace-period acceleration is enabled.\n");
68 #ifdef CONFIG_PROVE_RCU
69 printk(KERN_INFO "\tRCU lockdep checking is enabled.\n");
71 #ifdef CONFIG_RCU_TORTURE_TEST_RUNNABLE
72 printk(KERN_INFO "\tRCU torture testing starts during boot.\n");
74 #if defined(CONFIG_TREE_PREEMPT_RCU) && !defined(CONFIG_RCU_CPU_STALL_VERBOSE)
75 printk(KERN_INFO "\tDump stacks of tasks blocking RCU-preempt GP.\n");
77 #if defined(CONFIG_RCU_CPU_STALL_INFO)
78 printk(KERN_INFO "\tAdditional per-CPU info printed with stalls.\n");
80 #if NUM_RCU_LVL_4 != 0
81 printk(KERN_INFO "\tFour-level hierarchy is enabled.\n");
83 if (rcu_fanout_leaf != CONFIG_RCU_FANOUT_LEAF)
84 printk(KERN_INFO "\tExperimental boot-time adjustment of leaf fanout to %d.\n", rcu_fanout_leaf);
85 if (nr_cpu_ids != NR_CPUS)
86 printk(KERN_INFO "\tRCU restricting CPUs from NR_CPUS=%d to nr_cpu_ids=%d.\n", NR_CPUS, nr_cpu_ids);
87 #ifdef CONFIG_RCU_NOCB_CPU
88 if (have_rcu_nocb_mask) {
89 cpulist_scnprintf(nocb_buf, sizeof(nocb_buf), rcu_nocb_mask);
90 pr_info("\tExperimental no-CBs CPUs: %s.\n", nocb_buf);
92 pr_info("\tExperimental polled no-CBs CPUs.\n");
94 #endif /* #ifdef CONFIG_RCU_NOCB_CPU */
97 #ifdef CONFIG_TREE_PREEMPT_RCU
99 struct rcu_state rcu_preempt_state =
100 RCU_STATE_INITIALIZER(rcu_preempt, call_rcu);
101 DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data);
102 static struct rcu_state *rcu_state = &rcu_preempt_state;
104 static int rcu_preempted_readers_exp(struct rcu_node *rnp);
107 * Tell them what RCU they are running.
109 static void __init rcu_bootup_announce(void)
111 printk(KERN_INFO "Preemptible hierarchical RCU implementation.\n");
112 rcu_bootup_announce_oddness();
116 * Return the number of RCU-preempt batches processed thus far
117 * for debug and statistics.
119 long rcu_batches_completed_preempt(void)
121 return rcu_preempt_state.completed;
123 EXPORT_SYMBOL_GPL(rcu_batches_completed_preempt);
126 * Return the number of RCU batches processed thus far for debug & stats.
128 long rcu_batches_completed(void)
130 return rcu_batches_completed_preempt();
132 EXPORT_SYMBOL_GPL(rcu_batches_completed);
135 * Force a quiescent state for preemptible RCU.
137 void rcu_force_quiescent_state(void)
139 force_quiescent_state(&rcu_preempt_state);
141 EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
144 * Record a preemptible-RCU quiescent state for the specified CPU. Note
145 * that this just means that the task currently running on the CPU is
146 * not in a quiescent state. There might be any number of tasks blocked
147 * while in an RCU read-side critical section.
149 * Unlike the other rcu_*_qs() functions, callers to this function
150 * must disable irqs in order to protect the assignment to
151 * ->rcu_read_unlock_special.
153 static void rcu_preempt_qs(int cpu)
155 struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);
157 if (rdp->passed_quiesce == 0)
158 trace_rcu_grace_period("rcu_preempt", rdp->gpnum, "cpuqs");
159 rdp->passed_quiesce = 1;
160 current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
164 * We have entered the scheduler, and the current task might soon be
165 * context-switched away from. If this task is in an RCU read-side
166 * critical section, we will no longer be able to rely on the CPU to
167 * record that fact, so we enqueue the task on the blkd_tasks list.
168 * The task will dequeue itself when it exits the outermost enclosing
169 * RCU read-side critical section. Therefore, the current grace period
170 * cannot be permitted to complete until the blkd_tasks list entries
171 * predating the current grace period drain, in other words, until
172 * rnp->gp_tasks becomes NULL.
174 * Caller must disable preemption.
176 static void rcu_preempt_note_context_switch(int cpu)
178 struct task_struct *t = current;
180 struct rcu_data *rdp;
181 struct rcu_node *rnp;
183 if (t->rcu_read_lock_nesting > 0 &&
184 (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {
186 /* Possibly blocking in an RCU read-side critical section. */
187 rdp = per_cpu_ptr(rcu_preempt_state.rda, cpu);
189 raw_spin_lock_irqsave(&rnp->lock, flags);
190 t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;
191 t->rcu_blocked_node = rnp;
194 * If this CPU has already checked in, then this task
195 * will hold up the next grace period rather than the
196 * current grace period. Queue the task accordingly.
197 * If the task is queued for the current grace period
198 * (i.e., this CPU has not yet passed through a quiescent
199 * state for the current grace period), then as long
200 * as that task remains queued, the current grace period
201 * cannot end. Note that there is some uncertainty as
202 * to exactly when the current grace period started.
203 * We take a conservative approach, which can result
204 * in unnecessarily waiting on tasks that started very
* slightly after the current grace period began. C'est la vie!!!
208 * But first, note that the current CPU must still be
211 WARN_ON_ONCE((rdp->grpmask & rnp->qsmaskinit) == 0);
212 WARN_ON_ONCE(!list_empty(&t->rcu_node_entry));
213 if ((rnp->qsmask & rdp->grpmask) && rnp->gp_tasks != NULL) {
214 list_add(&t->rcu_node_entry, rnp->gp_tasks->prev);
215 rnp->gp_tasks = &t->rcu_node_entry;
216 #ifdef CONFIG_RCU_BOOST
217 if (rnp->boost_tasks != NULL)
218 rnp->boost_tasks = rnp->gp_tasks;
219 #endif /* #ifdef CONFIG_RCU_BOOST */
221 list_add(&t->rcu_node_entry, &rnp->blkd_tasks);
222 if (rnp->qsmask & rdp->grpmask)
223 rnp->gp_tasks = &t->rcu_node_entry;
225 trace_rcu_preempt_task(rdp->rsp->name,
227 (rnp->qsmask & rdp->grpmask)
230 raw_spin_unlock_irqrestore(&rnp->lock, flags);
231 } else if (t->rcu_read_lock_nesting < 0 &&
232 t->rcu_read_unlock_special) {
235 * Complete exit from RCU read-side critical section on
236 * behalf of preempted instance of __rcu_read_unlock().
238 rcu_read_unlock_special(t);
242 * Either we were not in an RCU read-side critical section to
243 * begin with, or we have now recorded that critical section
244 * globally. Either way, we can now note a quiescent state
245 * for this CPU. Again, if we were in an RCU read-side critical
246 * section, and if that critical section was blocking the current
247 * grace period, then the fact that the task has been enqueued
248 * means that we continue to block the current grace period.
250 local_irq_save(flags);
252 local_irq_restore(flags);
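/*
 * For orientation, a minimal reader-side sketch -- illustrative only, not
 * part of this file; struct example_data, example_gp, and example_reader()
 * are hypothetical.  A task preempted between rcu_read_lock() and
 * rcu_read_unlock() is exactly what ends up queued on ->blkd_tasks above.
 */
struct example_data {
	int val;
};
static struct example_data __rcu *example_gp;

static int example_reader(void)
{
	struct example_data *p;
	int val = -1;

	rcu_read_lock();
	p = rcu_dereference(example_gp);	/* Fetch the RCU-protected pointer. */
	if (p)
		val = p->val;	/* Preemption here makes this task block the grace period. */
	rcu_read_unlock();	/* Outermost unlock runs the cleanup in rcu_read_unlock_special(). */
	return val;
}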
256 * Check for preempted RCU readers blocking the current grace period
257 * for the specified rcu_node structure. If the caller needs a reliable
258 * answer, it must hold the rcu_node's ->lock.
260 static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
262 return rnp->gp_tasks != NULL;
266 * Record a quiescent state for all tasks that were previously queued
267 * on the specified rcu_node structure and that were blocking the current
268 * RCU grace period. The caller must hold the specified rnp->lock with
269 * irqs disabled, and this lock is released upon return, but irqs remain
272 static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
273 __releases(rnp->lock)
276 struct rcu_node *rnp_p;
278 if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
279 raw_spin_unlock_irqrestore(&rnp->lock, flags);
280 return; /* Still need more quiescent states! */
286 * Either there is only one rcu_node in the tree,
287 * or tasks were kicked up to root rcu_node due to
288 * CPUs going offline.
290 rcu_report_qs_rsp(&rcu_preempt_state, flags);
294 /* Report up the rest of the hierarchy. */
296 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
297 raw_spin_lock(&rnp_p->lock); /* irqs already disabled. */
298 rcu_report_qs_rnp(mask, &rcu_preempt_state, rnp_p, flags);
* Advance a ->blkd_tasks-list pointer to the next entry, returning NULL
* instead if at the end of the list.
305 static struct list_head *rcu_next_node_entry(struct task_struct *t,
306 struct rcu_node *rnp)
308 struct list_head *np;
310 np = t->rcu_node_entry.next;
311 if (np == &rnp->blkd_tasks)
* Handle special cases during rcu_read_unlock(), such as needing to
* notify RCU core processing or the task having blocked during the RCU
* read-side critical section.
321 void rcu_read_unlock_special(struct task_struct *t)
327 struct list_head *np;
328 #ifdef CONFIG_RCU_BOOST
329 struct rt_mutex *rbmp = NULL;
330 #endif /* #ifdef CONFIG_RCU_BOOST */
331 struct rcu_node *rnp;
334 /* NMI handlers cannot block and cannot safely manipulate state. */
338 local_irq_save(flags);
341 * If RCU core is waiting for this CPU to exit critical section,
342 * let it know that we have done so.
344 special = t->rcu_read_unlock_special;
345 if (special & RCU_READ_UNLOCK_NEED_QS) {
346 rcu_preempt_qs(smp_processor_id());
349 /* Hardware IRQ handlers cannot block. */
350 if (in_irq() || in_serving_softirq()) {
351 local_irq_restore(flags);
355 /* Clean up if blocked during RCU read-side critical section. */
356 if (special & RCU_READ_UNLOCK_BLOCKED) {
357 t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BLOCKED;
360 * Remove this task from the list it blocked on. The
361 * task can migrate while we acquire the lock, but at
362 * most one time. So at most two passes through loop.
365 rnp = t->rcu_blocked_node;
366 raw_spin_lock(&rnp->lock); /* irqs already disabled. */
367 if (rnp == t->rcu_blocked_node)
369 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
371 empty = !rcu_preempt_blocked_readers_cgp(rnp);
372 empty_exp = !rcu_preempted_readers_exp(rnp);
373 smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
374 np = rcu_next_node_entry(t, rnp);
375 list_del_init(&t->rcu_node_entry);
376 t->rcu_blocked_node = NULL;
377 trace_rcu_unlock_preempted_task("rcu_preempt",
379 if (&t->rcu_node_entry == rnp->gp_tasks)
381 if (&t->rcu_node_entry == rnp->exp_tasks)
383 #ifdef CONFIG_RCU_BOOST
384 if (&t->rcu_node_entry == rnp->boost_tasks)
385 rnp->boost_tasks = np;
386 /* Snapshot/clear ->rcu_boost_mutex with rcu_node lock held. */
387 if (t->rcu_boost_mutex) {
388 rbmp = t->rcu_boost_mutex;
389 t->rcu_boost_mutex = NULL;
391 #endif /* #ifdef CONFIG_RCU_BOOST */
394 * If this was the last task on the current list, and if
395 * we aren't waiting on any CPUs, report the quiescent state.
396 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock,
397 * so we must take a snapshot of the expedited state.
399 empty_exp_now = !rcu_preempted_readers_exp(rnp);
400 if (!empty && !rcu_preempt_blocked_readers_cgp(rnp)) {
401 trace_rcu_quiescent_state_report("preempt_rcu",
408 rcu_report_unblock_qs_rnp(rnp, flags);
410 raw_spin_unlock_irqrestore(&rnp->lock, flags);
413 #ifdef CONFIG_RCU_BOOST
414 /* Unboost if we were boosted. */
416 rt_mutex_unlock(rbmp);
417 #endif /* #ifdef CONFIG_RCU_BOOST */
420 * If this was the last task on the expedited lists,
421 * then we need to report up the rcu_node hierarchy.
423 if (!empty_exp && empty_exp_now)
424 rcu_report_exp_rnp(&rcu_preempt_state, rnp, true);
426 local_irq_restore(flags);
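/*
 * For reference, a rough sketch of how the slow path above is reached from
 * __rcu_read_unlock() (which lives elsewhere, in kernel/rcupdate.c).  This
 * is a from-memory paraphrase, not the authoritative code, and details
 * differ across kernel versions; it is shown only to make concrete why
 * rcu_preempt_note_context_switch() treats a negative ->rcu_read_lock_nesting
 * as "outermost unlock in progress".
 */
static void example_read_unlock_shape(struct task_struct *t)
{
	if (t->rcu_read_lock_nesting != 1) {
		--t->rcu_read_lock_nesting;		/* Still nested: just decrement. */
	} else {
		t->rcu_read_lock_nesting = INT_MIN;	/* Flag outermost unlock in progress. */
		barrier();
		if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
			rcu_read_unlock_special(t);	/* The slow path defined above. */
		barrier();
		t->rcu_read_lock_nesting = 0;
	}
}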
430 #ifdef CONFIG_RCU_CPU_STALL_VERBOSE
433 * Dump detailed information for all tasks blocking the current RCU
434 * grace period on the specified rcu_node structure.
436 static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
439 struct task_struct *t;
441 raw_spin_lock_irqsave(&rnp->lock, flags);
442 if (!rcu_preempt_blocked_readers_cgp(rnp)) {
443 raw_spin_unlock_irqrestore(&rnp->lock, flags);
446 t = list_entry(rnp->gp_tasks,
447 struct task_struct, rcu_node_entry);
448 list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry)
450 raw_spin_unlock_irqrestore(&rnp->lock, flags);
454 * Dump detailed information for all tasks blocking the current RCU
457 static void rcu_print_detail_task_stall(struct rcu_state *rsp)
459 struct rcu_node *rnp = rcu_get_root(rsp);
461 rcu_print_detail_task_stall_rnp(rnp);
462 rcu_for_each_leaf_node(rsp, rnp)
463 rcu_print_detail_task_stall_rnp(rnp);
466 #else /* #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */
468 static void rcu_print_detail_task_stall(struct rcu_state *rsp)
472 #endif /* #else #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */
474 #ifdef CONFIG_RCU_CPU_STALL_INFO
476 static void rcu_print_task_stall_begin(struct rcu_node *rnp)
478 printk(KERN_ERR "\tTasks blocked on level-%d rcu_node (CPUs %d-%d):",
479 rnp->level, rnp->grplo, rnp->grphi);
482 static void rcu_print_task_stall_end(void)
484 printk(KERN_CONT "\n");
487 #else /* #ifdef CONFIG_RCU_CPU_STALL_INFO */
489 static void rcu_print_task_stall_begin(struct rcu_node *rnp)
493 static void rcu_print_task_stall_end(void)
497 #endif /* #else #ifdef CONFIG_RCU_CPU_STALL_INFO */
500 * Scan the current list of tasks blocked within RCU read-side critical
501 * sections, printing out the tid of each.
503 static int rcu_print_task_stall(struct rcu_node *rnp)
505 struct task_struct *t;
508 if (!rcu_preempt_blocked_readers_cgp(rnp))
510 rcu_print_task_stall_begin(rnp);
511 t = list_entry(rnp->gp_tasks,
512 struct task_struct, rcu_node_entry);
513 list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
514 printk(KERN_CONT " P%d", t->pid);
517 rcu_print_task_stall_end();
522 * Check that the list of blocked tasks for the newly completed grace
523 * period is in fact empty. It is a serious bug to complete a grace
524 * period that still has RCU readers blocked! This function must be
525 * invoked -before- updating this rnp's ->gpnum, and the rnp's ->lock
526 * must be held by the caller.
528 * Also, if there are blocked tasks on the list, they automatically
529 * block the newly created grace period, so set up ->gp_tasks accordingly.
531 static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
533 WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp));
534 if (!list_empty(&rnp->blkd_tasks))
535 rnp->gp_tasks = rnp->blkd_tasks.next;
536 WARN_ON_ONCE(rnp->qsmask);
539 #ifdef CONFIG_HOTPLUG_CPU
542 * Handle tasklist migration for case in which all CPUs covered by the
543 * specified rcu_node have gone offline. Move them up to the root
544 * rcu_node. The reason for not just moving them to the immediate
545 * parent is to remove the need for rcu_read_unlock_special() to
546 * make more than two attempts to acquire the target rcu_node's lock.
* Returns 1 if there was previously a task blocking the current grace
* period on the specified rcu_node structure.
553 * The caller must hold rnp->lock with irqs disabled.
555 static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
556 struct rcu_node *rnp,
557 struct rcu_data *rdp)
559 struct list_head *lp;
560 struct list_head *lp_root;
562 struct rcu_node *rnp_root = rcu_get_root(rsp);
563 struct task_struct *t;
565 if (rnp == rnp_root) {
566 WARN_ONCE(1, "Last CPU thought to be offlined?");
567 return 0; /* Shouldn't happen: at least one CPU online. */
570 /* If we are on an internal node, complain bitterly. */
571 WARN_ON_ONCE(rnp != rdp->mynode);
574 * Move tasks up to root rcu_node. Don't try to get fancy for
575 * this corner-case operation -- just put this node's tasks
576 * at the head of the root node's list, and update the root node's
577 * ->gp_tasks and ->exp_tasks pointers to those of this node's,
578 * if non-NULL. This might result in waiting for more tasks than
* absolutely necessary, but this is a good performance/complexity tradeoff.
582 if (rcu_preempt_blocked_readers_cgp(rnp) && rnp->qsmask == 0)
583 retval |= RCU_OFL_TASKS_NORM_GP;
584 if (rcu_preempted_readers_exp(rnp))
585 retval |= RCU_OFL_TASKS_EXP_GP;
586 lp = &rnp->blkd_tasks;
587 lp_root = &rnp_root->blkd_tasks;
588 while (!list_empty(lp)) {
589 t = list_entry(lp->next, typeof(*t), rcu_node_entry);
590 raw_spin_lock(&rnp_root->lock); /* irqs already disabled */
591 list_del(&t->rcu_node_entry);
592 t->rcu_blocked_node = rnp_root;
593 list_add(&t->rcu_node_entry, lp_root);
594 if (&t->rcu_node_entry == rnp->gp_tasks)
595 rnp_root->gp_tasks = rnp->gp_tasks;
596 if (&t->rcu_node_entry == rnp->exp_tasks)
597 rnp_root->exp_tasks = rnp->exp_tasks;
598 #ifdef CONFIG_RCU_BOOST
599 if (&t->rcu_node_entry == rnp->boost_tasks)
600 rnp_root->boost_tasks = rnp->boost_tasks;
601 #endif /* #ifdef CONFIG_RCU_BOOST */
602 raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */
605 rnp->gp_tasks = NULL;
606 rnp->exp_tasks = NULL;
607 #ifdef CONFIG_RCU_BOOST
608 rnp->boost_tasks = NULL;
* In case the root is being boosted and the leaf was not, make sure
* that we boost the tasks blocking the current grace period
614 raw_spin_lock(&rnp_root->lock); /* irqs already disabled */
615 if (rnp_root->boost_tasks != NULL &&
616 rnp_root->boost_tasks != rnp_root->gp_tasks &&
617 rnp_root->boost_tasks != rnp_root->exp_tasks)
618 rnp_root->boost_tasks = rnp_root->gp_tasks;
619 raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */
620 #endif /* #ifdef CONFIG_RCU_BOOST */
625 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
628 * Check for a quiescent state from the current CPU. When a task blocks,
629 * the task is recorded in the corresponding CPU's rcu_node structure,
630 * which is checked elsewhere.
632 * Caller must disable hard irqs.
634 static void rcu_preempt_check_callbacks(int cpu)
636 struct task_struct *t = current;
638 if (t->rcu_read_lock_nesting == 0) {
642 if (t->rcu_read_lock_nesting > 0 &&
643 per_cpu(rcu_preempt_data, cpu).qs_pending)
644 t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
647 #ifdef CONFIG_RCU_BOOST
649 static void rcu_preempt_do_callbacks(void)
651 rcu_do_batch(&rcu_preempt_state, &__get_cpu_var(rcu_preempt_data));
654 #endif /* #ifdef CONFIG_RCU_BOOST */
657 * Queue a preemptible-RCU callback for invocation after a grace period.
659 void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
661 __call_rcu(head, func, &rcu_preempt_state, -1, 0);
663 EXPORT_SYMBOL_GPL(call_rcu);
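/*
 * Typical call_rcu() usage sketch -- illustrative only; struct example_foo
 * and its helpers are hypothetical and not part of this file.  The caller
 * embeds an rcu_head in the protected structure and frees it from the
 * callback, which runs only after a full grace period has elapsed.
 */
struct example_foo {
	struct list_head list;
	struct rcu_head rcu;
	int data;
};

static void example_foo_reclaim(struct rcu_head *rcu)
{
	struct example_foo *fp = container_of(rcu, struct example_foo, rcu);

	kfree(fp);	/* Safe: all pre-existing readers have finished. */
}

static void example_foo_remove(struct example_foo *fp)
{
	list_del_rcu(&fp->list);			/* Caller holds the update-side lock. */
	call_rcu(&fp->rcu, example_foo_reclaim);	/* Defer the free past a grace period. */
}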
666 * Queue an RCU callback for lazy invocation after a grace period.
667 * This will likely be later named something like "call_rcu_lazy()",
668 * but this change will require some way of tagging the lazy RCU
669 * callbacks in the list of pending callbacks. Until then, this
670 * function may only be called from __kfree_rcu().
672 void kfree_call_rcu(struct rcu_head *head,
673 void (*func)(struct rcu_head *rcu))
675 __call_rcu(head, func, &rcu_preempt_state, -1, 1);
677 EXPORT_SYMBOL_GPL(kfree_call_rcu);
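/*
 * kfree_call_rcu() is normally reached via the kfree_rcu() macro rather than
 * called directly.  A minimal sketch (struct example_bar is hypothetical):
 * when the callback would do nothing but kfree() the enclosing structure,
 * kfree_rcu() lets the caller skip writing the callback entirely.
 */
struct example_bar {
	int data;
	struct rcu_head rcu;
};

static void example_bar_free(struct example_bar *bp)
{
	kfree_rcu(bp, rcu);	/* Equivalent to call_rcu() plus a callback that kfree()s bp. */
}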
680 * synchronize_rcu - wait until a grace period has elapsed.
682 * Control will return to the caller some time after a full grace
683 * period has elapsed, in other words after all currently executing RCU
684 * read-side critical sections have completed. Note, however, that
685 * upon return from synchronize_rcu(), the caller might well be executing
686 * concurrently with new RCU read-side critical sections that began while
687 * synchronize_rcu() was waiting. RCU read-side critical sections are
688 * delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested.
690 * See the description of synchronize_sched() for more detailed information
691 * on memory ordering guarantees.
693 void synchronize_rcu(void)
695 rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
696 !lock_is_held(&rcu_lock_map) &&
697 !lock_is_held(&rcu_sched_lock_map),
698 "Illegal synchronize_rcu() in RCU read-side critical section");
699 if (!rcu_scheduler_active)
702 synchronize_rcu_expedited();
704 wait_rcu_gp(call_rcu);
706 EXPORT_SYMBOL_GPL(synchronize_rcu);
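/*
 * Classic synchronous update-side sketch -- illustrative only; the list,
 * lock, and struct below are hypothetical.  Unlink the element so new
 * readers cannot find it, wait for a grace period so pre-existing readers
 * are done with it, and only then free it.
 */
struct example_elem {
	struct list_head list;
	int key;
};
static LIST_HEAD(example_list);
static DEFINE_SPINLOCK(example_list_lock);

static void example_delete(struct example_elem *p)
{
	spin_lock(&example_list_lock);
	list_del_rcu(&p->list);		/* New readers can no longer find p. */
	spin_unlock(&example_list_lock);
	synchronize_rcu();		/* Wait for readers that might still hold p. */
	kfree(p);			/* Nobody can be referencing p now. */
}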
708 static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq);
709 static unsigned long sync_rcu_preempt_exp_count;
710 static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex);
713 * Return non-zero if there are any tasks in RCU read-side critical
714 * sections blocking the current preemptible-RCU expedited grace period.
715 * If there is no preemptible-RCU expedited grace period currently in
716 * progress, returns zero unconditionally.
718 static int rcu_preempted_readers_exp(struct rcu_node *rnp)
720 return rnp->exp_tasks != NULL;
* Return non-zero if there is no RCU expedited grace period in progress
* for the specified rcu_node structure, in other words, if all CPUs and
* tasks covered by the specified rcu_node structure have done their bit
* for the current expedited grace period. Works only for preemptible
* RCU -- other RCU implementations use other means.
730 * Caller must hold sync_rcu_preempt_exp_mutex.
732 static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
734 return !rcu_preempted_readers_exp(rnp) &&
735 ACCESS_ONCE(rnp->expmask) == 0;
739 * Report the exit from RCU read-side critical section for the last task
740 * that queued itself during or before the current expedited preemptible-RCU
741 * grace period. This event is reported either to the rcu_node structure on
742 * which the task was queued or to one of that rcu_node structure's ancestors,
* recursively up the tree. (Calm down, calm down, we do the recursion iteratively!)
746 * Most callers will set the "wake" flag, but the task initiating the
747 * expedited grace period need not wake itself.
749 * Caller must hold sync_rcu_preempt_exp_mutex.
751 static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
757 raw_spin_lock_irqsave(&rnp->lock, flags);
759 if (!sync_rcu_preempt_exp_done(rnp)) {
760 raw_spin_unlock_irqrestore(&rnp->lock, flags);
763 if (rnp->parent == NULL) {
764 raw_spin_unlock_irqrestore(&rnp->lock, flags);
766 wake_up(&sync_rcu_preempt_exp_wq);
770 raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
772 raw_spin_lock(&rnp->lock); /* irqs already disabled */
773 rnp->expmask &= ~mask;
778 * Snapshot the tasks blocking the newly started preemptible-RCU expedited
779 * grace period for the specified rcu_node structure. If there are no such
780 * tasks, report it up the rcu_node hierarchy.
782 * Caller must hold sync_rcu_preempt_exp_mutex and must exclude
783 * CPU hotplug operations.
786 sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp)
791 raw_spin_lock_irqsave(&rnp->lock, flags);
792 if (list_empty(&rnp->blkd_tasks)) {
793 raw_spin_unlock_irqrestore(&rnp->lock, flags);
795 rnp->exp_tasks = rnp->blkd_tasks.next;
796 rcu_initiate_boost(rnp, flags); /* releases rnp->lock */
800 rcu_report_exp_rnp(rsp, rnp, false); /* Don't wake self. */
804 * synchronize_rcu_expedited - Brute-force RCU grace period
806 * Wait for an RCU-preempt grace period, but expedite it. The basic
807 * idea is to invoke synchronize_sched_expedited() to push all the tasks to
808 * the ->blkd_tasks lists and wait for this list to drain. This consumes
809 * significant time on all CPUs and is unfriendly to real-time workloads,
* and is thus not recommended for any sort of common-case code.
* In fact, if you are using synchronize_rcu_expedited() in a loop,
* please restructure your code to batch your updates, and then use a
* single synchronize_rcu() instead.
815 * Note that it is illegal to call this function while holding any lock
816 * that is acquired by a CPU-hotplug notifier. And yes, it is also illegal
817 * to call this function from a CPU-hotplug notifier. Failing to observe
* these restrictions will result in deadlock.
820 void synchronize_rcu_expedited(void)
823 struct rcu_node *rnp;
824 struct rcu_state *rsp = &rcu_preempt_state;
828 smp_mb(); /* Caller's modifications seen first by other CPUs. */
829 snap = ACCESS_ONCE(sync_rcu_preempt_exp_count) + 1;
830 smp_mb(); /* Above access cannot bleed into critical section. */
833 * Block CPU-hotplug operations. This means that any CPU-hotplug
834 * operation that finds an rcu_node structure with tasks in the
835 * process of being boosted will know that all tasks blocking
836 * this expedited grace period will already be in the process of
837 * being boosted. This simplifies the process of moving tasks
838 * from leaf to root rcu_node structures.
843 * Acquire lock, falling back to synchronize_rcu() if too many
844 * lock-acquisition failures. Of course, if someone does the
845 * expedited grace period for us, just leave.
847 while (!mutex_trylock(&sync_rcu_preempt_exp_mutex)) {
848 if (ULONG_CMP_LT(snap,
849 ACCESS_ONCE(sync_rcu_preempt_exp_count))) {
851 goto mb_ret; /* Others did our work for us. */
853 if (trycount++ < 10) {
854 udelay(trycount * num_online_cpus());
857 wait_rcu_gp(call_rcu);
861 if (ULONG_CMP_LT(snap, ACCESS_ONCE(sync_rcu_preempt_exp_count))) {
863 goto unlock_mb_ret; /* Others did our work for us. */
866 /* force all RCU readers onto ->blkd_tasks lists. */
867 synchronize_sched_expedited();
869 /* Initialize ->expmask for all non-leaf rcu_node structures. */
870 rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) {
871 raw_spin_lock_irqsave(&rnp->lock, flags);
872 rnp->expmask = rnp->qsmaskinit;
873 raw_spin_unlock_irqrestore(&rnp->lock, flags);
876 /* Snapshot current state of ->blkd_tasks lists. */
877 rcu_for_each_leaf_node(rsp, rnp)
878 sync_rcu_preempt_exp_init(rsp, rnp);
879 if (NUM_RCU_NODES > 1)
880 sync_rcu_preempt_exp_init(rsp, rcu_get_root(rsp));
884 /* Wait for snapshotted ->blkd_tasks lists to drain. */
885 rnp = rcu_get_root(rsp);
886 wait_event(sync_rcu_preempt_exp_wq,
887 sync_rcu_preempt_exp_done(rnp));
889 /* Clean up and exit. */
890 smp_mb(); /* ensure expedited GP seen before counter increment. */
891 ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
893 mutex_unlock(&sync_rcu_preempt_exp_mutex);
895 smp_mb(); /* ensure subsequent action seen after grace period. */
897 EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
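/*
 * Sketch of the "batch your updates" advice in the header comment above --
 * hypothetical structure and lists, not part of this file.  Rather than one
 * expedited grace period per element, unlink all victims first (tracking
 * them on a private list through a second list_head) and then wait for a
 * single grace period covering every one of them.
 */
struct example_entry {
	struct list_head list;		/* Linkage in the RCU-protected list. */
	struct list_head gc_list;	/* Private linkage used only after unlinking. */
	int key;
};
static LIST_HEAD(example_entries);
static DEFINE_SPINLOCK(example_entries_lock);

static void example_delete_matching(int key)
{
	struct example_entry *p, *q;
	LIST_HEAD(graveyard);

	spin_lock(&example_entries_lock);
	list_for_each_entry_safe(p, q, &example_entries, list) {
		if (p->key == key) {
			list_del_rcu(&p->list);			/* Hide from new readers. */
			list_add(&p->gc_list, &graveyard);	/* Remember for later freeing. */
		}
	}
	spin_unlock(&example_entries_lock);

	synchronize_rcu();	/* One grace period suffices for all victims. */

	list_for_each_entry_safe(p, q, &graveyard, gc_list)
		kfree(p);
}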
900 * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
902 * Note that this primitive does not necessarily wait for an RCU grace period
903 * to complete. For example, if there are no RCU callbacks queued anywhere
904 * in the system, then rcu_barrier() is within its rights to return
905 * immediately, without waiting for anything, much less an RCU grace period.
907 void rcu_barrier(void)
909 _rcu_barrier(&rcu_preempt_state);
911 EXPORT_SYMBOL_GPL(rcu_barrier);
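/*
 * Sketch of the canonical rcu_barrier() use case, for example at module
 * unload time (hypothetical, not part of this file): once no more
 * call_rcu() callbacks can be posted, rcu_barrier() guarantees that every
 * already-posted callback has been invoked before teardown proceeds.
 */
static void example_module_exit(void)
{
	/* ... first stop all activity that might still post call_rcu() callbacks ... */
	rcu_barrier();	/* Wait for every already-queued callback to be invoked. */
	/* ... now safe to free shared state and let the module text go away ... */
}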
914 * Initialize preemptible RCU's state structures.
916 static void __init __rcu_init_preempt(void)
918 rcu_init_one(&rcu_preempt_state, &rcu_preempt_data);
921 #else /* #ifdef CONFIG_TREE_PREEMPT_RCU */
923 static struct rcu_state *rcu_state = &rcu_sched_state;
926 * Tell them what RCU they are running.
928 static void __init rcu_bootup_announce(void)
930 printk(KERN_INFO "Hierarchical RCU implementation.\n");
931 rcu_bootup_announce_oddness();
935 * Return the number of RCU batches processed thus far for debug & stats.
937 long rcu_batches_completed(void)
939 return rcu_batches_completed_sched();
941 EXPORT_SYMBOL_GPL(rcu_batches_completed);
944 * Force a quiescent state for RCU, which, because there is no preemptible
945 * RCU, becomes the same as rcu-sched.
947 void rcu_force_quiescent_state(void)
949 rcu_sched_force_quiescent_state();
951 EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
954 * Because preemptible RCU does not exist, we never have to check for
955 * CPUs being in quiescent states.
957 static void rcu_preempt_note_context_switch(int cpu)
962 * Because preemptible RCU does not exist, there are never any preempted
965 static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
970 #ifdef CONFIG_HOTPLUG_CPU
972 /* Because preemptible RCU does not exist, no quieting of tasks. */
973 static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
975 raw_spin_unlock_irqrestore(&rnp->lock, flags);
978 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
981 * Because preemptible RCU does not exist, we never have to check for
982 * tasks blocked within RCU read-side critical sections.
984 static void rcu_print_detail_task_stall(struct rcu_state *rsp)
989 * Because preemptible RCU does not exist, we never have to check for
990 * tasks blocked within RCU read-side critical sections.
992 static int rcu_print_task_stall(struct rcu_node *rnp)
998 * Because there is no preemptible RCU, there can be no readers blocked,
* so there is no need to check for blocked tasks; check only for
* bogus qsmask values.
1002 static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
1004 WARN_ON_ONCE(rnp->qsmask);
1007 #ifdef CONFIG_HOTPLUG_CPU
1010 * Because preemptible RCU does not exist, it never needs to migrate
1011 * tasks that were blocked within RCU read-side critical sections, and
1012 * such non-existent tasks cannot possibly have been blocking the current
1015 static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
1016 struct rcu_node *rnp,
1017 struct rcu_data *rdp)
1022 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
1025 * Because preemptible RCU does not exist, it never has any callbacks
1028 static void rcu_preempt_check_callbacks(int cpu)
1033 * Queue an RCU callback for lazy invocation after a grace period.
1034 * This will likely be later named something like "call_rcu_lazy()",
1035 * but this change will require some way of tagging the lazy RCU
1036 * callbacks in the list of pending callbacks. Until then, this
1037 * function may only be called from __kfree_rcu().
1039 * Because there is no preemptible RCU, we use RCU-sched instead.
1041 void kfree_call_rcu(struct rcu_head *head,
1042 void (*func)(struct rcu_head *rcu))
1044 __call_rcu(head, func, &rcu_sched_state, -1, 1);
1046 EXPORT_SYMBOL_GPL(kfree_call_rcu);
1049 * Wait for an rcu-preempt grace period, but make it happen quickly.
1050 * But because preemptible RCU does not exist, map to rcu-sched.
1052 void synchronize_rcu_expedited(void)
1054 synchronize_sched_expedited();
1056 EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
1058 #ifdef CONFIG_HOTPLUG_CPU
1061 * Because preemptible RCU does not exist, there is never any need to
1062 * report on tasks preempted in RCU read-side critical sections during
1063 * expedited RCU grace periods.
1065 static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
1070 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
1073 * Because preemptible RCU does not exist, rcu_barrier() is just
1074 * another name for rcu_barrier_sched().
1076 void rcu_barrier(void)
1078 rcu_barrier_sched();
1080 EXPORT_SYMBOL_GPL(rcu_barrier);
1083 * Because preemptible RCU does not exist, it need not be initialized.
1085 static void __init __rcu_init_preempt(void)
1089 #endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
1091 #ifdef CONFIG_RCU_BOOST
1093 #include "rtmutex_common.h"
1095 #ifdef CONFIG_RCU_TRACE
1097 static void rcu_initiate_boost_trace(struct rcu_node *rnp)
1099 if (list_empty(&rnp->blkd_tasks))
1100 rnp->n_balk_blkd_tasks++;
1101 else if (rnp->exp_tasks == NULL && rnp->gp_tasks == NULL)
1102 rnp->n_balk_exp_gp_tasks++;
1103 else if (rnp->gp_tasks != NULL && rnp->boost_tasks != NULL)
1104 rnp->n_balk_boost_tasks++;
1105 else if (rnp->gp_tasks != NULL && rnp->qsmask != 0)
1106 rnp->n_balk_notblocked++;
1107 else if (rnp->gp_tasks != NULL &&
1108 ULONG_CMP_LT(jiffies, rnp->boost_time))
1109 rnp->n_balk_notyet++;
1114 #else /* #ifdef CONFIG_RCU_TRACE */
1116 static void rcu_initiate_boost_trace(struct rcu_node *rnp)
1120 #endif /* #else #ifdef CONFIG_RCU_TRACE */
1122 static void rcu_wake_cond(struct task_struct *t, int status)
1125 * If the thread is yielding, only wake it when this
* is invoked from idle.
1128 if (status != RCU_KTHREAD_YIELDING || is_idle_task(current))
1133 * Carry out RCU priority boosting on the task indicated by ->exp_tasks
1134 * or ->boost_tasks, advancing the pointer to the next task in the
1135 * ->blkd_tasks list.
1137 * Note that irqs must be enabled: boosting the task can block.
1138 * Returns 1 if there are more tasks needing to be boosted.
1140 static int rcu_boost(struct rcu_node *rnp)
1142 unsigned long flags;
1143 struct rt_mutex mtx;
1144 struct task_struct *t;
1145 struct list_head *tb;
1147 if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL)
1148 return 0; /* Nothing left to boost. */
1150 raw_spin_lock_irqsave(&rnp->lock, flags);
1153 * Recheck under the lock: all tasks in need of boosting
1154 * might exit their RCU read-side critical sections on their own.
1156 if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL) {
1157 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1162 * Preferentially boost tasks blocking expedited grace periods.
1163 * This cannot starve the normal grace periods because a second
1164 * expedited grace period must boost all blocked tasks, including
1165 * those blocking the pre-existing normal grace period.
1167 if (rnp->exp_tasks != NULL) {
1168 tb = rnp->exp_tasks;
1169 rnp->n_exp_boosts++;
1171 tb = rnp->boost_tasks;
1172 rnp->n_normal_boosts++;
1174 rnp->n_tasks_boosted++;
1177 * We boost task t by manufacturing an rt_mutex that appears to
1178 * be held by task t. We leave a pointer to that rt_mutex where
1179 * task t can find it, and task t will release the mutex when it
1180 * exits its outermost RCU read-side critical section. Then
1181 * simply acquiring this artificial rt_mutex will boost task
1182 * t's priority. (Thanks to tglx for suggesting this approach!)
1184 * Note that task t must acquire rnp->lock to remove itself from
1185 * the ->blkd_tasks list, which it will do from exit() if from
1186 * nowhere else. We therefore are guaranteed that task t will
1187 * stay around at least until we drop rnp->lock. Note that
1188 * rnp->lock also resolves races between our priority boosting
1189 * and task t's exiting its outermost RCU read-side critical
1192 t = container_of(tb, struct task_struct, rcu_node_entry);
1193 rt_mutex_init_proxy_locked(&mtx, t);
1194 t->rcu_boost_mutex = &mtx;
1195 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1196 rt_mutex_lock(&mtx); /* Side effect: boosts task t's priority. */
1197 rt_mutex_unlock(&mtx); /* Keep lockdep happy. */
1199 return ACCESS_ONCE(rnp->exp_tasks) != NULL ||
1200 ACCESS_ONCE(rnp->boost_tasks) != NULL;
1204 * Priority-boosting kthread. One per leaf rcu_node and one for the
1207 static int rcu_boost_kthread(void *arg)
1209 struct rcu_node *rnp = (struct rcu_node *)arg;
1213 trace_rcu_utilization("Start boost kthread@init");
1215 rnp->boost_kthread_status = RCU_KTHREAD_WAITING;
1216 trace_rcu_utilization("End boost kthread@rcu_wait");
1217 rcu_wait(rnp->boost_tasks || rnp->exp_tasks);
1218 trace_rcu_utilization("Start boost kthread@rcu_wait");
1219 rnp->boost_kthread_status = RCU_KTHREAD_RUNNING;
1220 more2boost = rcu_boost(rnp);
1226 rnp->boost_kthread_status = RCU_KTHREAD_YIELDING;
1227 trace_rcu_utilization("End boost kthread@rcu_yield");
1228 schedule_timeout_interruptible(2);
1229 trace_rcu_utilization("Start boost kthread@rcu_yield");
1234 trace_rcu_utilization("End boost kthread@notreached");
1239 * Check to see if it is time to start boosting RCU readers that are
1240 * blocking the current grace period, and, if so, tell the per-rcu_node
1241 * kthread to start boosting them. If there is an expedited grace
1242 * period in progress, it is always time to boost.
1244 * The caller must hold rnp->lock, which this function releases.
1245 * The ->boost_kthread_task is immortal, so we don't need to worry
1246 * about it going away.
1248 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
1250 struct task_struct *t;
1252 if (!rcu_preempt_blocked_readers_cgp(rnp) && rnp->exp_tasks == NULL) {
1253 rnp->n_balk_exp_gp_tasks++;
1254 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1257 if (rnp->exp_tasks != NULL ||
1258 (rnp->gp_tasks != NULL &&
1259 rnp->boost_tasks == NULL &&
1261 ULONG_CMP_GE(jiffies, rnp->boost_time))) {
1262 if (rnp->exp_tasks == NULL)
1263 rnp->boost_tasks = rnp->gp_tasks;
1264 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1265 t = rnp->boost_kthread_task;
1267 rcu_wake_cond(t, rnp->boost_kthread_status);
1269 rcu_initiate_boost_trace(rnp);
1270 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1275 * Wake up the per-CPU kthread to invoke RCU callbacks.
1277 static void invoke_rcu_callbacks_kthread(void)
1279 unsigned long flags;
1281 local_irq_save(flags);
1282 __this_cpu_write(rcu_cpu_has_work, 1);
1283 if (__this_cpu_read(rcu_cpu_kthread_task) != NULL &&
1284 current != __this_cpu_read(rcu_cpu_kthread_task)) {
1285 rcu_wake_cond(__this_cpu_read(rcu_cpu_kthread_task),
1286 __this_cpu_read(rcu_cpu_kthread_status));
1288 local_irq_restore(flags);
1292 * Is the current CPU running the RCU-callbacks kthread?
1293 * Caller must have preemption disabled.
1295 static bool rcu_is_callbacks_kthread(void)
1297 return __get_cpu_var(rcu_cpu_kthread_task) == current;
1300 #define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000)
1303 * Do priority-boost accounting for the start of a new grace period.
1305 static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
1307 rnp->boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES;
1311 * Create an RCU-boost kthread for the specified node if one does not
1312 * already exist. We only create this kthread for preemptible RCU.
1313 * Returns zero if all is well, a negated errno otherwise.
1315 static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
1316 struct rcu_node *rnp)
1318 int rnp_index = rnp - &rsp->node[0];
1319 unsigned long flags;
1320 struct sched_param sp;
1321 struct task_struct *t;
1323 if (&rcu_preempt_state != rsp)
1326 if (!rcu_scheduler_fully_active || rnp->qsmaskinit == 0)
1330 if (rnp->boost_kthread_task != NULL)
1332 t = kthread_create(rcu_boost_kthread, (void *)rnp,
1333 "rcub/%d", rnp_index);
1336 raw_spin_lock_irqsave(&rnp->lock, flags);
1337 rnp->boost_kthread_task = t;
1338 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1339 sp.sched_priority = RCU_BOOST_PRIO;
1340 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
1341 wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
1345 static void rcu_kthread_do_work(void)
1347 rcu_do_batch(&rcu_sched_state, &__get_cpu_var(rcu_sched_data));
1348 rcu_do_batch(&rcu_bh_state, &__get_cpu_var(rcu_bh_data));
1349 rcu_preempt_do_callbacks();
1352 static void rcu_cpu_kthread_setup(unsigned int cpu)
1354 struct sched_param sp;
1356 sp.sched_priority = RCU_KTHREAD_PRIO;
1357 sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
1360 static void rcu_cpu_kthread_park(unsigned int cpu)
1362 per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
1365 static int rcu_cpu_kthread_should_run(unsigned int cpu)
1367 return __get_cpu_var(rcu_cpu_has_work);
1371 * Per-CPU kernel thread that invokes RCU callbacks. This replaces the
1372 * RCU softirq used in flavors and configurations of RCU that do not
1373 * support RCU priority boosting.
1375 static void rcu_cpu_kthread(unsigned int cpu)
1377 unsigned int *statusp = &__get_cpu_var(rcu_cpu_kthread_status);
1378 char work, *workp = &__get_cpu_var(rcu_cpu_has_work);
1381 for (spincnt = 0; spincnt < 10; spincnt++) {
1382 trace_rcu_utilization("Start CPU kthread@rcu_wait");
1384 *statusp = RCU_KTHREAD_RUNNING;
1385 this_cpu_inc(rcu_cpu_kthread_loops);
1386 local_irq_disable();
1391 rcu_kthread_do_work();
1394 trace_rcu_utilization("End CPU kthread@rcu_wait");
1395 *statusp = RCU_KTHREAD_WAITING;
1399 *statusp = RCU_KTHREAD_YIELDING;
1400 trace_rcu_utilization("Start CPU kthread@rcu_yield");
1401 schedule_timeout_interruptible(2);
1402 trace_rcu_utilization("End CPU kthread@rcu_yield");
1403 *statusp = RCU_KTHREAD_WAITING;
1407 * Set the per-rcu_node kthread's affinity to cover all CPUs that are
1408 * served by the rcu_node in question. The CPU hotplug lock is still
1409 * held, so the value of rnp->qsmaskinit will be stable.
* We don't include outgoingcpu in the affinity set; use -1 if there is
1412 * no outgoing CPU. If there are no CPUs left in the affinity set,
1413 * this function allows the kthread to execute on any CPU.
1415 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
1417 struct task_struct *t = rnp->boost_kthread_task;
1418 unsigned long mask = rnp->qsmaskinit;
1424 if (!zalloc_cpumask_var(&cm, GFP_KERNEL))
1426 for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1)
1427 if ((mask & 0x1) && cpu != outgoingcpu)
1428 cpumask_set_cpu(cpu, cm);
1429 if (cpumask_weight(cm) == 0) {
1431 for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++)
1432 cpumask_clear_cpu(cpu, cm);
1433 WARN_ON_ONCE(cpumask_weight(cm) == 0);
1435 set_cpus_allowed_ptr(t, cm);
1436 free_cpumask_var(cm);
static struct smp_hotplug_thread rcu_cpu_thread_spec = {
	.store			= &rcu_cpu_kthread_task,
	.thread_should_run	= rcu_cpu_kthread_should_run,
	.thread_fn		= rcu_cpu_kthread,
	.thread_comm		= "rcuc/%u",
	.setup			= rcu_cpu_kthread_setup,
	.park			= rcu_cpu_kthread_park,
1449 * Spawn all kthreads -- called as soon as the scheduler is running.
1451 static int __init rcu_spawn_kthreads(void)
1453 struct rcu_node *rnp;
1456 rcu_scheduler_fully_active = 1;
1457 for_each_possible_cpu(cpu)
1458 per_cpu(rcu_cpu_has_work, cpu) = 0;
1459 BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec));
1460 rnp = rcu_get_root(rcu_state);
1461 (void)rcu_spawn_one_boost_kthread(rcu_state, rnp);
1462 if (NUM_RCU_NODES > 1) {
1463 rcu_for_each_leaf_node(rcu_state, rnp)
1464 (void)rcu_spawn_one_boost_kthread(rcu_state, rnp);
1468 early_initcall(rcu_spawn_kthreads);
1470 static void __cpuinit rcu_prepare_kthreads(int cpu)
1472 struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
1473 struct rcu_node *rnp = rdp->mynode;
1475 /* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
1476 if (rcu_scheduler_fully_active)
1477 (void)rcu_spawn_one_boost_kthread(rcu_state, rnp);
1480 #else /* #ifdef CONFIG_RCU_BOOST */
1482 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
1484 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1487 static void invoke_rcu_callbacks_kthread(void)
1492 static bool rcu_is_callbacks_kthread(void)
1497 static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
1501 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
1505 static int __init rcu_scheduler_really_started(void)
1507 rcu_scheduler_fully_active = 1;
1510 early_initcall(rcu_scheduler_really_started);
1512 static void __cpuinit rcu_prepare_kthreads(int cpu)
1516 #endif /* #else #ifdef CONFIG_RCU_BOOST */
1518 #if !defined(CONFIG_RCU_FAST_NO_HZ)
1521 * Check to see if any future RCU-related work will need to be done
1522 * by the current CPU, even if none need be done immediately, returning
1523 * 1 if so. This function is part of the RCU implementation; it is -not-
1524 * an exported member of the RCU API.
* Because we do not have RCU_FAST_NO_HZ, just check whether this CPU needs
1527 * any flavor of RCU.
1529 int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
1531 *delta_jiffies = ULONG_MAX;
1532 return rcu_cpu_has_callbacks(cpu);
1536 * Because we do not have RCU_FAST_NO_HZ, don't bother initializing for it.
1538 static void rcu_prepare_for_idle_init(int cpu)
1543 * Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up
1546 static void rcu_cleanup_after_idle(int cpu)
1551 * Do the idle-entry grace-period work, which, because CONFIG_RCU_FAST_NO_HZ=n,
1554 static void rcu_prepare_for_idle(int cpu)
1559 * Don't bother keeping a running count of the number of RCU callbacks
1560 * posted because CONFIG_RCU_FAST_NO_HZ=n.
1562 static void rcu_idle_count_callbacks_posted(void)
1566 #else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */
1569 * This code is invoked when a CPU goes idle, at which point we want
1570 * to have the CPU do everything required for RCU so that it can enter
1571 * the energy-efficient dyntick-idle mode. This is handled by a
1572 * state machine implemented by rcu_prepare_for_idle() below.
* The following four preprocessor symbols control this state machine:
1576 * RCU_IDLE_FLUSHES gives the maximum number of times that we will attempt
1577 * to satisfy RCU. Beyond this point, it is better to incur a periodic
1578 * scheduling-clock interrupt than to loop through the state machine
1580 * RCU_IDLE_OPT_FLUSHES gives the number of RCU_IDLE_FLUSHES that are
1581 * optional if RCU does not need anything immediately from this
1582 * CPU, even if this CPU still has RCU callbacks queued. The first
1583 * times through the state machine are mandatory: we need to give
1584 * the state machine a chance to communicate a quiescent state
1586 * RCU_IDLE_GP_DELAY gives the number of jiffies that a CPU is permitted
1587 * to sleep in dyntick-idle mode with RCU callbacks pending. This
1588 * is sized to be roughly one RCU grace period. Those energy-efficiency
1589 * benchmarkers who might otherwise be tempted to set this to a large
1590 * number, be warned: Setting RCU_IDLE_GP_DELAY too high can hang your
1591 * system. And if you are -that- concerned about energy efficiency,
1592 * just power the system down and be done with it!
1593 * RCU_IDLE_LAZY_GP_DELAY gives the number of jiffies that a CPU is
1594 * permitted to sleep in dyntick-idle mode with only lazy RCU
1595 * callbacks pending. Setting this too high can OOM your system.
1597 * The values below work well in practice. If future workloads require
1598 * adjustment, they can be converted into kernel config parameters, though
1599 * making the state machine smarter might be a better option.
#define RCU_IDLE_FLUSHES 5		/* Number of dyntick-idle tries. */
#define RCU_IDLE_OPT_FLUSHES 3		/* Optional dyntick-idle tries. */
#define RCU_IDLE_GP_DELAY 4		/* Roughly one grace period. */
#define RCU_IDLE_LAZY_GP_DELAY (6 * HZ)	/* Roughly six seconds. */
1606 extern int tick_nohz_enabled;
1609 * Does the specified flavor of RCU have non-lazy callbacks pending on
1610 * the specified CPU? Both RCU flavor and CPU are specified by the
1611 * rcu_data structure.
1613 static bool __rcu_cpu_has_nonlazy_callbacks(struct rcu_data *rdp)
1615 return rdp->qlen != rdp->qlen_lazy;
1618 #ifdef CONFIG_TREE_PREEMPT_RCU
1621 * Are there non-lazy RCU-preempt callbacks? (There cannot be if there
1622 * is no RCU-preempt in the kernel.)
1624 static bool rcu_preempt_cpu_has_nonlazy_callbacks(int cpu)
1626 struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);
1628 return __rcu_cpu_has_nonlazy_callbacks(rdp);
1631 #else /* #ifdef CONFIG_TREE_PREEMPT_RCU */
1633 static bool rcu_preempt_cpu_has_nonlazy_callbacks(int cpu)
1638 #endif /* else #ifdef CONFIG_TREE_PREEMPT_RCU */
1641 * Does any flavor of RCU have non-lazy callbacks on the specified CPU?
1643 static bool rcu_cpu_has_nonlazy_callbacks(int cpu)
1645 return __rcu_cpu_has_nonlazy_callbacks(&per_cpu(rcu_sched_data, cpu)) ||
1646 __rcu_cpu_has_nonlazy_callbacks(&per_cpu(rcu_bh_data, cpu)) ||
1647 rcu_preempt_cpu_has_nonlazy_callbacks(cpu);
1651 * Allow the CPU to enter dyntick-idle mode if either: (1) There are no
1652 * callbacks on this CPU, (2) this CPU has not yet attempted to enter
1653 * dyntick-idle mode, or (3) this CPU is in the process of attempting to
1654 * enter dyntick-idle mode. Otherwise, if we have recently tried and failed
1655 * to enter dyntick-idle mode, we refuse to try to enter it. After all,
1656 * it is better to incur scheduling-clock interrupts than to spin
1657 * continuously for the same time duration!
1659 * The delta_jiffies argument is used to store the time when RCU is
1660 * going to need the CPU again if it still has callbacks. The reason
1661 * for this is that rcu_prepare_for_idle() might need to post a timer,
1662 * but if so, it will do so after tick_nohz_stop_sched_tick() has set
1663 * the wakeup time for this CPU. This means that RCU's timer can be
* delayed until the wakeup time, which defeats the purpose of posting a timer.
1667 int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
1669 struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
1671 /* Flag a new idle sojourn to the idle-entry state machine. */
1672 rdtp->idle_first_pass = 1;
1673 /* If no callbacks, RCU doesn't need the CPU. */
1674 if (!rcu_cpu_has_callbacks(cpu)) {
1675 *delta_jiffies = ULONG_MAX;
1678 if (rdtp->dyntick_holdoff == jiffies) {
1679 /* RCU recently tried and failed, so don't try again. */
1683 /* Set up for the possibility that RCU will post a timer. */
1684 if (rcu_cpu_has_nonlazy_callbacks(cpu)) {
1685 *delta_jiffies = round_up(RCU_IDLE_GP_DELAY + jiffies,
1686 RCU_IDLE_GP_DELAY) - jiffies;
1688 *delta_jiffies = jiffies + RCU_IDLE_LAZY_GP_DELAY;
1689 *delta_jiffies = round_jiffies(*delta_jiffies) - jiffies;
1695 * Handler for smp_call_function_single(). The only point of this
1696 * handler is to wake the CPU up, so the handler does only tracing.
1698 void rcu_idle_demigrate(void *unused)
1700 trace_rcu_prep_idle("Demigrate");
1704 * Timer handler used to force CPU to start pushing its remaining RCU
1705 * callbacks in the case where it entered dyntick-idle mode with callbacks
* pending. The handler doesn't really need to do anything because the
1707 * real work is done upon re-entry to idle, or by the next scheduling-clock
1708 * interrupt should idle not be re-entered.
* One special case: the timer gets migrated without awakening the CPU
* on which the timer was scheduled. In this case, we must wake up
1712 * that CPU. We do so with smp_call_function_single().
1714 static void rcu_idle_gp_timer_func(unsigned long cpu_in)
1716 int cpu = (int)cpu_in;
1718 trace_rcu_prep_idle("Timer");
1719 if (cpu != smp_processor_id())
1720 smp_call_function_single(cpu, rcu_idle_demigrate, NULL, 0);
1722 WARN_ON_ONCE(1); /* Getting here can hang the system... */
1726 * Initialize the timer used to pull CPUs out of dyntick-idle mode.
1728 static void rcu_prepare_for_idle_init(int cpu)
1730 struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
1732 rdtp->dyntick_holdoff = jiffies - 1;
1733 setup_timer(&rdtp->idle_gp_timer, rcu_idle_gp_timer_func, cpu);
1734 rdtp->idle_gp_timer_expires = jiffies - 1;
1735 rdtp->idle_first_pass = 1;
1739 * Clean up for exit from idle. Because we are exiting from idle, there
1740 * is no longer any point to ->idle_gp_timer, so cancel it. This will
1741 * do nothing if this timer is not active, so just cancel it unconditionally.
1743 static void rcu_cleanup_after_idle(int cpu)
1745 struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
1747 del_timer(&rdtp->idle_gp_timer);
1748 trace_rcu_prep_idle("Cleanup after idle");
1749 rdtp->tick_nohz_enabled_snap = ACCESS_ONCE(tick_nohz_enabled);
1753 * Check to see if any RCU-related work can be done by the current CPU,
1754 * and if so, schedule a softirq to get it done. This function is part
1755 * of the RCU implementation; it is -not- an exported member of the RCU API.
1757 * The idea is for the current CPU to clear out all work required by the
1758 * RCU core for the current grace period, so that this CPU can be permitted
1759 * to enter dyntick-idle mode. In some cases, it will need to be awakened
1760 * at the end of the grace period by whatever CPU ends the grace period.
1761 * This allows CPUs to go dyntick-idle more quickly, and to reduce the
1762 * number of wakeups by a modest integer factor.
1764 * Because it is not legal to invoke rcu_process_callbacks() with irqs
* disabled, we do one pass of force_quiescent_state(), then do an
1766 * invoke_rcu_core() to cause rcu_process_callbacks() to be invoked
1767 * later. The ->dyntick_drain field controls the sequencing.
1769 * The caller must have disabled interrupts.
1771 static void rcu_prepare_for_idle(int cpu)
1773 struct timer_list *tp;
1774 struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
1777 /* Handle nohz enablement switches conservatively. */
1778 tne = ACCESS_ONCE(tick_nohz_enabled);
1779 if (tne != rdtp->tick_nohz_enabled_snap) {
1780 if (rcu_cpu_has_callbacks(cpu))
1781 invoke_rcu_core(); /* force nohz to see update. */
1782 rdtp->tick_nohz_enabled_snap = tne;
1788 /* Adaptive-tick mode, where usermode execution is idle to RCU. */
1789 if (!is_idle_task(current)) {
1790 rdtp->dyntick_holdoff = jiffies - 1;
1791 if (rcu_cpu_has_nonlazy_callbacks(cpu)) {
1792 trace_rcu_prep_idle("User dyntick with callbacks");
1793 rdtp->idle_gp_timer_expires =
1794 round_up(jiffies + RCU_IDLE_GP_DELAY,
1796 } else if (rcu_cpu_has_callbacks(cpu)) {
1797 rdtp->idle_gp_timer_expires =
1798 round_jiffies(jiffies + RCU_IDLE_LAZY_GP_DELAY);
1799 trace_rcu_prep_idle("User dyntick with lazy callbacks");
1803 tp = &rdtp->idle_gp_timer;
1804 mod_timer_pinned(tp, rdtp->idle_gp_timer_expires);
1809 * If this is an idle re-entry, for example, due to use of
1810 * RCU_NONIDLE() or the new idle-loop tracing API within the idle
1811 * loop, then don't take any state-machine actions, unless the
1812 * momentary exit from idle queued additional non-lazy callbacks.
1813 * Instead, repost the ->idle_gp_timer if this CPU has callbacks
1816 if (!rdtp->idle_first_pass &&
1817 (rdtp->nonlazy_posted == rdtp->nonlazy_posted_snap)) {
1818 if (rcu_cpu_has_callbacks(cpu)) {
1819 tp = &rdtp->idle_gp_timer;
1820 mod_timer_pinned(tp, rdtp->idle_gp_timer_expires);
1824 rdtp->idle_first_pass = 0;
1825 rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted - 1;
1828 * If there are no callbacks on this CPU, enter dyntick-idle mode.
1829 * Also reset state to avoid prejudicing later attempts.
1831 if (!rcu_cpu_has_callbacks(cpu)) {
1832 rdtp->dyntick_holdoff = jiffies - 1;
1833 rdtp->dyntick_drain = 0;
1834 trace_rcu_prep_idle("No callbacks");
1839 * If in holdoff mode, just return. We will presumably have
1840 * refrained from disabling the scheduling-clock tick.
1842 if (rdtp->dyntick_holdoff == jiffies) {
1843 trace_rcu_prep_idle("In holdoff");
1847 /* Check and update the ->dyntick_drain sequencing. */
1848 if (rdtp->dyntick_drain <= 0) {
1849 /* First time through, initialize the counter. */
1850 rdtp->dyntick_drain = RCU_IDLE_FLUSHES;
1851 } else if (rdtp->dyntick_drain <= RCU_IDLE_OPT_FLUSHES &&
1852 !rcu_pending(cpu) &&
1853 !local_softirq_pending()) {
1854 /* Can we go dyntick-idle despite still having callbacks? */
1855 rdtp->dyntick_drain = 0;
1856 rdtp->dyntick_holdoff = jiffies;
1857 if (rcu_cpu_has_nonlazy_callbacks(cpu)) {
1858 trace_rcu_prep_idle("Dyntick with callbacks");
1859 rdtp->idle_gp_timer_expires =
1860 round_up(jiffies + RCU_IDLE_GP_DELAY,
1863 rdtp->idle_gp_timer_expires =
1864 round_jiffies(jiffies + RCU_IDLE_LAZY_GP_DELAY);
1865 trace_rcu_prep_idle("Dyntick with lazy callbacks");
1867 tp = &rdtp->idle_gp_timer;
1868 mod_timer_pinned(tp, rdtp->idle_gp_timer_expires);
1869 rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted;
1870 return; /* Nothing more to do immediately. */
1871 } else if (--(rdtp->dyntick_drain) <= 0) {
1872 /* We have hit the limit, so time to give up. */
1873 rdtp->dyntick_holdoff = jiffies;
1874 trace_rcu_prep_idle("Begin holdoff");
1875 invoke_rcu_core(); /* Force the CPU out of dyntick-idle. */
1880 * Do one step of pushing the remaining RCU callbacks through
1881 * the RCU core state machine.
1883 #ifdef CONFIG_TREE_PREEMPT_RCU
1884 if (per_cpu(rcu_preempt_data, cpu).nxtlist) {
1885 rcu_preempt_qs(cpu);
1886 force_quiescent_state(&rcu_preempt_state);
1887 }
1888 #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
1889 if (per_cpu(rcu_sched_data, cpu).nxtlist) {
1890 rcu_sched_qs(cpu);
1891 force_quiescent_state(&rcu_sched_state);
1892 }
1893 if (per_cpu(rcu_bh_data, cpu).nxtlist) {
1894 rcu_bh_qs(cpu);
1895 force_quiescent_state(&rcu_bh_state);
1896 }
1898 /*
1899 * If RCU callbacks are still pending, RCU still needs this CPU.
1900 * So try forcing the callbacks through the grace period.
1901 */
1902 if (rcu_cpu_has_callbacks(cpu)) {
1903 trace_rcu_prep_idle("More callbacks");
1904 invoke_rcu_core();
1905 } else {
1906 trace_rcu_prep_idle("Callbacks drained");
1907 }
1908 }
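/*
 * Illustrative sketch added by the editor (not part of the original file):
 * the "idle re-entry" case handled above arises when code running in the
 * idle loop briefly needs RCU, which it signals by wrapping the RCU-using
 * statement in RCU_NONIDLE() from include/linux/rcupdate.h.  A minimal
 * sketch, in which trace_foo_idle_event() is a made-up placeholder:
 */
static void example_idle_hook(void)
{
	/* Momentarily exit RCU's idle mode around the RCU-using statement. */
	RCU_NONIDLE(trace_foo_idle_event());
}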
1910 /*
1911 * Keep a running count of the number of non-lazy callbacks posted
1912 * on this CPU. This running counter (which is never decremented) allows
1913 * rcu_prepare_for_idle() to detect when something out of the idle loop
1914 * posts a callback, even if an equal number of callbacks are invoked.
1915 * Of course, callbacks should only be posted from within a trace event
1916 * designed to be called from idle or from within RCU_NONIDLE().
1917 */
1918 static void rcu_idle_count_callbacks_posted(void)
1919 {
1920 __this_cpu_add(rcu_dynticks.nonlazy_posted, 1);
1921 }
1923 /*
1924 * Data for flushing lazy RCU callbacks at OOM time.
1925 */
1926 static atomic_t oom_callback_count;
1927 static DECLARE_WAIT_QUEUE_HEAD(oom_callback_wq);
1929 /*
1930 * RCU OOM callback -- decrement the outstanding count and deliver the
1931 * wake-up if we are the last one.
1932 */
1933 static void rcu_oom_callback(struct rcu_head *rhp)
1934 {
1935 if (atomic_dec_and_test(&oom_callback_count))
1936 wake_up(&oom_callback_wq);
1937 }
1939 /*
1940 * Post an rcu_oom_notify callback on the current CPU if it has at
1941 * least one lazy callback. This will unnecessarily post callbacks
1942 * to CPUs that already have a non-lazy callback at the end of their
1943 * callback list, but this is an infrequent operation, so accept some
1944 * extra overhead to keep things simple.
1945 */
1946 static void rcu_oom_notify_cpu(void *unused)
1947 {
1948 struct rcu_state *rsp;
1949 struct rcu_data *rdp;
1951 for_each_rcu_flavor(rsp) {
1952 rdp = __this_cpu_ptr(rsp->rda);
1953 if (rdp->qlen_lazy != 0) {
1954 atomic_inc(&oom_callback_count);
1955 rsp->call(&rdp->oom_head, rcu_oom_callback);
1956 }
1957 }
1958 }
1960 /*
1961 * If low on memory, ensure that each CPU has a non-lazy callback.
1962 * This will wake up CPUs that have only lazy callbacks, in turn
1963 * ensuring that they free up the corresponding memory in a timely manner.
1964 * Because an uncertain amount of memory will be freed in some uncertain
1965 * timeframe, we do not claim to have freed anything.
1966 */
1967 static int rcu_oom_notify(struct notifier_block *self,
1968 unsigned long notused, void *nfreed)
1969 {
1970 int cpu;
1972 /* Wait for callbacks from earlier instance to complete. */
1973 wait_event(oom_callback_wq, atomic_read(&oom_callback_count) == 0);
1975 /*
1976 * Prevent premature wakeup: ensure that all increments happen
1977 * before there is a chance of the counter reaching zero.
1978 */
1979 atomic_set(&oom_callback_count, 1);
1981 get_online_cpus();
1982 for_each_online_cpu(cpu) {
1983 smp_call_function_single(cpu, rcu_oom_notify_cpu, NULL, 1);
1984 cond_resched();
1985 }
1986 put_online_cpus();
1988 /* Unconditionally decrement: no need to wake ourselves up. */
1989 atomic_dec(&oom_callback_count);
1991 return NOTIFY_OK;
1992 }
1994 static struct notifier_block rcu_oom_nb = {
1995 .notifier_call = rcu_oom_notify
1996 };
1998 static int __init rcu_register_oom_notifier(void)
1999 {
2000 register_oom_notifier(&rcu_oom_nb);
2001 return 0;
2002 }
2003 early_initcall(rcu_register_oom_notifier);
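/*
 * Hedged example added by the editor (not part of the original file):
 * other subsystems can hook the same OOM notifier chain to shed cached
 * memory when the system is under memory pressure.  A minimal sketch, in
 * which shrink_my_cache() and the my_* names are made-up placeholders
 * (linux/oom.h is already included by this file):
 */
static unsigned long shrink_my_cache(void)
{
	return 0;	/* made-up helper: drop caches and report what was freed */
}

static int my_oom_notify(struct notifier_block *nb,
			 unsigned long unused, void *freed)
{
	/* Report anything we managed to free so the OOM killer can back off. */
	*(unsigned long *)freed += shrink_my_cache();
	return NOTIFY_OK;
}

static struct notifier_block my_oom_nb = {
	.notifier_call = my_oom_notify,
};

static int __init my_register_oom_notifier(void)
{
	register_oom_notifier(&my_oom_nb);
	return 0;
}
early_initcall(my_register_oom_notifier);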
2005 #endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */
2007 #ifdef CONFIG_RCU_CPU_STALL_INFO
2009 #ifdef CONFIG_RCU_FAST_NO_HZ
2011 static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
2012 {
2013 struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
2014 struct timer_list *tltp = &rdtp->idle_gp_timer;
2015 char c;
2017 c = rdtp->dyntick_holdoff == jiffies ? 'H' : '.';
2018 if (timer_pending(tltp))
2019 sprintf(cp, "drain=%d %c timer=%lu",
2020 rdtp->dyntick_drain, c, tltp->expires - jiffies);
2021 else
2022 sprintf(cp, "drain=%d %c timer not pending",
2023 rdtp->dyntick_drain, c);
2024 }
2026 #else /* #ifdef CONFIG_RCU_FAST_NO_HZ */
2028 static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
2029 {
2030 *cp = '\0';
2031 }
2033 #endif /* #else #ifdef CONFIG_RCU_FAST_NO_HZ */
2035 /* Initiate the stall-info list. */
2036 static void print_cpu_stall_info_begin(void)
2037 {
2038 printk(KERN_CONT "\n");
2039 }
2041 /*
2042 * Print out diagnostic information for the specified stalled CPU.
2044 * If the specified CPU is aware of the current RCU grace period
2045 * (flavor specified by rsp), then print the number of scheduling
2046 * clock interrupts the CPU has taken during the time that it has
2047 * been aware. Otherwise, print the number of RCU grace periods
2048 * that this CPU is ignorant of, for example, "1" if the CPU was
2049 * aware of the previous grace period.
2051 * Also print out idle and (if CONFIG_RCU_FAST_NO_HZ) idle-entry info.
2052 */
2053 static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
2054 {
2055 char fast_no_hz[72];
2056 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
2057 struct rcu_dynticks *rdtp = rdp->dynticks;
2058 char *ticks_title;
2059 unsigned long ticks_value;
2061 if (rsp->gpnum == rdp->gpnum) {
2062 ticks_title = "ticks this GP";
2063 ticks_value = rdp->ticks_this_gp;
2064 } else {
2065 ticks_title = "GPs behind";
2066 ticks_value = rsp->gpnum - rdp->gpnum;
2067 }
2068 print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
2069 printk(KERN_ERR "\t%d: (%lu %s) idle=%03x/%llx/%d %s\n",
2070 cpu, ticks_value, ticks_title,
2071 atomic_read(&rdtp->dynticks) & 0xfff,
2072 rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
2073 fast_no_hz);
2074 }
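/*
 * Illustrative note added by the editor (values invented, not taken from
 * an actual log): with the format strings above, each stalled CPU
 * produces one line such as
 *
 *	1: (3 GPs behind) idle=f25/0/0 drain=0 . timer not pending
 *
 * meaning CPU 1 has not noticed the last three grace periods, the low
 * 12 bits of its ->dynticks counter and its two nesting counts follow
 * "idle=", and the trailing text comes from print_cpu_stall_fast_no_hz().
 */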
2076 /* Terminate the stall-info list. */
2077 static void print_cpu_stall_info_end(void)
2078 {
2079 printk(KERN_ERR "\t");
2080 }
2082 /* Zero ->ticks_this_gp for all flavors of RCU. */
2083 static void zero_cpu_stall_ticks(struct rcu_data *rdp)
2084 {
2085 rdp->ticks_this_gp = 0;
2086 }
2088 /* Increment ->ticks_this_gp for all flavors of RCU. */
2089 static void increment_cpu_stall_ticks(void)
2090 {
2091 struct rcu_state *rsp;
2093 for_each_rcu_flavor(rsp)
2094 __this_cpu_ptr(rsp->rda)->ticks_this_gp++;
2095 }
2097 #else /* #ifdef CONFIG_RCU_CPU_STALL_INFO */
2099 static void print_cpu_stall_info_begin(void)
2100 {
2101 printk(KERN_CONT " {");
2102 }
2104 static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
2105 {
2106 printk(KERN_CONT " %d", cpu);
2107 }
2109 static void print_cpu_stall_info_end(void)
2110 {
2111 printk(KERN_CONT "} ");
2112 }
2114 static void zero_cpu_stall_ticks(struct rcu_data *rdp)
2115 {
2116 }
2118 static void increment_cpu_stall_ticks(void)
2119 {
2120 }
2122 #endif /* #else #ifdef CONFIG_RCU_CPU_STALL_INFO */
2124 #ifdef CONFIG_RCU_NOCB_CPU
2126 /*
2127 * Offload callback processing from the boot-time-specified set of CPUs
2128 * specified by rcu_nocb_mask. For each CPU in the set, there is a
2129 * kthread created that pulls the callbacks from the corresponding CPU,
2130 * waits for a grace period to elapse, and invokes the callbacks.
2131 * The no-CBs CPUs do a wake_up() on their kthread when they insert
2132 * a callback into any empty list, unless the rcu_nocb_poll boot parameter
2133 * has been specified, in which case each kthread actively polls its
2134 * CPU. (Which isn't so great for energy efficiency, but which does
2135 * reduce RCU's overhead on that CPU.)
2137 * This is intended to be used in conjunction with Frederic Weisbecker's
2138 * adaptive-idle work, which would seriously reduce OS jitter on CPUs
2139 * running CPU-bound user-mode computations.
2141 * Offloading of callback processing could also in theory be used as
2142 * an energy-efficiency measure because CPUs with no RCU callbacks
2143 * queued are more aggressive about entering dyntick-idle mode.
2144 */
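/*
 * Background sketch added by the editor (not part of the original file):
 * the callbacks being offloaded to the "rcuo" kthreads below are ordinary
 * call_rcu() callbacks posted by code such as the following, in which
 * struct foo and its helpers are made-up placeholders (kfree() comes from
 * linux/slab.h).  On a no-CBs CPU the callback is queued for the kthread
 * rather than for softirq processing, but callers need not care which
 * path is used.
 */
struct foo {
	int data;
	struct rcu_head rcu;
};

static void foo_reclaim(struct rcu_head *rhp)
{
	kfree(container_of(rhp, struct foo, rcu));
}

static void foo_release(struct foo *fp)
{
	call_rcu(&fp->rcu, foo_reclaim);	/* free after a grace period */
}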
2147 /* Parse the boot-time rcu_nocb_mask CPU list from the kernel parameters. */
2148 static int __init rcu_nocb_setup(char *str)
2149 {
2150 alloc_bootmem_cpumask_var(&rcu_nocb_mask);
2151 have_rcu_nocb_mask = true;
2152 cpulist_parse(str, rcu_nocb_mask);
2153 return 1;
2154 }
2155 __setup("rcu_nocbs=", rcu_nocb_setup);
2157 static int __init parse_rcu_nocb_poll(char *arg)
2158 {
2159 rcu_nocb_poll = 1;
2160 return 0;
2161 }
2162 early_param("rcu_nocb_poll", parse_rcu_nocb_poll);
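/*
 * Usage note added by the editor (not in the original file): a system
 * that wants CPUs 1-7 handled by rcuo kthreads, with those kthreads
 * polling rather than being woken on enqueue, might boot with, for
 * example:
 *
 *	rcu_nocbs=1-7 rcu_nocb_poll
 *
 * The CPU list uses the usual cpulist format accepted by cpulist_parse().
 */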
2164 /*
2165 * Does this CPU need a grace period due to offloaded callbacks?
2166 */
2167 static int rcu_nocb_needs_gp(struct rcu_data *rdp)
2168 {
2169 return rdp->nocb_needs_gp;
2170 }
2172 /* Is the specified CPU a no-CBs CPU? */
2173 static bool is_nocb_cpu(int cpu)
2174 {
2175 if (have_rcu_nocb_mask)
2176 return cpumask_test_cpu(cpu, rcu_nocb_mask);
2177 return false;
2178 }
2180 /*
2181 * Enqueue the specified string of rcu_head structures onto the specified
2182 * CPU's no-CBs lists. The CPU is specified by rdp, the head of the
2183 * string by rhp, and the tail of the string by rhtp. The non-lazy/lazy
2184 * counts are supplied by rhcount and rhcount_lazy.
2186 * If warranted, also wake up the kthread servicing this CPU's queues.
2187 */
2188 static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
2189 struct rcu_head *rhp,
2190 struct rcu_head **rhtp,
2191 int rhcount, int rhcount_lazy)
2192 {
2193 int len;
2194 struct rcu_head **old_rhpp;
2195 struct task_struct *t;
2197 /* Enqueue the callback on the nocb list and update counts. */
2198 old_rhpp = xchg(&rdp->nocb_tail, rhtp);
2199 ACCESS_ONCE(*old_rhpp) = rhp;
2200 atomic_long_add(rhcount, &rdp->nocb_q_count);
2201 atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy);
2203 /* If we are not being polled and there is a kthread, awaken it ... */
2204 t = ACCESS_ONCE(rdp->nocb_kthread);
2205 if (rcu_nocb_poll || !t)
2206 return;
2207 len = atomic_long_read(&rdp->nocb_q_count);
2208 if (old_rhpp == &rdp->nocb_head) {
2209 wake_up(&rdp->nocb_wq); /* ... only if queue was empty ... */
2210 rdp->qlen_last_fqs_check = 0;
2211 } else if (len > rdp->qlen_last_fqs_check + qhimark) {
2212 wake_up_process(t); /* ... or if many callbacks queued. */
2213 rdp->qlen_last_fqs_check = LONG_MAX / 2;
2214 }
2215 }
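/*
 * Note added by the editor for clarity (not in the original file): the
 * enqueue above implements a lock-free multi-producer, single-consumer
 * list.  For a single callback rhp, the rhtp argument is &rhp->next, so
 * the sequence reduces to:
 *
 *	old_rhpp = xchg(&rdp->nocb_tail, &rhp->next);	// claim old tail slot
 *	ACCESS_ONCE(*old_rhpp) = rhp;			// publish the callback
 *
 * A second producer running between the xchg() and the store simply
 * appends behind the not-yet-published callback, which is why the
 * consumer in rcu_nocb_kthread() below waits for a NULL ->next to be
 * filled in when its snapshot's tail has moved on.
 */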
2218 /*
2219 * This is a helper for __call_rcu(), which invokes this when the normal
2220 * callback queue is inoperable. If this is not a no-CBs CPU, this
2221 * function returns failure back to __call_rcu(), which can complain
2222 * appropriately.
2224 * Otherwise, this function queues the callback where the corresponding
2225 * "rcuo" kthread can find it.
2226 */
2227 static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
2228 bool lazy)
2229 {
2231 if (!is_nocb_cpu(rdp->cpu))
2232 return 0;
2233 __call_rcu_nocb_enqueue(rdp, rhp, &rhp->next, 1, lazy);
2234 return 1;
2235 }
2237 /*
2238 * Adopt orphaned callbacks on a no-CBs CPU, or return 0 if this is
2239 * not a no-CBs CPU.
2240 */
2241 static bool __maybe_unused rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
2242 struct rcu_data *rdp)
2243 {
2244 long ql = rsp->qlen;
2245 long qll = rsp->qlen_lazy;
2247 /* If this is not a no-CBs CPU, tell the caller to do it the old way. */
2248 if (!is_nocb_cpu(smp_processor_id()))
2249 return 0;
2250 rsp->qlen = 0;
2251 rsp->qlen_lazy = 0;
2253 /* First, enqueue the donelist, if any. This preserves CB ordering. */
2254 if (rsp->orphan_donelist != NULL) {
2255 __call_rcu_nocb_enqueue(rdp, rsp->orphan_donelist,
2256 rsp->orphan_donetail, ql, qll);
2257 ql = qll = 0;
2258 rsp->orphan_donelist = NULL;
2259 rsp->orphan_donetail = &rsp->orphan_donelist;
2260 }
2261 if (rsp->orphan_nxtlist != NULL) {
2262 __call_rcu_nocb_enqueue(rdp, rsp->orphan_nxtlist,
2263 rsp->orphan_nxttail, ql, qll);
2264 ql = qll = 0;
2265 rsp->orphan_nxtlist = NULL;
2266 rsp->orphan_nxttail = &rsp->orphan_nxtlist;
2267 }
2268 return 1;
2269 }
2271 /*
2272 * If necessary, kick off a new grace period, and either way wait
2273 * for a subsequent grace period to complete.
2274 */
2275 static void rcu_nocb_wait_gp(struct rcu_data *rdp)
2276 {
2277 unsigned long c;
2278 unsigned long flags;
2279 unsigned long j;
2280 struct rcu_node *rnp = rdp->mynode;
2282 raw_spin_lock_irqsave(&rnp->lock, flags);
2283 c = rnp->completed + 2;
2284 rdp->nocb_needs_gp = true;
2285 raw_spin_unlock_irqrestore(&rnp->lock, flags);
2288 * Wait for the grace period. Do so interruptibly to avoid messing
2289 * up the load average.
2290 */
2291 for (;;) {
2292 j = jiffies;
2293 schedule_timeout_interruptible(2);
2294 raw_spin_lock_irqsave(&rnp->lock, flags);
2295 if (ULONG_CMP_GE(rnp->completed, c)) {
2296 rdp->nocb_needs_gp = false;
2297 raw_spin_unlock_irqrestore(&rnp->lock, flags);
2298 break;
2299 }
2300 if (j == jiffies)
2301 flush_signals(current);
2302 raw_spin_unlock_irqrestore(&rnp->lock, flags);
2303 }
2304 smp_mb(); /* Ensure that CB invocation happens after GP end. */
2305 }
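/*
 * Note added by the editor for clarity (not in the original file): if
 * ->completed is N when the request is recorded, grace period N+1 might
 * already be under way and so might not cover callbacks enqueued now,
 * but grace period N+2 cannot begin until later; hence the "+ 2" above
 * guarantees that a full grace period elapses before the callbacks are
 * invoked.  The ULONG_CMP_GE() comparison tolerates counter wrap;
 * assuming its usual definition in include/linux/rcupdate.h it is
 * roughly
 *
 *	#define ULONG_CMP_GE(a, b)	(ULONG_MAX / 2 >= (a) - (b))
 *
 * which treats differences of less than half the counter range as
 * "a is not older than b".
 */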
2307 /*
2308 * Per-rcu_data kthread, but only for no-CBs CPUs. Each kthread invokes
2309 * callbacks queued by the corresponding no-CBs CPU.
2310 */
2311 static int rcu_nocb_kthread(void *arg)
2312 {
2313 int c, cl;
2314 struct rcu_head *list;
2315 struct rcu_head *next;
2316 struct rcu_head **tail;
2317 struct rcu_data *rdp = arg;
2319 /* Each pass through this loop invokes one batch of callbacks */
2320 for (;;) {
2321 /* If not polling, wait for next batch of callbacks. */
2322 if (!rcu_nocb_poll)
2323 wait_event_interruptible(rdp->nocb_wq, rdp->nocb_head);
2324 list = ACCESS_ONCE(rdp->nocb_head);
2325 if (!list) {
2326 schedule_timeout_interruptible(1);
2327 flush_signals(current);
2328 continue;
2329 }
2331 /*
2332 * Extract queued callbacks, update counts, and wait
2333 * for a grace period to elapse.
2334 */
2335 ACCESS_ONCE(rdp->nocb_head) = NULL;
2336 tail = xchg(&rdp->nocb_tail, &rdp->nocb_head);
2337 c = atomic_long_xchg(&rdp->nocb_q_count, 0);
2338 cl = atomic_long_xchg(&rdp->nocb_q_count_lazy, 0);
2339 ACCESS_ONCE(rdp->nocb_p_count) += c;
2340 ACCESS_ONCE(rdp->nocb_p_count_lazy) += cl;
2341 rcu_nocb_wait_gp(rdp);
2343 /* Each pass through the following loop invokes a callback. */
2344 trace_rcu_batch_start(rdp->rsp->name, cl, c, -1);
2345 while (list) {
2346 next = list->next;
2348 /* Wait for enqueuing to complete, if needed. */
2349 while (next == NULL && &list->next != tail) {
2350 schedule_timeout_interruptible(1);
2351 next = list->next;
2352 }
2353 debug_rcu_head_unqueue(list);
2354 local_bh_disable();
2355 if (__rcu_reclaim(rdp->rsp->name, list))
2356 cl--;
2357 c--;
2358 local_bh_enable();
2359 list = next;
2360 }
2361 trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1);
2362 ACCESS_ONCE(rdp->nocb_p_count) -= c;
2363 ACCESS_ONCE(rdp->nocb_p_count_lazy) -= cl;
2364 rdp->n_nocbs_invoked += c;
2365 }
2366 return 0;
2367 }
2369 /* Initialize per-rcu_data variables for no-CBs CPUs. */
2370 static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
2371 {
2372 rdp->nocb_tail = &rdp->nocb_head;
2373 init_waitqueue_head(&rdp->nocb_wq);
2374 }
2376 /* Create a kthread for each RCU flavor for each no-CBs CPU. */
2377 static void __init rcu_spawn_nocb_kthreads(struct rcu_state *rsp)
2378 {
2379 int cpu;
2380 struct rcu_data *rdp;
2381 struct task_struct *t;
2383 if (rcu_nocb_mask == NULL)
2384 return;
2385 for_each_cpu(cpu, rcu_nocb_mask) {
2386 rdp = per_cpu_ptr(rsp->rda, cpu);
2387 t = kthread_run(rcu_nocb_kthread, rdp, "rcuo%d", cpu);
2388 BUG_ON(IS_ERR(t));
2389 ACCESS_ONCE(rdp->nocb_kthread) = t;
2390 }
2391 }
2393 /* Prevent __call_rcu() from enqueuing callbacks on no-CBs CPUs */
2394 static bool init_nocb_callback_list(struct rcu_data *rdp)
2395 {
2396 if (rcu_nocb_mask == NULL ||
2397 !cpumask_test_cpu(rdp->cpu, rcu_nocb_mask))
2398 return false;
2399 rdp->nxttail[RCU_NEXT_TAIL] = NULL;
2400 return true;
2401 }
2403 #else /* #ifdef CONFIG_RCU_NOCB_CPU */
2405 static int rcu_nocb_needs_gp(struct rcu_data *rdp)
2406 {
2407 return 0;
2408 }
2410 static bool is_nocb_cpu(int cpu)
2411 {
2412 return false;
2413 }
2415 static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
2416 bool lazy)
2417 {
2418 return 0;
2419 }
2421 static bool __maybe_unused rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
2422 struct rcu_data *rdp)
2423 {
2424 return 0;
2425 }
2427 static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
2428 {
2429 }
2431 static void __init rcu_spawn_nocb_kthreads(struct rcu_state *rsp)
2432 {
2433 }
2435 static bool init_nocb_callback_list(struct rcu_data *rdp)
2436 {
2437 return false;
2438 }
2440 #endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */