2 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
3 * Internal non-public definitions that provide either classic
4 * or preemptible semantics.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20 * Copyright Red Hat, 2009
21 * Copyright IBM Corporation, 2009
23 * Author: Ingo Molnar <mingo@elte.hu>
24 * Paul E. McKenney <paulmck@linux.vnet.ibm.com>
27 #include <linux/delay.h>
28 #include <linux/stop_machine.h>
30 #define RCU_KTHREAD_PRIO 1
32 #ifdef CONFIG_RCU_BOOST
33 #define RCU_BOOST_PRIO CONFIG_RCU_BOOST_PRIO
34 #else
35 #define RCU_BOOST_PRIO RCU_KTHREAD_PRIO
36 #endif
39 * Check the RCU kernel configuration parameters and print informative
40 * messages about anything out of the ordinary. If you like #ifdef, you
41 * will love this function.
43 static void __init rcu_bootup_announce_oddness(void)
45 #ifdef CONFIG_RCU_TRACE
46 printk(KERN_INFO "\tRCU debugfs-based tracing is enabled.\n");
48 #if (defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 64) || (!defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 32)
49 printk(KERN_INFO "\tCONFIG_RCU_FANOUT set to non-default value of %d\n",
50 CONFIG_RCU_FANOUT);
52 #ifdef CONFIG_RCU_FANOUT_EXACT
53 printk(KERN_INFO "\tHierarchical RCU autobalancing is disabled.\n");
55 #ifdef CONFIG_RCU_FAST_NO_HZ
56 printk(KERN_INFO
57 "\tRCU dyntick-idle grace-period acceleration is enabled.\n");
59 #ifdef CONFIG_PROVE_RCU
60 printk(KERN_INFO "\tRCU lockdep checking is enabled.\n");
62 #ifdef CONFIG_RCU_TORTURE_TEST_RUNNABLE
63 printk(KERN_INFO "\tRCU torture testing starts during boot.\n");
65 #if defined(CONFIG_TREE_PREEMPT_RCU) && !defined(CONFIG_RCU_CPU_STALL_VERBOSE)
66 printk(KERN_INFO "\tDump stacks of tasks blocking RCU-preempt GP.\n");
68 #if defined(CONFIG_RCU_CPU_STALL_INFO)
69 printk(KERN_INFO "\tAdditional per-CPU info printed with stalls.\n");
71 #if NUM_RCU_LVL_4 != 0
72 printk(KERN_INFO "\tExperimental four-level hierarchy is enabled.\n");
76 #ifdef CONFIG_TREE_PREEMPT_RCU
78 struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt);
79 DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data);
80 static struct rcu_state *rcu_state = &rcu_preempt_state;
82 static void rcu_read_unlock_special(struct task_struct *t);
83 static int rcu_preempted_readers_exp(struct rcu_node *rnp);
86 * Tell them what RCU they are running.
88 static void __init rcu_bootup_announce(void)
90 printk(KERN_INFO "Preemptible hierarchical RCU implementation.\n");
91 rcu_bootup_announce_oddness();
95 * Return the number of RCU-preempt batches processed thus far
96 * for debug and statistics.
98 long rcu_batches_completed_preempt(void)
100 return rcu_preempt_state.completed;
102 EXPORT_SYMBOL_GPL(rcu_batches_completed_preempt);
105 * Return the number of RCU batches processed thus far for debug & stats.
107 long rcu_batches_completed(void)
109 return rcu_batches_completed_preempt();
111 EXPORT_SYMBOL_GPL(rcu_batches_completed);
114 * Force a quiescent state for preemptible RCU.
116 void rcu_force_quiescent_state(void)
118 force_quiescent_state(&rcu_preempt_state, 0);
120 EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
123 * Record a preemptible-RCU quiescent state for the specified CPU. Note
124 * that this just means that the task currently running on the CPU is
125 * not in a quiescent state. There might be any number of tasks blocked
126 * while in an RCU read-side critical section.
128 * Unlike the other rcu_*_qs() functions, callers to this function
129 * must disable irqs in order to protect the assignment to
130 * ->rcu_read_unlock_special.
132 static void rcu_preempt_qs(int cpu)
134 struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);
136 rdp->passed_quiesce_gpnum = rdp->gpnum;
138 if (rdp->passed_quiesce == 0)
139 trace_rcu_grace_period("rcu_preempt", rdp->gpnum, "cpuqs");
140 rdp->passed_quiesce = 1;
141 current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
145 * We have entered the scheduler, and the current task might soon be
146 * context-switched away from. If this task is in an RCU read-side
147 * critical section, we will no longer be able to rely on the CPU to
148 * record that fact, so we enqueue the task on the blkd_tasks list.
149 * The task will dequeue itself when it exits the outermost enclosing
150 * RCU read-side critical section. Therefore, the current grace period
151 * cannot be permitted to complete until the blkd_tasks list entries
152 * predating the current grace period drain, in other words, until
153 * rnp->gp_tasks becomes NULL.
155 * Caller must disable preemption.
157 static void rcu_preempt_note_context_switch(int cpu)
159 struct task_struct *t = current;
161 struct rcu_data *rdp;
162 struct rcu_node *rnp;
164 if (t->rcu_read_lock_nesting > 0 &&
165 (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {
167 /* Possibly blocking in an RCU read-side critical section. */
168 rdp = per_cpu_ptr(rcu_preempt_state.rda, cpu);
170 raw_spin_lock_irqsave(&rnp->lock, flags);
171 t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;
172 t->rcu_blocked_node = rnp;
175 * If this CPU has already checked in, then this task
176 * will hold up the next grace period rather than the
177 * current grace period. Queue the task accordingly.
178 * If the task is queued for the current grace period
179 * (i.e., this CPU has not yet passed through a quiescent
180 * state for the current grace period), then as long
181 * as that task remains queued, the current grace period
182 * cannot end. Note that there is some uncertainty as
183 * to exactly when the current grace period started.
184 * We take a conservative approach, which can result
185 * in unnecessarily waiting on tasks that started very
186 * slightly after the current grace period began. C'est
187 * la vie!!!
189 * But first, note that the current CPU must still be
190 * on line!
192 WARN_ON_ONCE((rdp->grpmask & rnp->qsmaskinit) == 0);
193 WARN_ON_ONCE(!list_empty(&t->rcu_node_entry));
194 if ((rnp->qsmask & rdp->grpmask) && rnp->gp_tasks != NULL) {
195 list_add(&t->rcu_node_entry, rnp->gp_tasks->prev);
196 rnp->gp_tasks = &t->rcu_node_entry;
197 #ifdef CONFIG_RCU_BOOST
198 if (rnp->boost_tasks != NULL)
199 rnp->boost_tasks = rnp->gp_tasks;
200 #endif /* #ifdef CONFIG_RCU_BOOST */
201 } else {
202 list_add(&t->rcu_node_entry, &rnp->blkd_tasks);
203 if (rnp->qsmask & rdp->grpmask)
204 rnp->gp_tasks = &t->rcu_node_entry;
206 trace_rcu_preempt_task(rdp->rsp->name,
208 (rnp->qsmask & rdp->grpmask)
211 raw_spin_unlock_irqrestore(&rnp->lock, flags);
212 } else if (t->rcu_read_lock_nesting < 0 &&
213 t->rcu_read_unlock_special) {
216 * Complete exit from RCU read-side critical section on
217 * behalf of preempted instance of __rcu_read_unlock().
219 rcu_read_unlock_special(t);
223 * Either we were not in an RCU read-side critical section to
224 * begin with, or we have now recorded that critical section
225 * globally. Either way, we can now note a quiescent state
226 * for this CPU. Again, if we were in an RCU read-side critical
227 * section, and if that critical section was blocking the current
228 * grace period, then the fact that the task has been enqueued
229 * means that we continue to block the current grace period.
231 local_irq_save(flags);
232 rcu_preempt_qs(cpu);
233 local_irq_restore(flags);
237 * Tree-preemptible RCU implementation for rcu_read_lock().
238 * Just increment ->rcu_read_lock_nesting, shared state will be updated
239 * if we block.
241 void __rcu_read_lock(void)
243 current->rcu_read_lock_nesting++;
244 barrier(); /* needed if we ever invoke rcu_read_lock in rcutree.c */
246 EXPORT_SYMBOL_GPL(__rcu_read_lock);
249 * Check for preempted RCU readers blocking the current grace period
250 * for the specified rcu_node structure. If the caller needs a reliable
251 * answer, it must hold the rcu_node's ->lock.
253 static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
255 return rnp->gp_tasks != NULL;
259 * Record a quiescent state for all tasks that were previously queued
260 * on the specified rcu_node structure and that were blocking the current
261 * RCU grace period. The caller must hold the specified rnp->lock with
262 * irqs disabled, and this lock is released upon return, but irqs remain
263 * disabled.
265 static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
266 __releases(rnp->lock)
269 struct rcu_node *rnp_p;
271 if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
272 raw_spin_unlock_irqrestore(&rnp->lock, flags);
273 return; /* Still need more quiescent states! */
276 rnp_p = rnp->parent;
277 if (rnp_p == NULL) {
279 * Either there is only one rcu_node in the tree,
280 * or tasks were kicked up to root rcu_node due to
281 * CPUs going offline.
283 rcu_report_qs_rsp(&rcu_preempt_state, flags);
284 return;
287 /* Report up the rest of the hierarchy. */
288 mask = rnp->grpmask;
289 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
290 raw_spin_lock(&rnp_p->lock); /* irqs already disabled. */
291 rcu_report_qs_rnp(mask, &rcu_preempt_state, rnp_p, flags);
295 * Advance a ->blkd_tasks-list pointer to the next entry, returning
296 * NULL instead if at the end of the list.
298 static struct list_head *rcu_next_node_entry(struct task_struct *t,
299 struct rcu_node *rnp)
301 struct list_head *np;
303 np = t->rcu_node_entry.next;
304 if (np == &rnp->blkd_tasks)
305 np = NULL;
306 return np;
310 * Handle special cases during rcu_read_unlock(), such as needing to
311 * notify RCU core processing or task having blocked during the RCU
312 * read-side critical section.
314 static noinline void rcu_read_unlock_special(struct task_struct *t)
320 struct list_head *np;
321 #ifdef CONFIG_RCU_BOOST
322 struct rt_mutex *rbmp = NULL;
323 #endif /* #ifdef CONFIG_RCU_BOOST */
324 struct rcu_node *rnp;
327 /* NMI handlers cannot block and cannot safely manipulate state. */
331 local_irq_save(flags);
334 * If RCU core is waiting for this CPU to exit critical section,
335 * let it know that we have done so.
337 special = t->rcu_read_unlock_special;
338 if (special & RCU_READ_UNLOCK_NEED_QS) {
339 rcu_preempt_qs(smp_processor_id());
342 /* Hardware IRQ handlers cannot block. */
343 if (in_irq() || in_serving_softirq()) {
344 local_irq_restore(flags);
345 return;
346 }
348 /* Clean up if blocked during RCU read-side critical section. */
349 if (special & RCU_READ_UNLOCK_BLOCKED) {
350 t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BLOCKED;
353 * Remove this task from the list it blocked on. The
354 * task can migrate while we acquire the lock, but at
355 * most one time. So at most two passes through loop.
357 for (;;) {
358 rnp = t->rcu_blocked_node;
359 raw_spin_lock(&rnp->lock); /* irqs already disabled. */
360 if (rnp == t->rcu_blocked_node)
361 break;
362 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
364 empty = !rcu_preempt_blocked_readers_cgp(rnp);
365 empty_exp = !rcu_preempted_readers_exp(rnp);
366 smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
367 np = rcu_next_node_entry(t, rnp);
368 list_del_init(&t->rcu_node_entry);
369 t->rcu_blocked_node = NULL;
370 trace_rcu_unlock_preempted_task("rcu_preempt",
371 rnp->gpnum, t->pid);
372 if (&t->rcu_node_entry == rnp->gp_tasks)
373 rnp->gp_tasks = np;
374 if (&t->rcu_node_entry == rnp->exp_tasks)
375 rnp->exp_tasks = np;
376 #ifdef CONFIG_RCU_BOOST
377 if (&t->rcu_node_entry == rnp->boost_tasks)
378 rnp->boost_tasks = np;
379 /* Snapshot/clear ->rcu_boost_mutex with rcu_node lock held. */
380 if (t->rcu_boost_mutex) {
381 rbmp = t->rcu_boost_mutex;
382 t->rcu_boost_mutex = NULL;
384 #endif /* #ifdef CONFIG_RCU_BOOST */
387 * If this was the last task on the current list, and if
388 * we aren't waiting on any CPUs, report the quiescent state.
389 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock,
390 * so we must take a snapshot of the expedited state.
392 empty_exp_now = !rcu_preempted_readers_exp(rnp);
393 if (!empty && !rcu_preempt_blocked_readers_cgp(rnp)) {
394 trace_rcu_quiescent_state_report("preempt_rcu",
401 rcu_report_unblock_qs_rnp(rnp, flags);
403 raw_spin_unlock_irqrestore(&rnp->lock, flags);
405 #ifdef CONFIG_RCU_BOOST
406 /* Unboost if we were boosted. */
407 if (rbmp)
408 rt_mutex_unlock(rbmp);
409 #endif /* #ifdef CONFIG_RCU_BOOST */
412 * If this was the last task on the expedited lists,
413 * then we need to report up the rcu_node hierarchy.
415 if (!empty_exp && empty_exp_now)
416 rcu_report_exp_rnp(&rcu_preempt_state, rnp, true);
418 local_irq_restore(flags);
423 * Tree-preemptible RCU implementation for rcu_read_unlock().
424 * Decrement ->rcu_read_lock_nesting. If the result is zero (outermost
425 * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
426 * invoke rcu_read_unlock_special() to clean up after a context switch
427 * in an RCU read-side critical section and other special cases.
429 void __rcu_read_unlock(void)
431 struct task_struct *t = current;
433 if (t->rcu_read_lock_nesting != 1)
434 --t->rcu_read_lock_nesting;
435 else {
436 barrier(); /* critical section before exit code. */
437 t->rcu_read_lock_nesting = INT_MIN;
438 barrier(); /* assign before ->rcu_read_unlock_special load */
439 if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
440 rcu_read_unlock_special(t);
441 barrier(); /* ->rcu_read_unlock_special load before assign */
442 t->rcu_read_lock_nesting = 0; /* Keep irq handlers happy. */
443 }
444 #ifdef CONFIG_PROVE_LOCKING
446 int rrln = ACCESS_ONCE(t->rcu_read_lock_nesting);
448 WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
450 #endif /* #ifdef CONFIG_PROVE_LOCKING */
452 EXPORT_SYMBOL_GPL(__rcu_read_unlock);
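/*
 * Editor-added illustrative sketch, not part of the original file: how a
 * reader nests rcu_read_lock()/rcu_read_unlock(), which map onto the
 * __rcu_read_lock()/__rcu_read_unlock() primitives above.  The demo_node
 * type, demo_head pointer, and demo_lookup() function are hypothetical.
 * Only the outermost rcu_read_unlock() can observe ->rcu_read_unlock_special
 * and invoke rcu_read_unlock_special(); inner unlocks just decrement
 * ->rcu_read_lock_nesting.
 */
struct demo_node {
	int key;
	int val;
	struct demo_node __rcu *next;
};

static struct demo_node __rcu *demo_head;

static int demo_lookup(int key)
{
	struct demo_node *p;
	int ret = -1;

	rcu_read_lock();			/* nesting: 0 -> 1 */
	for (p = rcu_dereference(demo_head); p != NULL;
	     p = rcu_dereference(p->next)) {
		rcu_read_lock();		/* nested: 1 -> 2, just an increment */
		if (p->key == key)
			ret = p->val;
		rcu_read_unlock();		/* 2 -> 1, no special processing */
	}
	rcu_read_unlock();			/* 1 -> 0, may run rcu_read_unlock_special() */
	return ret;
}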
454 #ifdef CONFIG_RCU_CPU_STALL_VERBOSE
457 * Dump detailed information for all tasks blocking the current RCU
458 * grace period on the specified rcu_node structure.
460 static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
463 struct task_struct *t;
465 if (!rcu_preempt_blocked_readers_cgp(rnp))
466 return;
467 raw_spin_lock_irqsave(&rnp->lock, flags);
468 t = list_entry(rnp->gp_tasks,
469 struct task_struct, rcu_node_entry);
470 list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry)
471 sched_show_task(t);
472 raw_spin_unlock_irqrestore(&rnp->lock, flags);
476 * Dump detailed information for all tasks blocking the current RCU
479 static void rcu_print_detail_task_stall(struct rcu_state *rsp)
481 struct rcu_node *rnp = rcu_get_root(rsp);
483 rcu_print_detail_task_stall_rnp(rnp);
484 rcu_for_each_leaf_node(rsp, rnp)
485 rcu_print_detail_task_stall_rnp(rnp);
488 #else /* #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */
490 static void rcu_print_detail_task_stall(struct rcu_state *rsp)
494 #endif /* #else #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */
496 #ifdef CONFIG_RCU_CPU_STALL_INFO
498 static void rcu_print_task_stall_begin(struct rcu_node *rnp)
500 printk(KERN_ERR "\tTasks blocked on level-%d rcu_node (CPUs %d-%d):",
501 rnp->level, rnp->grplo, rnp->grphi);
504 static void rcu_print_task_stall_end(void)
506 printk(KERN_CONT "\n");
509 #else /* #ifdef CONFIG_RCU_CPU_STALL_INFO */
511 static void rcu_print_task_stall_begin(struct rcu_node *rnp)
515 static void rcu_print_task_stall_end(void)
519 #endif /* #else #ifdef CONFIG_RCU_CPU_STALL_INFO */
522 * Scan the current list of tasks blocked within RCU read-side critical
523 * sections, printing out the tid of each.
525 static int rcu_print_task_stall(struct rcu_node *rnp)
527 struct task_struct *t;
528 int ndetected = 0;
530 if (!rcu_preempt_blocked_readers_cgp(rnp))
531 return 0;
532 rcu_print_task_stall_begin(rnp);
533 t = list_entry(rnp->gp_tasks,
534 struct task_struct, rcu_node_entry);
535 list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
536 printk(KERN_CONT " P%d", t->pid);
537 ndetected++;
538 }
539 rcu_print_task_stall_end();
541 return ndetected;
544 * Suppress preemptible RCU's CPU stall warnings by pushing the
545 * time of the next stall-warning message comfortably far into the
546 * future.
548 static void rcu_preempt_stall_reset(void)
550 rcu_preempt_state.jiffies_stall = jiffies + ULONG_MAX / 2;
554 * Check that the list of blocked tasks for the newly completed grace
555 * period is in fact empty. It is a serious bug to complete a grace
556 * period that still has RCU readers blocked! This function must be
557 * invoked -before- updating this rnp's ->gpnum, and the rnp's ->lock
558 * must be held by the caller.
560 * Also, if there are blocked tasks on the list, they automatically
561 * block the newly created grace period, so set up ->gp_tasks accordingly.
563 static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
565 WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp));
566 if (!list_empty(&rnp->blkd_tasks))
567 rnp->gp_tasks = rnp->blkd_tasks.next;
568 WARN_ON_ONCE(rnp->qsmask);
571 #ifdef CONFIG_HOTPLUG_CPU
574 * Handle tasklist migration for case in which all CPUs covered by the
575 * specified rcu_node have gone offline. Move them up to the root
576 * rcu_node. The reason for not just moving them to the immediate
577 * parent is to remove the need for rcu_read_unlock_special() to
578 * make more than two attempts to acquire the target rcu_node's lock.
579 * Returns true if there were tasks blocking the current RCU grace
582 * Returns 1 if there was previously a task blocking the current grace
583 * period on the specified rcu_node structure.
585 * The caller must hold rnp->lock with irqs disabled.
587 static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
588 struct rcu_node *rnp,
589 struct rcu_data *rdp)
591 struct list_head *lp;
592 struct list_head *lp_root;
593 int retval = 0;
594 struct rcu_node *rnp_root = rcu_get_root(rsp);
595 struct task_struct *t;
597 if (rnp == rnp_root) {
598 WARN_ONCE(1, "Last CPU thought to be offlined?");
599 return 0; /* Shouldn't happen: at least one CPU online. */
602 /* If we are on an internal node, complain bitterly. */
603 WARN_ON_ONCE(rnp != rdp->mynode);
606 * Move tasks up to root rcu_node. Don't try to get fancy for
607 * this corner-case operation -- just put this node's tasks
608 * at the head of the root node's list, and update the root node's
609 * ->gp_tasks and ->exp_tasks pointers to those of this node's,
610 * if non-NULL. This might result in waiting for more tasks than
611 * absolutely necessary, but this is a good performance/complexity
612 * tradeoff.
614 if (rcu_preempt_blocked_readers_cgp(rnp))
615 retval |= RCU_OFL_TASKS_NORM_GP;
616 if (rcu_preempted_readers_exp(rnp))
617 retval |= RCU_OFL_TASKS_EXP_GP;
618 lp = &rnp->blkd_tasks;
619 lp_root = &rnp_root->blkd_tasks;
620 while (!list_empty(lp)) {
621 t = list_entry(lp->next, typeof(*t), rcu_node_entry);
622 raw_spin_lock(&rnp_root->lock); /* irqs already disabled */
623 list_del(&t->rcu_node_entry);
624 t->rcu_blocked_node = rnp_root;
625 list_add(&t->rcu_node_entry, lp_root);
626 if (&t->rcu_node_entry == rnp->gp_tasks)
627 rnp_root->gp_tasks = rnp->gp_tasks;
628 if (&t->rcu_node_entry == rnp->exp_tasks)
629 rnp_root->exp_tasks = rnp->exp_tasks;
630 #ifdef CONFIG_RCU_BOOST
631 if (&t->rcu_node_entry == rnp->boost_tasks)
632 rnp_root->boost_tasks = rnp->boost_tasks;
633 #endif /* #ifdef CONFIG_RCU_BOOST */
634 raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */
637 #ifdef CONFIG_RCU_BOOST
638 /* In case root is being boosted and leaf is not. */
639 raw_spin_lock(&rnp_root->lock); /* irqs already disabled */
640 if (rnp_root->boost_tasks != NULL &&
641 rnp_root->boost_tasks != rnp_root->gp_tasks)
642 rnp_root->boost_tasks = rnp_root->gp_tasks;
643 raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */
644 #endif /* #ifdef CONFIG_RCU_BOOST */
646 rnp->gp_tasks = NULL;
647 rnp->exp_tasks = NULL;
649 return retval;
651 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
654 * Do CPU-offline processing for preemptible RCU.
656 static void rcu_preempt_cleanup_dead_cpu(int cpu)
658 rcu_cleanup_dead_cpu(cpu, &rcu_preempt_state);
662 * Check for a quiescent state from the current CPU. When a task blocks,
663 * the task is recorded in the corresponding CPU's rcu_node structure,
664 * which is checked elsewhere.
666 * Caller must disable hard irqs.
668 static void rcu_preempt_check_callbacks(int cpu)
670 struct task_struct *t = current;
672 if (t->rcu_read_lock_nesting == 0) {
673 rcu_preempt_qs(cpu);
674 return;
675 }
676 if (t->rcu_read_lock_nesting > 0 &&
677 per_cpu(rcu_preempt_data, cpu).qs_pending)
678 t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
682 * Process callbacks for preemptible RCU.
684 static void rcu_preempt_process_callbacks(void)
686 __rcu_process_callbacks(&rcu_preempt_state,
687 &__get_cpu_var(rcu_preempt_data));
690 #ifdef CONFIG_RCU_BOOST
692 static void rcu_preempt_do_callbacks(void)
694 rcu_do_batch(&rcu_preempt_state, &__get_cpu_var(rcu_preempt_data));
697 #endif /* #ifdef CONFIG_RCU_BOOST */
700 * Queue a preemptible-RCU callback for invocation after a grace period.
702 void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
704 __call_rcu(head, func, &rcu_preempt_state, 0);
706 EXPORT_SYMBOL_GPL(call_rcu);
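/*
 * Editor-added illustrative sketch (hypothetical names): the usual
 * call_rcu() pattern.  The updater first unlinks an element from all
 * RCU-visible structures, then hands it to call_rcu(); the callback runs
 * only after a grace period, when no reader can still hold a reference
 * obtained before the unlink.
 */
struct demo_item {
	int data;
	struct rcu_head rcu;
};

static void demo_item_free(struct rcu_head *head)
{
	struct demo_item *item = container_of(head, struct demo_item, rcu);

	kfree(item);
}

static void demo_item_retire(struct demo_item *item)
{
	/* Caller must already have removed 'item' from all RCU-visible lists. */
	call_rcu(&item->rcu, demo_item_free);
}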
709 * Queue an RCU callback for lazy invocation after a grace period.
710 * This will likely be later named something like "call_rcu_lazy()",
711 * but this change will require some way of tagging the lazy RCU
712 * callbacks in the list of pending callbacks. Until then, this
713 * function may only be called from __kfree_rcu().
715 void kfree_call_rcu(struct rcu_head *head,
716 void (*func)(struct rcu_head *rcu))
718 __call_rcu(head, func, &rcu_preempt_state, 1);
720 EXPORT_SYMBOL_GPL(kfree_call_rcu);
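/*
 * Editor-added illustrative sketch: callers normally reach kfree_call_rcu()
 * through the kfree_rcu() macro (via __kfree_rcu()) rather than directly.
 * Reusing the hypothetical demo_item type from the call_rcu() sketch above,
 * the lazy variant of demo_item_retire() collapses to a single statement.
 */
static void demo_item_retire_lazy(struct demo_item *item)
{
	kfree_rcu(item, rcu);	/* queued as a lazy callback; kfree()d after a GP */
}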
723 * synchronize_rcu - wait until a grace period has elapsed.
725 * Control will return to the caller some time after a full grace
726 * period has elapsed, in other words after all currently executing RCU
727 * read-side critical sections have completed. Note, however, that
728 * upon return from synchronize_rcu(), the caller might well be executing
729 * concurrently with new RCU read-side critical sections that began while
730 * synchronize_rcu() was waiting. RCU read-side critical sections are
731 * delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested.
733 void synchronize_rcu(void)
735 rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
736 !lock_is_held(&rcu_lock_map) &&
737 !lock_is_held(&rcu_sched_lock_map),
738 "Illegal synchronize_rcu() in RCU read-side critical section");
739 if (!rcu_scheduler_active)
740 return;
741 wait_rcu_gp(call_rcu);
743 EXPORT_SYMBOL_GPL(synchronize_rcu);
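/*
 * Editor-added illustrative sketch (hypothetical names): the blocking
 * counterpart of call_rcu().  The updater publishes a replacement with
 * rcu_assign_pointer(), waits for a grace period, and only then frees the
 * old version, because any reader that could have seen the old pointer
 * must have completed its RCU read-side critical section by the time
 * synchronize_rcu() returns.
 */
static DEFINE_SPINLOCK(demo_update_lock);

static void demo_item_replace(struct demo_item __rcu **slot,
			      struct demo_item *new_item)
{
	struct demo_item *old_item;

	spin_lock(&demo_update_lock);
	old_item = rcu_dereference_protected(*slot,
					     lockdep_is_held(&demo_update_lock));
	rcu_assign_pointer(*slot, new_item);
	spin_unlock(&demo_update_lock);

	synchronize_rcu();	/* wait for all pre-existing readers */
	kfree(old_item);
}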
745 static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq);
746 static long sync_rcu_preempt_exp_count;
747 static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex);
750 * Return non-zero if there are any tasks in RCU read-side critical
751 * sections blocking the current preemptible-RCU expedited grace period.
752 * If there is no preemptible-RCU expedited grace period currently in
753 * progress, returns zero unconditionally.
755 static int rcu_preempted_readers_exp(struct rcu_node *rnp)
757 return rnp->exp_tasks != NULL;
761 * Return non-zero if there is no RCU expedited grace period in progress
762 * for the specified rcu_node structure, in other words, if all CPUs and
763 * tasks covered by the specified rcu_node structure have done their bit
764 * for the current expedited grace period. Works only for preemptible
765 * RCU -- other RCU implementations use other means.
767 * Caller must hold sync_rcu_preempt_exp_mutex.
769 static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
771 return !rcu_preempted_readers_exp(rnp) &&
772 ACCESS_ONCE(rnp->expmask) == 0;
776 * Report the exit from RCU read-side critical section for the last task
777 * that queued itself during or before the current expedited preemptible-RCU
778 * grace period. This event is reported either to the rcu_node structure on
779 * which the task was queued or to one of that rcu_node structure's ancestors,
780 * recursively up the tree. (Calm down, calm down, we do the recursion
781 * iteratively!)
783 * Most callers will set the "wake" flag, but the task initiating the
784 * expedited grace period need not wake itself.
786 * Caller must hold sync_rcu_preempt_exp_mutex.
788 static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
789 bool wake)
794 raw_spin_lock_irqsave(&rnp->lock, flags);
795 for (;;) {
796 if (!sync_rcu_preempt_exp_done(rnp)) {
797 raw_spin_unlock_irqrestore(&rnp->lock, flags);
798 break;
799 }
800 if (rnp->parent == NULL) {
801 raw_spin_unlock_irqrestore(&rnp->lock, flags);
802 if (wake)
803 wake_up(&sync_rcu_preempt_exp_wq);
804 break;
805 }
806 mask = rnp->grpmask;
807 raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
808 rnp = rnp->parent;
809 raw_spin_lock(&rnp->lock); /* irqs already disabled */
810 rnp->expmask &= ~mask;
815 * Snapshot the tasks blocking the newly started preemptible-RCU expedited
816 * grace period for the specified rcu_node structure. If there are no such
817 * tasks, report it up the rcu_node hierarchy.
819 * Caller must hold sync_rcu_preempt_exp_mutex and rsp->onofflock.
822 sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp)
827 raw_spin_lock_irqsave(&rnp->lock, flags);
828 if (list_empty(&rnp->blkd_tasks))
829 raw_spin_unlock_irqrestore(&rnp->lock, flags);
830 else {
831 rnp->exp_tasks = rnp->blkd_tasks.next;
832 rcu_initiate_boost(rnp, flags); /* releases rnp->lock */
833 must_wait = 1;
834 }
835 if (!must_wait)
836 rcu_report_exp_rnp(rsp, rnp, false); /* Don't wake self. */
840 * Wait for an rcu-preempt grace period, but expedite it. The basic idea
841 * is to invoke synchronize_sched_expedited() to push all the tasks to
842 * the ->blkd_tasks lists and wait for this list to drain.
844 void synchronize_rcu_expedited(void)
847 struct rcu_node *rnp;
848 struct rcu_state *rsp = &rcu_preempt_state;
852 smp_mb(); /* Caller's modifications seen first by other CPUs. */
853 snap = ACCESS_ONCE(sync_rcu_preempt_exp_count) + 1;
854 smp_mb(); /* Above access cannot bleed into critical section. */
857 * Acquire lock, falling back to synchronize_rcu() if too many
858 * lock-acquisition failures. Of course, if someone does the
859 * expedited grace period for us, just leave.
861 while (!mutex_trylock(&sync_rcu_preempt_exp_mutex)) {
862 if (trycount++ < 10)
863 udelay(trycount * num_online_cpus());
868 if ((ACCESS_ONCE(sync_rcu_preempt_exp_count) - snap) > 0)
869 goto mb_ret; /* Others did our work for us. */
871 if ((ACCESS_ONCE(sync_rcu_preempt_exp_count) - snap) > 0)
872 goto unlock_mb_ret; /* Others did our work for us. */
874 /* force all RCU readers onto ->blkd_tasks lists. */
875 synchronize_sched_expedited();
877 raw_spin_lock_irqsave(&rsp->onofflock, flags);
879 /* Initialize ->expmask for all non-leaf rcu_node structures. */
880 rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) {
881 raw_spin_lock(&rnp->lock); /* irqs already disabled. */
882 rnp->expmask = rnp->qsmaskinit;
883 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
886 /* Snapshot current state of ->blkd_tasks lists. */
887 rcu_for_each_leaf_node(rsp, rnp)
888 sync_rcu_preempt_exp_init(rsp, rnp);
889 if (NUM_RCU_NODES > 1)
890 sync_rcu_preempt_exp_init(rsp, rcu_get_root(rsp));
892 raw_spin_unlock_irqrestore(&rsp->onofflock, flags);
894 /* Wait for snapshotted ->blkd_tasks lists to drain. */
895 rnp = rcu_get_root(rsp);
896 wait_event(sync_rcu_preempt_exp_wq,
897 sync_rcu_preempt_exp_done(rnp));
899 /* Clean up and exit. */
900 smp_mb(); /* ensure expedited GP seen before counter increment. */
901 ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
902 unlock_mb_ret:
903 mutex_unlock(&sync_rcu_preempt_exp_mutex);
904 mb_ret:
905 smp_mb(); /* ensure subsequent action seen after grace period. */
907 EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
910 * Check to see if there is any immediate preemptible-RCU-related work
913 static int rcu_preempt_pending(int cpu)
915 return __rcu_pending(&rcu_preempt_state,
916 &per_cpu(rcu_preempt_data, cpu));
920 * Does preemptible RCU have callbacks on this CPU?
922 static int rcu_preempt_cpu_has_callbacks(int cpu)
924 return !!per_cpu(rcu_preempt_data, cpu).nxtlist;
928 * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
930 void rcu_barrier(void)
932 _rcu_barrier(&rcu_preempt_state, call_rcu);
934 EXPORT_SYMBOL_GPL(rcu_barrier);
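/*
 * Editor-added illustrative sketch (hypothetical names): the classic
 * rcu_barrier() use case.  A module that posts call_rcu() callbacks must
 * wait for all of them to be invoked before its code and data go away;
 * synchronize_rcu() is not sufficient, because it waits for readers, not
 * for callbacks that are already queued.
 */
static void demo_module_exit(void)
{
	/* First stop posting new callbacks (unregister hooks, unlink items). */
	rcu_barrier();		/* wait for every pending call_rcu() callback */
	/* Now it is safe to free remaining data and let the module unload. */
}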
937 * Initialize preemptible RCU's per-CPU data.
939 static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
941 rcu_init_percpu_data(cpu, &rcu_preempt_state, 1);
945 * Move preemptible RCU's callbacks from dying CPU to other online CPU
946 * and record a quiescent state.
948 static void rcu_preempt_cleanup_dying_cpu(void)
950 rcu_cleanup_dying_cpu(&rcu_preempt_state);
954 * Initialize preemptible RCU's state structures.
956 static void __init __rcu_init_preempt(void)
958 rcu_init_one(&rcu_preempt_state, &rcu_preempt_data);
962 * Check for a task exiting while in a preemptible-RCU read-side
963 * critical section, clean up if so. No need to issue warnings,
964 * as debug_check_no_locks_held() already does this if lockdep is enabled.
968 void exit_rcu(void)
969 struct task_struct *t = current;
971 if (t->rcu_read_lock_nesting == 0)
972 return;
973 t->rcu_read_lock_nesting = 1;
974 barrier();
975 t->rcu_read_unlock_special = RCU_READ_UNLOCK_BLOCKED;
976 __rcu_read_unlock();
977 #else /* #ifdef CONFIG_TREE_PREEMPT_RCU */
979 static struct rcu_state *rcu_state = &rcu_sched_state;
982 * Tell them what RCU they are running.
984 static void __init rcu_bootup_announce(void)
986 printk(KERN_INFO "Hierarchical RCU implementation.\n");
987 rcu_bootup_announce_oddness();
991 * Return the number of RCU batches processed thus far for debug & stats.
993 long rcu_batches_completed(void)
995 return rcu_batches_completed_sched();
997 EXPORT_SYMBOL_GPL(rcu_batches_completed);
1000 * Force a quiescent state for RCU, which, because there is no preemptible
1001 * RCU, becomes the same as rcu-sched.
1003 void rcu_force_quiescent_state(void)
1005 rcu_sched_force_quiescent_state();
1007 EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
1010 * Because preemptible RCU does not exist, we never have to check for
1011 * CPUs being in quiescent states.
1013 static void rcu_preempt_note_context_switch(int cpu)
1018 * Because preemptible RCU does not exist, there are never any preempted
1021 static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
1026 #ifdef CONFIG_HOTPLUG_CPU
1028 /* Because preemptible RCU does not exist, no quieting of tasks. */
1029 static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
1031 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1034 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
1037 * Because preemptible RCU does not exist, we never have to check for
1038 * tasks blocked within RCU read-side critical sections.
1040 static void rcu_print_detail_task_stall(struct rcu_state *rsp)
1045 * Because preemptible RCU does not exist, we never have to check for
1046 * tasks blocked within RCU read-side critical sections.
1048 static int rcu_print_task_stall(struct rcu_node *rnp)
1054 * Because preemptible RCU does not exist, there is no need to suppress
1055 * its CPU stall warnings.
1057 static void rcu_preempt_stall_reset(void)
1062 * Because there is no preemptible RCU, there can be no readers blocked,
1063 * so there is no need to check for blocked tasks. So check only for
1064 * bogus qsmask values.
1066 static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
1068 WARN_ON_ONCE(rnp->qsmask);
1071 #ifdef CONFIG_HOTPLUG_CPU
1074 * Because preemptible RCU does not exist, it never needs to migrate
1075 * tasks that were blocked within RCU read-side critical sections, and
1076 * such non-existent tasks cannot possibly have been blocking the current
1079 static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
1080 struct rcu_node *rnp,
1081 struct rcu_data *rdp)
1086 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
1089 * Because preemptible RCU does not exist, it never needs CPU-offline
1092 static void rcu_preempt_cleanup_dead_cpu(int cpu)
1097 * Because preemptible RCU does not exist, it never has any callbacks
1100 static void rcu_preempt_check_callbacks(int cpu)
1105 * Because preemptible RCU does not exist, it never has any callbacks
1108 static void rcu_preempt_process_callbacks(void)
1113 * Queue an RCU callback for lazy invocation after a grace period.
1114 * This will likely be later named something like "call_rcu_lazy()",
1115 * but this change will require some way of tagging the lazy RCU
1116 * callbacks in the list of pending callbacks. Until then, this
1117 * function may only be called from __kfree_rcu().
1119 * Because there is no preemptible RCU, we use RCU-sched instead.
1121 void kfree_call_rcu(struct rcu_head *head,
1122 void (*func)(struct rcu_head *rcu))
1124 __call_rcu(head, func, &rcu_sched_state, 1);
1126 EXPORT_SYMBOL_GPL(kfree_call_rcu);
1129 * Wait for an rcu-preempt grace period, but make it happen quickly.
1130 * But because preemptible RCU does not exist, map to rcu-sched.
1132 void synchronize_rcu_expedited(void)
1134 synchronize_sched_expedited();
1136 EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
1138 #ifdef CONFIG_HOTPLUG_CPU
1141 * Because preemptible RCU does not exist, there is never any need to
1142 * report on tasks preempted in RCU read-side critical sections during
1143 * expedited RCU grace periods.
1145 static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
1150 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
1153 * Because preemptible RCU does not exist, it never has any work to do.
1155 static int rcu_preempt_pending(int cpu)
1161 * Because preemptible RCU does not exist, it never has callbacks
1163 static int rcu_preempt_cpu_has_callbacks(int cpu)
1169 * Because preemptible RCU does not exist, rcu_barrier() is just
1170 * another name for rcu_barrier_sched().
1172 void rcu_barrier(void)
1174 rcu_barrier_sched();
1176 EXPORT_SYMBOL_GPL(rcu_barrier);
1179 * Because preemptible RCU does not exist, there is no per-CPU
1180 * data to initialize.
1182 static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
1187 * Because there is no preemptible RCU, there is no cleanup to do.
1189 static void rcu_preempt_cleanup_dying_cpu(void)
1194 * Because preemptible RCU does not exist, it need not be initialized.
1196 static void __init __rcu_init_preempt(void)
1200 #endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
1202 #ifdef CONFIG_RCU_BOOST
1204 #include "rtmutex_common.h"
1206 #ifdef CONFIG_RCU_TRACE
1208 static void rcu_initiate_boost_trace(struct rcu_node *rnp)
1210 if (list_empty(&rnp->blkd_tasks))
1211 rnp->n_balk_blkd_tasks++;
1212 else if (rnp->exp_tasks == NULL && rnp->gp_tasks == NULL)
1213 rnp->n_balk_exp_gp_tasks++;
1214 else if (rnp->gp_tasks != NULL && rnp->boost_tasks != NULL)
1215 rnp->n_balk_boost_tasks++;
1216 else if (rnp->gp_tasks != NULL && rnp->qsmask != 0)
1217 rnp->n_balk_notblocked++;
1218 else if (rnp->gp_tasks != NULL &&
1219 ULONG_CMP_LT(jiffies, rnp->boost_time))
1220 rnp->n_balk_notyet++;
1225 #else /* #ifdef CONFIG_RCU_TRACE */
1227 static void rcu_initiate_boost_trace(struct rcu_node *rnp)
1231 #endif /* #else #ifdef CONFIG_RCU_TRACE */
1234 * Carry out RCU priority boosting on the task indicated by ->exp_tasks
1235 * or ->boost_tasks, advancing the pointer to the next task in the
1236 * ->blkd_tasks list.
1238 * Note that irqs must be enabled: boosting the task can block.
1239 * Returns 1 if there are more tasks needing to be boosted.
1241 static int rcu_boost(struct rcu_node *rnp)
1243 unsigned long flags;
1244 struct rt_mutex mtx;
1245 struct task_struct *t;
1246 struct list_head *tb;
1248 if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL)
1249 return 0; /* Nothing left to boost. */
1251 raw_spin_lock_irqsave(&rnp->lock, flags);
1254 * Recheck under the lock: all tasks in need of boosting
1255 * might exit their RCU read-side critical sections on their own.
1257 if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL) {
1258 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1259 return 0;
1263 * Preferentially boost tasks blocking expedited grace periods.
1264 * This cannot starve the normal grace periods because a second
1265 * expedited grace period must boost all blocked tasks, including
1266 * those blocking the pre-existing normal grace period.
1268 if (rnp->exp_tasks != NULL) {
1269 tb = rnp->exp_tasks;
1270 rnp->n_exp_boosts++;
1271 } else {
1272 tb = rnp->boost_tasks;
1273 rnp->n_normal_boosts++;
1275 rnp->n_tasks_boosted++;
1278 * We boost task t by manufacturing an rt_mutex that appears to
1279 * be held by task t. We leave a pointer to that rt_mutex where
1280 * task t can find it, and task t will release the mutex when it
1281 * exits its outermost RCU read-side critical section. Then
1282 * simply acquiring this artificial rt_mutex will boost task
1283 * t's priority. (Thanks to tglx for suggesting this approach!)
1285 * Note that task t must acquire rnp->lock to remove itself from
1286 * the ->blkd_tasks list, which it will do from exit() if from
1287 * nowhere else. We therefore are guaranteed that task t will
1288 * stay around at least until we drop rnp->lock. Note that
1289 * rnp->lock also resolves races between our priority boosting
1290 * and task t's exiting its outermost RCU read-side critical
1293 t = container_of(tb, struct task_struct, rcu_node_entry);
1294 rt_mutex_init_proxy_locked(&mtx, t);
1295 t->rcu_boost_mutex = &mtx;
1296 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1297 rt_mutex_lock(&mtx); /* Side effect: boosts task t's priority. */
1298 rt_mutex_unlock(&mtx); /* Keep lockdep happy. */
1300 return ACCESS_ONCE(rnp->exp_tasks) != NULL ||
1301 ACCESS_ONCE(rnp->boost_tasks) != NULL;
1305 * Timer handler to initiate waking up of boost kthreads that
1306 * have yielded the CPU due to excessive numbers of tasks to
1307 * boost. We wake up the per-rcu_node kthread, which in turn
1308 * will wake up the booster kthread.
1310 static void rcu_boost_kthread_timer(unsigned long arg)
1312 invoke_rcu_node_kthread((struct rcu_node *)arg);
1316 * Priority-boosting kthread. One per leaf rcu_node and one for the
1319 static int rcu_boost_kthread(void *arg)
1321 struct rcu_node *rnp = (struct rcu_node *)arg;
1325 trace_rcu_utilization("Start boost kthread@init");
1327 rnp->boost_kthread_status = RCU_KTHREAD_WAITING;
1328 trace_rcu_utilization("End boost kthread@rcu_wait");
1329 rcu_wait(rnp->boost_tasks || rnp->exp_tasks);
1330 trace_rcu_utilization("Start boost kthread@rcu_wait");
1331 rnp->boost_kthread_status = RCU_KTHREAD_RUNNING;
1332 more2boost = rcu_boost(rnp);
1338 trace_rcu_utilization("End boost kthread@rcu_yield");
1339 rcu_yield(rcu_boost_kthread_timer, (unsigned long)rnp);
1340 trace_rcu_utilization("Start boost kthread@rcu_yield");
1345 trace_rcu_utilization("End boost kthread@notreached");
1350 * Check to see if it is time to start boosting RCU readers that are
1351 * blocking the current grace period, and, if so, tell the per-rcu_node
1352 * kthread to start boosting them. If there is an expedited grace
1353 * period in progress, it is always time to boost.
1355 * The caller must hold rnp->lock, which this function releases,
1356 * but irqs remain disabled. The ->boost_kthread_task is immortal,
1357 * so we don't need to worry about it going away.
1359 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
1361 struct task_struct *t;
1363 if (!rcu_preempt_blocked_readers_cgp(rnp) && rnp->exp_tasks == NULL) {
1364 rnp->n_balk_exp_gp_tasks++;
1365 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1366 return;
1367 }
1368 if (rnp->exp_tasks != NULL ||
1369 (rnp->gp_tasks != NULL &&
1370 rnp->boost_tasks == NULL &&
1372 ULONG_CMP_GE(jiffies, rnp->boost_time))) {
1373 if (rnp->exp_tasks == NULL)
1374 rnp->boost_tasks = rnp->gp_tasks;
1375 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1376 t = rnp->boost_kthread_task;
1377 if (t)
1378 wake_up_process(t);
1379 } else {
1380 rcu_initiate_boost_trace(rnp);
1381 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1386 * Wake up the per-CPU kthread to invoke RCU callbacks.
1388 static void invoke_rcu_callbacks_kthread(void)
1390 unsigned long flags;
1392 local_irq_save(flags);
1393 __this_cpu_write(rcu_cpu_has_work, 1);
1394 if (__this_cpu_read(rcu_cpu_kthread_task) != NULL &&
1395 current != __this_cpu_read(rcu_cpu_kthread_task))
1396 wake_up_process(__this_cpu_read(rcu_cpu_kthread_task));
1397 local_irq_restore(flags);
1401 * Is the current CPU running the RCU-callbacks kthread?
1402 * Caller must have preemption disabled.
1404 static bool rcu_is_callbacks_kthread(void)
1406 return __get_cpu_var(rcu_cpu_kthread_task) == current;
1410 * Set the affinity of the boost kthread. The CPU-hotplug locks are
1411 * held, so no one should be messing with the existence of the boost
1414 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp,
1417 struct task_struct *t;
1419 t = rnp->boost_kthread_task;
1420 if (t)
1421 set_cpus_allowed_ptr(rnp->boost_kthread_task, cm);
1424 #define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000)
1427 * Do priority-boost accounting for the start of a new grace period.
1429 static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
1431 rnp->boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES;
1435 * Create an RCU-boost kthread for the specified node if one does not
1436 * already exist. We only create this kthread for preemptible RCU.
1437 * Returns zero if all is well, a negated errno otherwise.
1439 static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
1440 struct rcu_node *rnp,
1443 unsigned long flags;
1444 struct sched_param sp;
1445 struct task_struct *t;
1447 if (&rcu_preempt_state != rsp)
1448 return 0;
1450 if (rnp->boost_kthread_task != NULL)
1451 return 0;
1452 t = kthread_create(rcu_boost_kthread, (void *)rnp,
1453 "rcub/%d", rnp_index);
1456 raw_spin_lock_irqsave(&rnp->lock, flags);
1457 rnp->boost_kthread_task = t;
1458 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1459 sp.sched_priority = RCU_BOOST_PRIO;
1460 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
1461 wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
1465 #ifdef CONFIG_HOTPLUG_CPU
1468 * Stop RCU's per-CPU kthread when its CPU goes offline.
1470 static void rcu_stop_cpu_kthread(int cpu)
1472 struct task_struct *t;
1474 /* Stop the CPU's kthread. */
1475 t = per_cpu(rcu_cpu_kthread_task, cpu);
1476 if (t != NULL) {
1477 per_cpu(rcu_cpu_kthread_task, cpu) = NULL;
1478 kthread_stop(t);
1479 }
1482 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
1484 static void rcu_kthread_do_work(void)
1486 rcu_do_batch(&rcu_sched_state, &__get_cpu_var(rcu_sched_data));
1487 rcu_do_batch(&rcu_bh_state, &__get_cpu_var(rcu_bh_data));
1488 rcu_preempt_do_callbacks();
1492 * Wake up the specified per-rcu_node-structure kthread.
1493 * Because the per-rcu_node kthreads are immortal, we don't need
1494 * to do anything to keep them alive.
1496 static void invoke_rcu_node_kthread(struct rcu_node *rnp)
1498 struct task_struct *t;
1500 t = rnp->node_kthread_task;
1501 if (t != NULL)
1502 wake_up_process(t);
1506 * Set the specified CPU's kthread to run RT or not, as specified by
1507 * the to_rt argument. The CPU-hotplug locks are held, so the task
1508 * is not going away.
1510 static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
1513 struct sched_param sp;
1514 struct task_struct *t;
1516 t = per_cpu(rcu_cpu_kthread_task, cpu);
1517 if (t == NULL)
1518 return;
1519 if (to_rt) {
1520 policy = SCHED_FIFO;
1521 sp.sched_priority = RCU_KTHREAD_PRIO;
1522 } else {
1523 policy = SCHED_NORMAL;
1524 sp.sched_priority = 0;
1526 sched_setscheduler_nocheck(t, policy, &sp);
1530 * Timer handler to initiate the waking up of per-CPU kthreads that
1531 * have yielded the CPU due to excess numbers of RCU callbacks.
1532 * We wake up the per-rcu_node kthread, which in turn will wake up
1533 * the booster kthread.
1535 static void rcu_cpu_kthread_timer(unsigned long arg)
1537 struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, arg);
1538 struct rcu_node *rnp = rdp->mynode;
1540 atomic_or(rdp->grpmask, &rnp->wakemask);
1541 invoke_rcu_node_kthread(rnp);
1545 * Drop to non-real-time priority and yield, but only after posting a
1546 * timer that will cause us to regain our real-time priority if we
1547 * remain preempted. Either way, we restore our real-time priority
1550 static void rcu_yield(void (*f)(unsigned long), unsigned long arg)
1552 struct sched_param sp;
1553 struct timer_list yield_timer;
1554 int prio = current->rt_priority;
1556 setup_timer_on_stack(&yield_timer, f, arg);
1557 mod_timer(&yield_timer, jiffies + 2);
1558 sp.sched_priority = 0;
1559 sched_setscheduler_nocheck(current, SCHED_NORMAL, &sp);
1560 set_user_nice(current, 19);
1561 schedule();
1562 set_user_nice(current, 0);
1563 sp.sched_priority = prio;
1564 sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
1565 del_timer(&yield_timer);
1569 * Handle cases where the rcu_cpu_kthread() ends up on the wrong CPU.
1570 * This can happen while the corresponding CPU is either coming online
1571 * or going offline. We cannot wait until the CPU is fully online
1572 * before starting the kthread, because the various notifier functions
1573 * can wait for RCU grace periods. So we park rcu_cpu_kthread() until
1574 * the corresponding CPU is online.
1576 * Return 1 if the kthread needs to stop, 0 otherwise.
1578 * Caller must disable bh. This function can momentarily enable it.
1580 static int rcu_cpu_kthread_should_stop(int cpu)
1582 while (cpu_is_offline(cpu) ||
1583 !cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)) ||
1584 smp_processor_id() != cpu) {
1585 if (kthread_should_stop())
1586 return 1;
1587 per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
1588 per_cpu(rcu_cpu_kthread_cpu, cpu) = raw_smp_processor_id();
1590 schedule_timeout_uninterruptible(1);
1591 if (!cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)))
1592 set_cpus_allowed_ptr(current, cpumask_of(cpu));
1595 per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
1596 return 0;
1600 * Per-CPU kernel thread that invokes RCU callbacks. This replaces the
1601 * RCU softirq used in flavors and configurations of RCU that do not
1602 * support RCU priority boosting.
1604 static int rcu_cpu_kthread(void *arg)
1606 int cpu = (int)(long)arg;
1607 unsigned long flags;
1609 unsigned int *statusp = &per_cpu(rcu_cpu_kthread_status, cpu);
1610 char work;
1611 char *workp = &per_cpu(rcu_cpu_has_work, cpu);
1613 trace_rcu_utilization("Start CPU kthread@init");
1615 *statusp = RCU_KTHREAD_WAITING;
1616 trace_rcu_utilization("End CPU kthread@rcu_wait");
1617 rcu_wait(*workp != 0 || kthread_should_stop());
1618 trace_rcu_utilization("Start CPU kthread@rcu_wait");
1620 if (rcu_cpu_kthread_should_stop(cpu)) {
1624 *statusp = RCU_KTHREAD_RUNNING;
1625 per_cpu(rcu_cpu_kthread_loops, cpu)++;
1626 local_irq_save(flags);
1627 work = *workp;
1628 *workp = 0;
1629 local_irq_restore(flags);
1630 if (work)
1631 rcu_kthread_do_work();
1638 *statusp = RCU_KTHREAD_YIELDING;
1639 trace_rcu_utilization("End CPU kthread@rcu_yield");
1640 rcu_yield(rcu_cpu_kthread_timer, (unsigned long)cpu);
1641 trace_rcu_utilization("Start CPU kthread@rcu_yield");
1645 *statusp = RCU_KTHREAD_STOPPED;
1646 trace_rcu_utilization("End CPU kthread@term");
1651 * Spawn a per-CPU kthread, setting up affinity and priority.
1652 * Because the CPU hotplug lock is held, no other CPU will be attempting
1653 * to manipulate rcu_cpu_kthread_task. There might be another CPU
1654 * attempting to access it during boot, but the locking in kthread_bind()
1655 * will enforce sufficient ordering.
1657 * Please note that we cannot simply refuse to wake up the per-CPU
1658 * kthread because kthreads are created in TASK_UNINTERRUPTIBLE state,
1659 * which can result in softlockup complaints if the task ends up being
1660 * idle for more than a couple of minutes.
1662 * However, please note also that we cannot bind the per-CPU kthread to its
1663 * CPU until that CPU is fully online. We also cannot wait until the
1664 * CPU is fully online before we create its per-CPU kthread, as this would
1665 * deadlock the system when CPU notifiers tried waiting for grace
1666 * periods. So we bind the per-CPU kthread to its CPU only if the CPU
1667 * is online. If its CPU is not yet fully online, then the code in
1668 * rcu_cpu_kthread() will wait until it is fully online, and then do
1671 static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu)
1673 struct sched_param sp;
1674 struct task_struct *t;
1676 if (!rcu_scheduler_fully_active ||
1677 per_cpu(rcu_cpu_kthread_task, cpu) != NULL)
1678 return 0;
1679 t = kthread_create_on_node(rcu_cpu_kthread,
1685 if (cpu_online(cpu))
1686 kthread_bind(t, cpu);
1687 per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
1688 WARN_ON_ONCE(per_cpu(rcu_cpu_kthread_task, cpu) != NULL);
1689 sp.sched_priority = RCU_KTHREAD_PRIO;
1690 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
1691 per_cpu(rcu_cpu_kthread_task, cpu) = t;
1692 wake_up_process(t); /* Get to TASK_INTERRUPTIBLE quickly. */
1697 * Per-rcu_node kthread, which is in charge of waking up the per-CPU
1698 * kthreads when needed. We ignore requests to wake up kthreads
1699 * for offline CPUs, which is OK because force_quiescent_state()
1700 * takes care of this case.
1702 static int rcu_node_kthread(void *arg)
1705 unsigned long flags;
1707 struct rcu_node *rnp = (struct rcu_node *)arg;
1708 struct sched_param sp;
1709 struct task_struct *t;
1712 rnp->node_kthread_status = RCU_KTHREAD_WAITING;
1713 rcu_wait(atomic_read(&rnp->wakemask) != 0);
1714 rnp->node_kthread_status = RCU_KTHREAD_RUNNING;
1715 raw_spin_lock_irqsave(&rnp->lock, flags);
1716 mask = atomic_xchg(&rnp->wakemask, 0);
1717 rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */
1718 for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) {
1719 if ((mask & 0x1) == 0)
1720 continue;
1722 t = per_cpu(rcu_cpu_kthread_task, cpu);
1723 if (!cpu_online(cpu) || t == NULL) {
1727 per_cpu(rcu_cpu_has_work, cpu) = 1;
1728 sp.sched_priority = RCU_KTHREAD_PRIO;
1729 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
1734 rnp->node_kthread_status = RCU_KTHREAD_STOPPED;
1739 * Set the per-rcu_node kthread's affinity to cover all CPUs that are
1740 * served by the rcu_node in question. The CPU hotplug lock is still
1741 * held, so the value of rnp->qsmaskinit will be stable.
1743 * We don't include outgoingcpu in the affinity set, use -1 if there is
1744 * no outgoing CPU. If there are no CPUs left in the affinity set,
1745 * this function allows the kthread to execute on any CPU.
1747 static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
1751 unsigned long mask = rnp->qsmaskinit;
1753 if (rnp->node_kthread_task == NULL)
1754 return;
1755 if (!alloc_cpumask_var(&cm, GFP_KERNEL))
1756 return;
1758 for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1)
1759 if ((mask & 0x1) && cpu != outgoingcpu)
1760 cpumask_set_cpu(cpu, cm);
1761 if (cpumask_weight(cm) == 0) {
1762 cpumask_setall(cm);
1763 for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++)
1764 cpumask_clear_cpu(cpu, cm);
1765 WARN_ON_ONCE(cpumask_weight(cm) == 0);
1767 set_cpus_allowed_ptr(rnp->node_kthread_task, cm);
1768 rcu_boost_kthread_setaffinity(rnp, cm);
1769 free_cpumask_var(cm);
1773 * Spawn a per-rcu_node kthread, setting priority and affinity.
1774 * Called during boot before online/offline can happen, or, if
1775 * during runtime, with the main CPU-hotplug locks held. So only
1776 * one of these can be executing at a time.
1778 static int __cpuinit rcu_spawn_one_node_kthread(struct rcu_state *rsp,
1779 struct rcu_node *rnp)
1781 unsigned long flags;
1782 int rnp_index = rnp - &rsp->node[0];
1783 struct sched_param sp;
1784 struct task_struct *t;
1786 if (!rcu_scheduler_fully_active ||
1787 rnp->qsmaskinit == 0)
1788 return 0;
1789 if (rnp->node_kthread_task == NULL) {
1790 t = kthread_create(rcu_node_kthread, (void *)rnp,
1791 "rcun/%d", rnp_index);
1794 raw_spin_lock_irqsave(&rnp->lock, flags);
1795 rnp->node_kthread_task = t;
1796 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1797 sp.sched_priority = 99;
1798 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
1799 wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
1801 return rcu_spawn_one_boost_kthread(rsp, rnp, rnp_index);
1805 * Spawn all kthreads -- called as soon as the scheduler is running.
1807 static int __init rcu_spawn_kthreads(void)
1810 struct rcu_node *rnp;
1812 rcu_scheduler_fully_active = 1;
1813 for_each_possible_cpu(cpu) {
1814 per_cpu(rcu_cpu_has_work, cpu) = 0;
1815 if (cpu_online(cpu))
1816 (void)rcu_spawn_one_cpu_kthread(cpu);
1818 rnp = rcu_get_root(rcu_state);
1819 (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
1820 if (NUM_RCU_NODES > 1) {
1821 rcu_for_each_leaf_node(rcu_state, rnp)
1822 (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
1826 early_initcall(rcu_spawn_kthreads);
1828 static void __cpuinit rcu_prepare_kthreads(int cpu)
1830 struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
1831 struct rcu_node *rnp = rdp->mynode;
1833 /* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
1834 if (rcu_scheduler_fully_active) {
1835 (void)rcu_spawn_one_cpu_kthread(cpu);
1836 if (rnp->node_kthread_task == NULL)
1837 (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
1841 #else /* #ifdef CONFIG_RCU_BOOST */
1843 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
1845 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1848 static void invoke_rcu_callbacks_kthread(void)
1853 static bool rcu_is_callbacks_kthread(void)
1858 static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
1862 #ifdef CONFIG_HOTPLUG_CPU
1864 static void rcu_stop_cpu_kthread(int cpu)
1868 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
1870 static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
1874 static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
1878 static int __init rcu_scheduler_really_started(void)
1880 rcu_scheduler_fully_active = 1;
1883 early_initcall(rcu_scheduler_really_started);
1885 static void __cpuinit rcu_prepare_kthreads(int cpu)
1889 #endif /* #else #ifdef CONFIG_RCU_BOOST */
1891 static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
1892 static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
1894 static int synchronize_sched_expedited_cpu_stop(void *data)
1897 * There must be a full memory barrier on each affected CPU
1898 * between the time that try_stop_cpus() is called and the
1899 * time that it returns.
1901 * In the current initial implementation of cpu_stop, the
1902 * above condition is already met when the control reaches
1903 * this point and the following smp_mb() is not strictly
1904 * necessary. Do smp_mb() anyway for documentation and
1905 * robustness against future implementation changes.
1907 smp_mb(); /* See above comment block. */
1912 * Wait for an rcu-sched grace period to elapse, but use "big hammer"
1913 * approach to force grace period to end quickly. This consumes
1914 * significant time on all CPUs, and is thus not recommended for
1915 * any sort of common-case code.
1917 * Note that it is illegal to call this function while holding any
1918 * lock that is acquired by a CPU-hotplug notifier. Failing to
1919 * observe this restriction will result in deadlock.
1921 * This implementation can be thought of as an application of ticket
1922 * locking to RCU, with sync_sched_expedited_started and
1923 * sync_sched_expedited_done taking on the roles of the halves
1924 * of the ticket-lock word. Each task atomically increments
1925 * sync_sched_expedited_started upon entry, snapshotting the old value,
1926 * then attempts to stop all the CPUs. If this succeeds, then each
1927 * CPU will have executed a context switch, resulting in an RCU-sched
1928 * grace period. We are then done, so we use atomic_cmpxchg() to
1929 * update sync_sched_expedited_done to match our snapshot -- but
1930 * only if someone else has not already advanced past our snapshot.
1932 * On the other hand, if try_stop_cpus() fails, we check the value
1933 * of sync_sched_expedited_done. If it has advanced past our
1934 * initial snapshot, then someone else must have forced a grace period
1935 * some time after we took our snapshot. In this case, our work is
1936 * done for us, and we can simply return. Otherwise, we try again,
1937 * but keep our initial snapshot for purposes of checking for someone
1938 * doing our work for us.
1940 * If we fail too many times in a row, we fall back to synchronize_sched().
1942 void synchronize_sched_expedited(void)
1944 int firstsnap, s, snap, trycount = 0;
1946 /* Note that atomic_inc_return() implies full memory barrier. */
1947 firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
1951 * Each pass through the following loop attempts to force a
1952 * context switch on each CPU.
1954 while (try_stop_cpus(cpu_online_mask,
1955 synchronize_sched_expedited_cpu_stop,
1956 NULL) == -EAGAIN) {
1959 /* No joy, try again later. Or just synchronize_sched(). */
1960 if (trycount++ < 10)
1961 udelay(trycount * num_online_cpus());
1962 else {
1963 synchronize_sched();
1964 return;
1965 }
1967 /* Check to see if someone else did our work for us. */
1968 s = atomic_read(&sync_sched_expedited_done);
1969 if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
1970 smp_mb(); /* ensure test happens before caller kfree */
1975 * Refetching sync_sched_expedited_started allows later
1976 * callers to piggyback on our grace period. We subtract
1977 * 1 to get the same token that the last incrementer got.
1978 * We retry after they started, so our grace period works
1979 * for them, and they started after our first try, so their
1980 * grace period works for us.
1983 snap = atomic_read(&sync_sched_expedited_started);
1984 smp_mb(); /* ensure read is before try_stop_cpus(). */
1988 * Everyone up to our most recent fetch is covered by our grace
1989 * period. Update the counter, but only if our work is still
1990 * relevant -- which it won't be if someone who started later
1991 * than we did beat us to the punch.
1994 s = atomic_read(&sync_sched_expedited_done);
1995 if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
1996 smp_mb(); /* ensure test happens before caller kfree */
1999 } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
2003 EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
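
/*
 * Minimal usage sketch (not part of this file): an updater unlinks an
 * element from an RCU-protected list and uses synchronize_sched_expedited()
 * to wait for pre-existing preemption-disabled readers before freeing.
 * The "struct foo", "foo_lock", and foo_del() names below are hypothetical.
 *
 *	struct foo {
 *		struct list_head list;
 *		int data;
 *	};
 *
 *	void foo_del(struct foo *p)
 *	{
 *		spin_lock(&foo_lock);
 *		list_del_rcu(&p->list);		// unlink; readers may still see it
 *		spin_unlock(&foo_lock);
 *		synchronize_sched_expedited();	// wait out RCU-sched readers
 *		kfree(p);			// now safe to free
 *	}
 *
 * Note that plain synchronize_sched() is the preferred, lower-overhead
 * choice; the expedited form trades system-wide CPU time for lower latency.
 */
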
#if !defined(CONFIG_RCU_FAST_NO_HZ)

/*
 * Check to see if any future RCU-related work will need to be done
 * by the current CPU, even if none need be done immediately, returning
 * 1 if so.  This function is part of the RCU implementation; it is -not-
 * an exported member of the RCU API.
 *
 * Because we do not have RCU_FAST_NO_HZ, just check whether this CPU needs
 * any flavor of RCU.
 */
int rcu_needs_cpu(int cpu)
{
	return rcu_cpu_has_callbacks(cpu);
}

/*
 * Because we do not have RCU_FAST_NO_HZ, don't bother initializing for it.
 */
static void rcu_prepare_for_idle_init(int cpu)
{
}

/*
 * Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up
 * after it.
 */
static void rcu_cleanup_after_idle(int cpu)
{
}

/*
 * Do the idle-entry grace-period work, which, because CONFIG_RCU_FAST_NO_HZ=n,
 * is nothing.
 */
static void rcu_prepare_for_idle(int cpu)
{
}

#else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */
2044 #else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */
2047 * This code is invoked when a CPU goes idle, at which point we want
2048 * to have the CPU do everything required for RCU so that it can enter
2049 * the energy-efficient dyntick-idle mode. This is handled by a
2050 * state machine implemented by rcu_prepare_for_idle() below.
2052 * The following three proprocessor symbols control this state machine:
2054 * RCU_IDLE_FLUSHES gives the maximum number of times that we will attempt
2055 * to satisfy RCU. Beyond this point, it is better to incur a periodic
2056 * scheduling-clock interrupt than to loop through the state machine
2058 * RCU_IDLE_OPT_FLUSHES gives the number of RCU_IDLE_FLUSHES that are
2059 * optional if RCU does not need anything immediately from this
2060 * CPU, even if this CPU still has RCU callbacks queued. The first
2061 * times through the state machine are mandatory: we need to give
2062 * the state machine a chance to communicate a quiescent state
2064 * RCU_IDLE_GP_DELAY gives the number of jiffies that a CPU is permitted
2065 * to sleep in dyntick-idle mode with RCU callbacks pending. This
2066 * is sized to be roughly one RCU grace period. Those energy-efficiency
2067 * benchmarkers who might otherwise be tempted to set this to a large
2068 * number, be warned: Setting RCU_IDLE_GP_DELAY too high can hang your
2069 * system. And if you are -that- concerned about energy efficiency,
2070 * just power the system down and be done with it!
2071 * RCU_IDLE_LAZY_GP_DELAY gives the number of jiffies that a CPU is
2072 * permitted to sleep in dyntick-idle mode with only lazy RCU
2073 * callbacks pending. Setting this too high can OOM your system.
2075 * The values below work well in practice. If future workloads require
2076 * adjustment, they can be converted into kernel config parameters, though
2077 * making the state machine smarter might be a better option.
2079 #define RCU_IDLE_FLUSHES 5 /* Number of dyntick-idle tries. */
2080 #define RCU_IDLE_OPT_FLUSHES 3 /* Optional dyntick-idle tries. */
2081 #define RCU_IDLE_GP_DELAY 6 /* Roughly one grace period. */
2082 #define RCU_IDLE_LAZY_GP_DELAY (6 * HZ) /* Roughly six seconds. */
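
/*
 * For illustration only, assuming a kernel configured with HZ=1000 (other
 * HZ values scale accordingly): RCU_IDLE_GP_DELAY is 6 jiffies, which
 * jiffies_to_usecs() turns into 6000 us, so rcu_idle_gp_wait below becomes
 * a 6 ms hrtimer delay.  RCU_IDLE_LAZY_GP_DELAY is 6 * HZ = 6000 jiffies,
 * giving rcu_idle_lazy_gp_wait a delay of roughly 6 s.
 */
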
static DEFINE_PER_CPU(int, rcu_dyntick_drain);
static DEFINE_PER_CPU(unsigned long, rcu_dyntick_holdoff);
static DEFINE_PER_CPU(struct hrtimer, rcu_idle_gp_timer);
static ktime_t rcu_idle_gp_wait;	/* If some non-lazy callbacks. */
static ktime_t rcu_idle_lazy_gp_wait;	/* If only lazy callbacks. */
/*
 * Allow the CPU to enter dyntick-idle mode if either: (1) There are no
 * callbacks on this CPU, (2) this CPU has not yet attempted to enter
 * dyntick-idle mode, or (3) this CPU is in the process of attempting to
 * enter dyntick-idle mode.  Otherwise, if we have recently tried and failed
 * to enter dyntick-idle mode, we refuse to try to enter it.  After all,
 * it is better to incur scheduling-clock interrupts than to spin
 * continuously for the same time duration!
 */
int rcu_needs_cpu(int cpu)
{
	/* If no callbacks, RCU doesn't need the CPU. */
	if (!rcu_cpu_has_callbacks(cpu))
		return 0;
	/* Otherwise, RCU needs the CPU only if it recently tried and failed. */
	return per_cpu(rcu_dyntick_holdoff, cpu) == jiffies;
}
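
/*
 * Rough sketch of how this hook is intended to be used by the idle-entry
 * path (simplified, not the actual tick-nohz code; names are illustrative):
 *
 *	// On idle entry, only stop the scheduling-clock tick if neither
 *	// RCU nor anyone else still needs this CPU.
 *	if (!rcu_needs_cpu(cpu) && !other_work_pending(cpu))
 *		stop_sched_tick(cpu);	// enter dyntick-idle mode
 *	else
 *		keep_tick_running(cpu);	// retry on a later idle entry
 */
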
/*
 * Does the specified flavor of RCU have non-lazy callbacks pending on
 * the specified CPU?  Both RCU flavor and CPU are specified by the
 * rcu_data structure.
 */
static bool __rcu_cpu_has_nonlazy_callbacks(struct rcu_data *rdp)
{
	return rdp->qlen != rdp->qlen_lazy;
}
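
/*
 * Illustrative example (numbers invented): if a CPU has rdp->qlen = 10
 * callbacks queued of which rdp->qlen_lazy = 10 are lazy (for example,
 * all posted by kfree_rcu()), the function above returns false and the
 * CPU may sleep for the longer lazy delay.  If even one callback is
 * non-lazy (qlen = 10, qlen_lazy = 9), it returns true and the shorter
 * RCU_IDLE_GP_DELAY timeout is used instead.
 */
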
#ifdef CONFIG_TREE_PREEMPT_RCU

/*
 * Are there non-lazy RCU-preempt callbacks?  (There cannot be if there
 * is no RCU-preempt in the kernel.)
 */
static bool rcu_preempt_cpu_has_nonlazy_callbacks(int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);

	return __rcu_cpu_has_nonlazy_callbacks(rdp);
}

#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */

static bool rcu_preempt_cpu_has_nonlazy_callbacks(int cpu)
{
	return 0;
}

#endif /* else #ifdef CONFIG_TREE_PREEMPT_RCU */
/*
 * Does any flavor of RCU have non-lazy callbacks on the specified CPU?
 */
static bool rcu_cpu_has_nonlazy_callbacks(int cpu)
{
	return __rcu_cpu_has_nonlazy_callbacks(&per_cpu(rcu_sched_data, cpu)) ||
	       __rcu_cpu_has_nonlazy_callbacks(&per_cpu(rcu_bh_data, cpu)) ||
	       rcu_preempt_cpu_has_nonlazy_callbacks(cpu);
}
/*
 * Timer handler used to force a CPU to start pushing its remaining RCU
 * callbacks in the case where it entered dyntick-idle mode with callbacks
 * pending.  The handler doesn't really need to do anything because the
 * real work is done upon re-entry to idle, or by the next scheduling-clock
 * interrupt should idle not be re-entered.
 */
static enum hrtimer_restart rcu_idle_gp_timer_func(struct hrtimer *hrtp)
{
	trace_rcu_prep_idle("Timer");
	return HRTIMER_NORESTART;
}
/*
 * Initialize the timer used to pull CPUs out of dyntick-idle mode.
 */
static void rcu_prepare_for_idle_init(int cpu)
{
	static int firsttime = 1;
	struct hrtimer *hrtp = &per_cpu(rcu_idle_gp_timer, cpu);

	hrtimer_init(hrtp, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtp->function = rcu_idle_gp_timer_func;
	if (firsttime) {
		unsigned int upj = jiffies_to_usecs(RCU_IDLE_GP_DELAY);

		rcu_idle_gp_wait = ns_to_ktime(upj * (u64)1000);
		upj = jiffies_to_usecs(RCU_IDLE_LAZY_GP_DELAY);
		rcu_idle_lazy_gp_wait = ns_to_ktime(upj * (u64)1000);
		firsttime = 0;
	}
}
/*
 * Clean up for exit from idle.  Because we are exiting from idle, there
 * is no longer any point to rcu_idle_gp_timer, so cancel it.  This will
 * do nothing if this timer is not active, so just cancel it unconditionally.
 */
static void rcu_cleanup_after_idle(int cpu)
{
	hrtimer_cancel(&per_cpu(rcu_idle_gp_timer, cpu));
}
/*
 * Check to see if any RCU-related work can be done by the current CPU,
 * and if so, schedule a softirq to get it done.  This function is part
 * of the RCU implementation; it is -not- an exported member of the RCU API.
 *
 * The idea is for the current CPU to clear out all work required by the
 * RCU core for the current grace period, so that this CPU can be permitted
 * to enter dyntick-idle mode.  In some cases, it will need to be awakened
 * at the end of the grace period by whatever CPU ends the grace period.
 * This allows CPUs to go dyntick-idle more quickly, and to reduce the
 * number of wakeups by a modest integer factor.
 *
 * Because it is not legal to invoke rcu_process_callbacks() with irqs
 * disabled, we do one pass of force_quiescent_state(), then do an
 * invoke_rcu_core() to cause rcu_process_callbacks() to be invoked
 * later.  The per-cpu rcu_dyntick_drain variable controls the sequencing.
 *
 * The caller must have disabled interrupts.
 */
static void rcu_prepare_for_idle(int cpu)
{
	unsigned long flags;

	local_irq_save(flags);

	/*
	 * If there are no callbacks on this CPU, enter dyntick-idle mode.
	 * Also reset state to avoid prejudicing later attempts.
	 */
	if (!rcu_cpu_has_callbacks(cpu)) {
		per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
		per_cpu(rcu_dyntick_drain, cpu) = 0;
		local_irq_restore(flags);
		trace_rcu_prep_idle("No callbacks");
		return;
	}

	/*
	 * If in holdoff mode, just return.  We will presumably have
	 * refrained from disabling the scheduling-clock tick.
	 */
	if (per_cpu(rcu_dyntick_holdoff, cpu) == jiffies) {
		local_irq_restore(flags);
		trace_rcu_prep_idle("In holdoff");
		return;
	}

	/* Check and update the rcu_dyntick_drain sequencing. */
	if (per_cpu(rcu_dyntick_drain, cpu) <= 0) {
		/* First time through, initialize the counter. */
		per_cpu(rcu_dyntick_drain, cpu) = RCU_IDLE_FLUSHES;
	} else if (per_cpu(rcu_dyntick_drain, cpu) <= RCU_IDLE_OPT_FLUSHES &&
		   !rcu_pending(cpu)) {
		/* Can we go dyntick-idle despite still having callbacks? */
		trace_rcu_prep_idle("Dyntick with callbacks");
		per_cpu(rcu_dyntick_drain, cpu) = 0;
		per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
		if (rcu_cpu_has_nonlazy_callbacks(cpu))
			hrtimer_start(&per_cpu(rcu_idle_gp_timer, cpu),
				      rcu_idle_gp_wait, HRTIMER_MODE_REL);
		else
			hrtimer_start(&per_cpu(rcu_idle_gp_timer, cpu),
				      rcu_idle_lazy_gp_wait, HRTIMER_MODE_REL);
		return; /* Nothing more to do immediately. */
	} else if (--per_cpu(rcu_dyntick_drain, cpu) <= 0) {
		/* We have hit the limit, so time to give up. */
		per_cpu(rcu_dyntick_holdoff, cpu) = jiffies;
		local_irq_restore(flags);
		trace_rcu_prep_idle("Begin holdoff");
		invoke_rcu_core(); /* Force the CPU out of dyntick-idle. */
		return;
	}
	/*
	 * Do one step of pushing the remaining RCU callbacks through
	 * the RCU core state machine.
	 */
#ifdef CONFIG_TREE_PREEMPT_RCU
	if (per_cpu(rcu_preempt_data, cpu).nxtlist) {
		local_irq_restore(flags);
		rcu_preempt_qs(cpu);
		force_quiescent_state(&rcu_preempt_state, 0);
		local_irq_save(flags);
	}
#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
	if (per_cpu(rcu_sched_data, cpu).nxtlist) {
		local_irq_restore(flags);
		rcu_sched_qs(cpu);
		force_quiescent_state(&rcu_sched_state, 0);
		local_irq_save(flags);
	}
	if (per_cpu(rcu_bh_data, cpu).nxtlist) {
		local_irq_restore(flags);
		rcu_bh_qs(cpu);
		force_quiescent_state(&rcu_bh_state, 0);
		local_irq_save(flags);
	}

	/*
	 * If RCU callbacks are still pending, RCU still needs this CPU.
	 * So try forcing the callbacks through the grace period.
	 */
	if (rcu_cpu_has_callbacks(cpu)) {
		local_irq_restore(flags);
		trace_rcu_prep_idle("More callbacks");
		invoke_rcu_core();
	} else {
		local_irq_restore(flags);
		trace_rcu_prep_idle("Callbacks drained");
	}
}
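
/*
 * Illustrative trace of the state machine above (values invented, assuming
 * RCU_IDLE_FLUSHES = 5 and RCU_IDLE_OPT_FLUSHES = 3): on the first idle
 * entry with callbacks pending, rcu_dyntick_drain is initialized to 5 and
 * one flush pass runs; later idle entries decrement it toward zero, running
 * a flush pass each time.  Once the counter is at or below 3 and
 * rcu_pending() reports nothing immediately needed, the CPU goes
 * dyntick-idle anyway with the appropriate hrtimer armed.  If the counter
 * instead reaches 0, rcu_dyntick_holdoff is set to the current jiffies
 * value, so rcu_needs_cpu() keeps the tick running and further attempts
 * are refused until the jiffies counter advances.
 */
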
#endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */
#ifdef CONFIG_RCU_CPU_STALL_INFO

#ifdef CONFIG_RCU_FAST_NO_HZ

static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
{
	struct hrtimer *hrtp = &per_cpu(rcu_idle_gp_timer, cpu);

	sprintf(cp, "drain=%d %c timer=%lld",
		per_cpu(rcu_dyntick_drain, cpu),
		per_cpu(rcu_dyntick_holdoff, cpu) == jiffies ? 'H' : '.',
		hrtimer_active(hrtp)
			? ktime_to_us(hrtimer_get_remaining(hrtp))
			: -1);
}

#else /* #ifdef CONFIG_RCU_FAST_NO_HZ */

static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
{
	*cp = '\0';
}

#endif /* #else #ifdef CONFIG_RCU_FAST_NO_HZ */
/* Initiate the stall-info list. */
static void print_cpu_stall_info_begin(void)
{
	printk(KERN_CONT "\n");
}
/*
 * Print out diagnostic information for the specified stalled CPU.
 *
 * If the specified CPU is aware of the current RCU grace period
 * (flavor specified by rsp), then print the number of scheduling-
 * clock interrupts the CPU has taken during the time that it has
 * been aware.  Otherwise, print the number of RCU grace periods
 * that this CPU is ignorant of, for example, "1" if the CPU was
 * aware of the previous grace period.
 *
 * Also print out idle and (if CONFIG_RCU_FAST_NO_HZ) idle-entry info.
 */
static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
{
	char fast_no_hz[72];
	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
	struct rcu_dynticks *rdtp = rdp->dynticks;
	char *ticks_title;
	unsigned long ticks_value;

	if (rsp->gpnum == rdp->gpnum) {
		ticks_title = "ticks this GP";
		ticks_value = rdp->ticks_this_gp;
	} else {
		ticks_title = "GPs behind";
		ticks_value = rsp->gpnum - rdp->gpnum;
	}
	print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
	printk(KERN_ERR "\t%d: (%lu %s) idle=%03x/%llx/%d %s\n",
	       cpu, ticks_value, ticks_title,
	       atomic_read(&rdtp->dynticks) & 0xfff,
	       rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
	       fast_no_hz);
}
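
/*
 * For illustration, a line produced by the printk() above might look
 * roughly like this (all values invented for the example):
 *
 *	3: (45 ticks this GP) idle=e5f/1/0 drain=0 . timer=-1
 *
 * meaning that CPU 3 took 45 scheduling-clock interrupts while aware of
 * the current grace period, followed by its dyntick-idle counters and,
 * under CONFIG_RCU_FAST_NO_HZ, the idle-entry state-machine information.
 */
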
/* Terminate the stall-info list. */
static void print_cpu_stall_info_end(void)
{
	printk(KERN_ERR "\t");
}
/* Zero ->ticks_this_gp for all flavors of RCU. */
static void zero_cpu_stall_ticks(struct rcu_data *rdp)
{
	rdp->ticks_this_gp = 0;
}
/* Increment ->ticks_this_gp for all flavors of RCU. */
static void increment_cpu_stall_ticks(void)
{
	__get_cpu_var(rcu_sched_data).ticks_this_gp++;
	__get_cpu_var(rcu_bh_data).ticks_this_gp++;
#ifdef CONFIG_TREE_PREEMPT_RCU
	__get_cpu_var(rcu_preempt_data).ticks_this_gp++;
#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
}
#else /* #ifdef CONFIG_RCU_CPU_STALL_INFO */

static void print_cpu_stall_info_begin(void)
{
	printk(KERN_CONT " {");
}

static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
{
	printk(KERN_CONT " %d", cpu);
}

static void print_cpu_stall_info_end(void)
{
	printk(KERN_CONT "} ");
}

static void zero_cpu_stall_ticks(struct rcu_data *rdp)
{
}

static void increment_cpu_stall_ticks(void)
{
}

#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_INFO */