/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 * Internal non-public definitions.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Ingo Molnar <mingo@elte.hu>
 *	   Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/seqlock.h>

/*
 * Define shape of hierarchy based on NR_CPUS, CONFIG_RCU_FANOUT, and
 * CONFIG_RCU_FANOUT_LEAF.
 * In theory, it should be possible to add more levels straightforwardly.
 * In practice, this did work well going from three levels to four.
 * Of course, your mileage may vary.
 */
#define MAX_RCU_LVLS 4
#define RCU_FANOUT_1	      (CONFIG_RCU_FANOUT_LEAF)
#define RCU_FANOUT_2	      (RCU_FANOUT_1 * CONFIG_RCU_FANOUT)
#define RCU_FANOUT_3	      (RCU_FANOUT_2 * CONFIG_RCU_FANOUT)
#define RCU_FANOUT_4	      (RCU_FANOUT_3 * CONFIG_RCU_FANOUT)

#if NR_CPUS <= RCU_FANOUT_1
#  define RCU_NUM_LVLS	      1
#  define NUM_RCU_LVL_0	      1
#  define NUM_RCU_LVL_1	      (NR_CPUS)
#  define NUM_RCU_LVL_2	      0
#  define NUM_RCU_LVL_3	      0
#  define NUM_RCU_LVL_4	      0
#elif NR_CPUS <= RCU_FANOUT_2
#  define RCU_NUM_LVLS	      2
#  define NUM_RCU_LVL_0	      1
#  define NUM_RCU_LVL_1	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
#  define NUM_RCU_LVL_2	      (NR_CPUS)
#  define NUM_RCU_LVL_3	      0
#  define NUM_RCU_LVL_4	      0
#elif NR_CPUS <= RCU_FANOUT_3
#  define RCU_NUM_LVLS	      3
#  define NUM_RCU_LVL_0	      1
#  define NUM_RCU_LVL_1	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_2)
#  define NUM_RCU_LVL_2	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
#  define NUM_RCU_LVL_3	      (NR_CPUS)
#  define NUM_RCU_LVL_4	      0
#elif NR_CPUS <= RCU_FANOUT_4
#  define RCU_NUM_LVLS	      4
#  define NUM_RCU_LVL_0	      1
#  define NUM_RCU_LVL_1	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_3)
#  define NUM_RCU_LVL_2	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_2)
#  define NUM_RCU_LVL_3	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
#  define NUM_RCU_LVL_4	      (NR_CPUS)
#else
# error "CONFIG_RCU_FANOUT insufficient for NR_CPUS"
#endif /* #if (NR_CPUS) <= RCU_FANOUT_1 */

#define RCU_SUM (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2 + NUM_RCU_LVL_3 + NUM_RCU_LVL_4)
#define NUM_RCU_NODES (RCU_SUM - NR_CPUS)
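
/*
 * Worked example (illustrative only, not generated by the build): with
 * NR_CPUS=64, CONFIG_RCU_FANOUT_LEAF=16, and CONFIG_RCU_FANOUT=64,
 * NR_CPUS fits within RCU_FANOUT_2 = 16 * 64 = 1024, so RCU_NUM_LVLS == 2
 * with NUM_RCU_LVL_0 = 1 root, NUM_RCU_LVL_1 = DIV_ROUND_UP(64, 16) = 4
 * leaves, and NUM_RCU_LVL_2 = 64 rcu_data slots.  Then
 * RCU_SUM = 1 + 4 + 64 = 69 and NUM_RCU_NODES = 69 - 64 = 5, that is,
 * one root rcu_node fanning out to four leaf rcu_nodes.
 */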

extern int rcu_num_lvls;
extern int rcu_num_nodes;

/*
 * Dynticks per-CPU state.
 */
struct rcu_dynticks {
	long long dynticks_nesting;	/* Track irq/process nesting level. */
					/*  Process level is worth LLONG_MAX/2. */
	int dynticks_nmi_nesting;	/* Track NMI nesting level. */
	atomic_t dynticks;		/* Even value for idle, else odd. */
#ifdef CONFIG_RCU_FAST_NO_HZ
	int dyntick_drain;		/* Prepare-for-idle state variable. */
	unsigned long dyntick_holdoff;
					/* No retries for the jiffy of failure. */
	struct timer_list idle_gp_timer;
					/* Wake up CPU sleeping with callbacks. */
	unsigned long idle_gp_timer_expires;
					/* When to wake up CPU (for repost). */
	bool idle_first_pass;		/* First pass of attempt to go idle? */
	unsigned long nonlazy_posted;
					/* # times non-lazy CBs posted to CPU. */
	unsigned long nonlazy_posted_snap;
					/* idle-period nonlazy_posted snapshot. */
	int tick_nohz_enabled_snap;	/* Previously seen value from sysfs. */
#endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */
};
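
/*
 * Illustrative helper (a sketch, not part of the original header): per
 * the ->dynticks comment above, an even counter value means the CPU is
 * in dynticks-idle state from RCU's viewpoint.
 */
static inline bool rcu_dynticks_idle_example(struct rcu_dynticks *rdtp)
{
	/* Low-order bit clear (even value) means dynticks-idle. */
	return !(atomic_read(&rdtp->dynticks) & 0x1);
}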

/* RCU's kthread states for tracing. */
#define RCU_KTHREAD_STOPPED  0
#define RCU_KTHREAD_RUNNING  1
#define RCU_KTHREAD_WAITING  2
#define RCU_KTHREAD_OFFCPU   3
#define RCU_KTHREAD_YIELDING 4
#define RCU_KTHREAD_MAX      4

/*
 * Definition for node within the RCU grace-period-detection hierarchy.
 */
struct rcu_node {
	raw_spinlock_t lock;	/* Root rcu_node's lock protects some */
				/*  rcu_state fields as well as following. */
	unsigned long gpnum;	/* Current grace period for this node. */
				/*  This will either be equal to or one */
				/*  behind the root rcu_node's gpnum. */
	unsigned long completed; /* Last GP completed for this node. */
				/*  This will either be equal to or one */
				/*  behind the root rcu_node's gpnum. */
	unsigned long qsmask;	/* CPUs or groups that need to switch in */
				/*  order for current grace period to proceed.*/
				/*  In leaf rcu_node, each bit corresponds to */
				/*  an rcu_data structure, otherwise, each */
				/*  bit corresponds to a child rcu_node */
				/*  structure. */
	unsigned long expmask;	/* Groups that have ->blkd_tasks */
				/*  elements that need to drain to allow the */
				/*  current expedited grace period to */
				/*  complete (only for TREE_PREEMPT_RCU). */
	unsigned long qsmaskinit;
				/* Per-GP initial value for qsmask & expmask. */
	unsigned long grpmask;	/* Mask to apply to parent qsmask. */
				/*  Only one bit will be set in this mask. */
	int	grplo;		/* lowest-numbered CPU or group here. */
	int	grphi;		/* highest-numbered CPU or group here. */
	u8	grpnum;		/* CPU/group number for next level up. */
	u8	level;		/* root is at level 0. */
	struct rcu_node *parent;
	struct list_head blkd_tasks;
				/* Tasks blocked in RCU read-side critical */
				/*  section.  Tasks are placed at the head */
				/*  of this list and age towards the tail. */
	struct list_head *gp_tasks;
				/* Pointer to the first task blocking the */
				/*  current grace period, or NULL if there */
				/*  is no such task. */
	struct list_head *exp_tasks;
				/* Pointer to the first task blocking the */
				/*  current expedited grace period, or NULL */
				/*  if there is no such task.  If there */
				/*  is no current expedited grace period, */
				/*  then there cannot be any such task. */
#ifdef CONFIG_RCU_BOOST
	struct list_head *boost_tasks;
				/* Pointer to first task that needs to be */
				/*  priority boosted, or NULL if no priority */
				/*  boosting is needed for this rcu_node */
				/*  structure.  If there are no tasks */
				/*  queued on this rcu_node structure that */
				/*  are blocking the current grace period, */
				/*  there can be no such task. */
	unsigned long boost_time;
				/* When to start boosting (jiffies). */
	struct task_struct *boost_kthread_task;
				/* kthread that takes care of priority */
				/*  boosting for this rcu_node structure. */
	unsigned int boost_kthread_status;
				/* State of boost_kthread_task for tracing. */
	unsigned long n_tasks_boosted;
				/* Total number of tasks boosted. */
	unsigned long n_exp_boosts;
				/* Number of tasks boosted for expedited GP. */
	unsigned long n_normal_boosts;
				/* Number of tasks boosted for normal GP. */
	unsigned long n_balk_blkd_tasks;
				/* Refused to boost: no blocked tasks. */
	unsigned long n_balk_exp_gp_tasks;
				/* Refused to boost: nothing blocking GP. */
	unsigned long n_balk_boost_tasks;
				/* Refused to boost: already boosting. */
	unsigned long n_balk_notblocked;
				/* Refused to boost: RCU RS CS still running. */
	unsigned long n_balk_notyet;
				/* Refused to boost: not yet time. */
	unsigned long n_balk_nos;
				/* Refused to boost: not sure why, though. */
				/*  This can happen due to race conditions. */
#endif /* #ifdef CONFIG_RCU_BOOST */
	raw_spinlock_t fqslock ____cacheline_internodealigned_in_smp;
} ____cacheline_internodealigned_in_smp;
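
/*
 * Illustrative sketch (simplified; the real propagation code in
 * rcutree.c also handles locking and grace-period bookkeeping): a
 * quiescent-state report clears bits in ->qsmask and, when a node's
 * mask empties, propagates one level up using ->grpmask.
 */
static inline void rcu_report_qs_sketch(struct rcu_node *rnp,
					unsigned long mask)
{
	for (; rnp != NULL; rnp = rnp->parent) {
		rnp->qsmask &= ~mask;	/* Clear reporting CPU/group bits. */
		if (rnp->qsmask != 0)
			break;		/* Others still need to report. */
		mask = rnp->grpmask;	/* This node's bit in its parent. */
	}
}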

/*
 * Do a full breadth-first scan of the rcu_node structures for the
 * specified rcu_state structure.
 */
#define rcu_for_each_node_breadth_first(rsp, rnp) \
	for ((rnp) = &(rsp)->node[0]; \
	     (rnp) < &(rsp)->node[rcu_num_nodes]; (rnp)++)

/*
 * Do a breadth-first scan of the non-leaf rcu_node structures for the
 * specified rcu_state structure.  Note that if there is a singleton
 * rcu_node tree with but one rcu_node structure, this loop is a no-op.
 */
#define rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) \
	for ((rnp) = &(rsp)->node[0]; \
	     (rnp) < (rsp)->level[rcu_num_lvls - 1]; (rnp)++)

/*
 * Scan the leaves of the rcu_node hierarchy for the specified rcu_state
 * structure.  Note that if there is a singleton rcu_node tree with but
 * one rcu_node structure, this loop -will- visit the rcu_node structure.
 * It is still a leaf node, even if it is also the root node.
 */
#define rcu_for_each_leaf_node(rsp, rnp) \
	for ((rnp) = (rsp)->level[rcu_num_lvls - 1]; \
	     (rnp) < &(rsp)->node[rcu_num_nodes]; (rnp)++)
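
/*
 * Example use (illustrative only, assuming a valid rcu_state pointer
 * "rsp"): count the leaf rcu_node structures of one flavor.
 *
 *	struct rcu_node *rnp;
 *	int nleaves = 0;
 *
 *	rcu_for_each_leaf_node(rsp, rnp)
 *		nleaves++;
 */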

/* Index values for nxttail array in struct rcu_data. */
#define RCU_DONE_TAIL		0	/* Also RCU_WAIT head. */
#define RCU_WAIT_TAIL		1	/* Also RCU_NEXT_READY head. */
#define RCU_NEXT_READY_TAIL	2	/* Also RCU_NEXT head. */
#define RCU_NEXT_TAIL		3
#define RCU_NEXT_SIZE		4

/* Per-CPU data for read-copy update. */
struct rcu_data {
	/* 1) quiescent-state and grace-period handling : */
	unsigned long	completed;	/* Track rsp->completed gp number */
					/*  in order to detect GP end. */
	unsigned long	gpnum;		/* Highest gp number that this CPU */
					/*  is aware of having started. */
	bool		passed_quiesce;	/* User-mode/idle loop etc. */
	bool		qs_pending;	/* Core waits for quiescent state. */
	bool		beenonline;	/* CPU online at least once. */
	bool		preemptible;	/* Preemptible RCU? */
	struct rcu_node *mynode;	/* This CPU's leaf of hierarchy */
	unsigned long grpmask;		/* Mask to apply to leaf qsmask. */
#ifdef CONFIG_RCU_CPU_STALL_INFO
	unsigned long	ticks_this_gp;	/* The number of scheduling-clock */
					/*  ticks this CPU has handled */
					/*  during and after the last grace */
					/*  period it is aware of. */
#endif /* #ifdef CONFIG_RCU_CPU_STALL_INFO */

	/* 2) batch handling */
	/*
	 * If nxtlist is not NULL, it is partitioned as follows.
	 * Any of the partitions might be empty, in which case the
	 * pointer to that partition will be equal to the pointer for
	 * the following partition.  When the list is empty, all of
	 * the nxttail elements point to the ->nxtlist pointer itself,
	 * which in that case is NULL.
	 *
	 * [nxtlist, *nxttail[RCU_DONE_TAIL]):
	 *	Entries that batch # <= ->completed
	 *	The grace period for these entries has completed, and
	 *	the other grace-period-completed entries may be moved
	 *	here temporarily in rcu_process_callbacks().
	 * [*nxttail[RCU_DONE_TAIL], *nxttail[RCU_WAIT_TAIL]):
	 *	Entries that batch # <= ->completed - 1: waiting for current GP
	 * [*nxttail[RCU_WAIT_TAIL], *nxttail[RCU_NEXT_READY_TAIL]):
	 *	Entries known to have arrived before current GP ended
	 * [*nxttail[RCU_NEXT_READY_TAIL], *nxttail[RCU_NEXT_TAIL]):
	 *	Entries that might have arrived after current GP ended
	 *	Note that the value of *nxttail[RCU_NEXT_TAIL] will
	 *	always be NULL, as this is the end of the list.
	 */
	struct rcu_head *nxtlist;
	struct rcu_head **nxttail[RCU_NEXT_SIZE];
	unsigned long	nxtcompleted[RCU_NEXT_SIZE];
					/* grace periods for sublists. */
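	/*
	 * Illustrative invariant (a sketch of the empty-list state set
	 * up in rcutree.c, not code executed here): an empty callback
	 * list has a NULL ->nxtlist with every tail pointer aimed at it.
	 *
	 *	rdp->nxtlist = NULL;
	 *	for (i = 0; i < RCU_NEXT_SIZE; i++)
	 *		rdp->nxttail[i] = &rdp->nxtlist;
	 */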
	long		qlen_lazy;	/* # of lazy queued callbacks */
	long		qlen;		/* # of queued callbacks, incl lazy */
	long		qlen_last_fqs_check;
					/* qlen at last check for QS forcing */
	unsigned long	n_cbs_invoked;	/* count of RCU cbs invoked. */
	unsigned long	n_nocbs_invoked; /* count of no-CBs RCU cbs invoked. */
	unsigned long	n_cbs_orphaned;	/* RCU cbs orphaned by dying CPU */
	unsigned long	n_cbs_adopted;	/* RCU cbs adopted from dying CPU */
	unsigned long	n_force_qs_snap;
					/* did other CPU force QS recently? */
	long		blimit;		/* Upper limit on a processed batch */

	/* 3) dynticks interface. */
	struct rcu_dynticks *dynticks;	/* Shared per-CPU dynticks state. */
	int dynticks_snap;		/* Per-GP tracking for dynticks. */

	/* 4) reasons this CPU needed to be kicked by force_quiescent_state */
	unsigned long dynticks_fqs;	/* Kicked due to dynticks idle. */
	unsigned long offline_fqs;	/* Kicked due to being offline. */

	/* 5) __rcu_pending() statistics. */
	unsigned long n_rcu_pending;	/* rcu_pending() calls since boot. */
	unsigned long n_rp_qs_pending;
	unsigned long n_rp_report_qs;
	unsigned long n_rp_cb_ready;
	unsigned long n_rp_cpu_needs_gp;
	unsigned long n_rp_gp_completed;
	unsigned long n_rp_gp_started;
	unsigned long n_rp_need_nothing;

	/* 6) _rcu_barrier() and OOM callbacks. */
	struct rcu_head barrier_head;
#ifdef CONFIG_RCU_FAST_NO_HZ
	struct rcu_head oom_head;
#endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */

	/* 7) Callback offloading. */
#ifdef CONFIG_RCU_NOCB_CPU
	struct rcu_head *nocb_head;	/* CBs waiting for kthread. */
	struct rcu_head **nocb_tail;
	atomic_long_t nocb_q_count;	/* # CBs waiting for kthread */
	atomic_long_t nocb_q_count_lazy; /*  (approximate). */
	int nocb_p_count;		/* # CBs being invoked by kthread */
	int nocb_p_count_lazy;		/*  (approximate). */
	wait_queue_head_t nocb_wq;	/* For nocb kthreads to sleep on. */
	struct task_struct *nocb_kthread;
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */

	/* 8) RCU CPU stall data. */
#ifdef CONFIG_RCU_CPU_STALL_INFO
	unsigned int softirq_snap;	/* Snapshot of softirq activity. */
#endif /* #ifdef CONFIG_RCU_CPU_STALL_INFO */

	int cpu;
	struct rcu_state *rsp;
};

/* Values for fqs_state field in struct rcu_state. */
#define RCU_GP_IDLE		0	/* No grace period in progress. */
#define RCU_GP_INIT		1	/* Grace period being initialized. */
#define RCU_SAVE_DYNTICK	2	/* Need to scan dyntick state. */
#define RCU_FORCE_QS		3	/* Need to force quiescent state. */
#define RCU_SIGNAL_INIT		RCU_SAVE_DYNTICK

#define RCU_JIFFIES_TILL_FORCE_QS	 3	/* for rsp->jiffies_force_qs */

#define RCU_STALL_RAT_DELAY		2	/* Allow other CPUs time */
						/*  to take at least one */
						/*  scheduling clock irq */
						/*  before ratting on them. */

#define rcu_wait(cond)							\
do {									\
	for (;;) {							\
		set_current_state(TASK_INTERRUPTIBLE);			\
		if (cond)						\
			break;						\
		schedule();						\
	}								\
	__set_current_state(TASK_RUNNING);				\
} while (0)
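
/*
 * Example use (illustrative only; "work_available()" is a hypothetical
 * predicate, not a kernel API): a kthread sleeping until its condition
 * becomes true, rechecking on every wakeup.
 *
 *	rcu_wait(work_available(rsp));
 */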

/*
 * RCU global state, including node hierarchy.  This hierarchy is
 * represented in "heap" form in a dense array.  The root (first level)
 * of the hierarchy is in ->node[0] (referenced by ->level[0]), the second
 * level in ->node[1] through ->node[m] (->node[1] referenced by ->level[1]),
 * and the third level in ->node[m+1] and following (->node[m+1] referenced
 * by ->level[2]).  The number of levels is determined by the number of
 * CPUs and by CONFIG_RCU_FANOUT.  Small systems will have a "hierarchy"
 * consisting of a single rcu_node.
 */
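
/*
 * Layout example (illustrative only): for the five-node tree worked
 * out above (one root plus four leaves), ->node[0] is the root and
 * ->node[1] through ->node[4] are the leaves, so ->level[0] ==
 * &node[0] and ->level[1] == &node[1].
 */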
struct rcu_state {
	struct rcu_node node[NUM_RCU_NODES];	/* Hierarchy. */
	struct rcu_node *level[RCU_NUM_LVLS];	/* Hierarchy levels. */
	u32 levelcnt[MAX_RCU_LVLS + 1];		/* # nodes in each level. */
	u8 levelspread[RCU_NUM_LVLS];		/* kids/node in each level. */
	struct rcu_data __percpu *rda;		/* pointer to per-CPU rcu_data. */
	void (*call)(struct rcu_head *head,	/* call_rcu() flavor. */
		     void (*func)(struct rcu_head *head));
#ifdef CONFIG_RCU_NOCB_CPU
	void (*call_remote)(struct rcu_head *head,
			    void (*func)(struct rcu_head *head));
						/* call_rcu() flavor, but for */
						/*  placing on remote CPU. */
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */

	/* The following fields are guarded by the root rcu_node's lock. */

	u8	fqs_state ____cacheline_internodealigned_in_smp;
						/* Force QS state. */
	u8	boost;				/* Subject to priority boost. */
	unsigned long gpnum;			/* Current gp number. */
	unsigned long completed;		/* # of last completed gp. */
	struct task_struct *gp_kthread;		/* Task for grace periods. */
	wait_queue_head_t gp_wq;		/* Where GP task waits. */
	int gp_flags;				/* Commands for GP task. */

	/* End of fields guarded by root rcu_node's lock. */

	raw_spinlock_t orphan_lock ____cacheline_internodealigned_in_smp;
						/* Protect following fields. */
	struct rcu_head *orphan_nxtlist;	/* Orphaned callbacks that */
						/*  need a grace period. */
	struct rcu_head **orphan_nxttail;	/* Tail of above. */
	struct rcu_head *orphan_donelist;	/* Orphaned callbacks that */
						/*  are ready to invoke. */
	struct rcu_head **orphan_donetail;	/* Tail of above. */
	long qlen_lazy;				/* Number of lazy callbacks. */
	long qlen;				/* Total number of callbacks. */
	/* End of fields guarded by orphan_lock. */

	struct mutex onoff_mutex;		/* Coordinate hotplug & GPs. */

	struct mutex barrier_mutex;		/* Guards barrier fields. */
	atomic_t barrier_cpu_count;		/* # CPUs waiting on. */
	struct completion barrier_completion;	/* Wake at barrier end. */
	unsigned long n_barrier_done;		/* ++ at start and end of */
						/*  _rcu_barrier(). */
	/* End of fields guarded by barrier_mutex. */

	atomic_long_t expedited_start;		/* Starting ticket. */
	atomic_long_t expedited_done;		/* Done ticket. */
	atomic_long_t expedited_wrap;		/* # near-wrap incidents. */
	atomic_long_t expedited_tryfail;	/* # acquisition failures. */
	atomic_long_t expedited_workdone1;	/* # done by others #1. */
	atomic_long_t expedited_workdone2;	/* # done by others #2. */
	atomic_long_t expedited_normal;		/* # fallbacks to normal. */
	atomic_long_t expedited_stoppedcpus;	/* # successful stop_cpus. */
	atomic_long_t expedited_done_tries;	/* # tries to update _done. */
	atomic_long_t expedited_done_lost;	/* # times beaten to _done. */
	atomic_long_t expedited_done_exit;	/* # times exited _done loop. */

	unsigned long jiffies_force_qs;		/* Time at which to invoke */
						/*  force_quiescent_state(). */
	unsigned long n_force_qs;		/* Number of calls to */
						/*  force_quiescent_state(). */
	unsigned long n_force_qs_lh;		/* ~Number of calls leaving */
						/*  due to lock unavailable. */
	unsigned long n_force_qs_ngp;		/* Number of calls leaving */
						/*  due to no GP active. */
	unsigned long gp_start;			/* Time at which GP started, */
						/*  but in jiffies. */
	unsigned long jiffies_stall;		/* Time at which to check */
						/*  for CPU stalls. */
	unsigned long gp_max;			/* Maximum GP duration in */
						/*  jiffies. */
	char *name;				/* Name of structure. */
	struct list_head flavors;		/* List of RCU flavors. */
};

/* Values for rcu_state structure's gp_flags field. */
#define RCU_GP_FLAG_INIT 0x1	/* Need grace-period initialization. */
#define RCU_GP_FLAG_FQS  0x2	/* Need grace-period quiescent-state forcing. */

extern struct list_head rcu_struct_flavors;

/* Sequence through rcu_state structures for each RCU flavor. */
#define for_each_rcu_flavor(rsp) \
	list_for_each_entry((rsp), &rcu_struct_flavors, flavors)
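
/*
 * Example use (illustrative only): walk every registered RCU flavor
 * and print its name.
 *
 *	struct rcu_state *rsp;
 *
 *	for_each_rcu_flavor(rsp)
 *		pr_info("RCU flavor: %s\n", rsp->name);
 */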

/* Return values for rcu_preempt_offline_tasks(). */

#define RCU_OFL_TASKS_NORM_GP	0x1	/* Tasks blocking normal */
					/*  GP were moved to root. */
#define RCU_OFL_TASKS_EXP_GP	0x2	/* Tasks blocking expedited */
					/*  GP were moved to root. */

/*
 * RCU implementation internal declarations:
 */
extern struct rcu_state rcu_sched_state;
DECLARE_PER_CPU(struct rcu_data, rcu_sched_data);

extern struct rcu_state rcu_bh_state;
DECLARE_PER_CPU(struct rcu_data, rcu_bh_data);

#ifdef CONFIG_TREE_PREEMPT_RCU
extern struct rcu_state rcu_preempt_state;
DECLARE_PER_CPU(struct rcu_data, rcu_preempt_data);
#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */

#ifdef CONFIG_RCU_BOOST
DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
DECLARE_PER_CPU(int, rcu_cpu_kthread_cpu);
DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
DECLARE_PER_CPU(char, rcu_cpu_has_work);
#endif /* #ifdef CONFIG_RCU_BOOST */

#ifndef RCU_TREE_NONCORE

/* Forward declarations for rcutree_plugin.h */
static void rcu_bootup_announce(void);
long rcu_batches_completed(void);
static void rcu_preempt_note_context_switch(int cpu);
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
#ifdef CONFIG_HOTPLUG_CPU
static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp,
				      unsigned long flags);
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
static void rcu_print_detail_task_stall(struct rcu_state *rsp);
static int rcu_print_task_stall(struct rcu_node *rnp);
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
#ifdef CONFIG_HOTPLUG_CPU
static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
				     struct rcu_node *rnp,
				     struct rcu_data *rdp);
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
static void rcu_preempt_check_callbacks(int cpu);
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu));
#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_TREE_PREEMPT_RCU)
static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
			       bool wake);
#endif /* #if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_TREE_PREEMPT_RCU) */
static void __init __rcu_init_preempt(void);
static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
static void invoke_rcu_callbacks_kthread(void);
static bool rcu_is_callbacks_kthread(void);
#ifdef CONFIG_RCU_BOOST
static void rcu_preempt_do_callbacks(void);
static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
						 struct rcu_node *rnp);
#endif /* #ifdef CONFIG_RCU_BOOST */
static void __cpuinit rcu_prepare_kthreads(int cpu);
static void rcu_prepare_for_idle_init(int cpu);
static void rcu_cleanup_after_idle(int cpu);
static void rcu_prepare_for_idle(int cpu);
static void rcu_idle_count_callbacks_posted(void);
static void print_cpu_stall_info_begin(void);
static void print_cpu_stall_info(struct rcu_state *rsp, int cpu);
static void print_cpu_stall_info_end(void);
static void zero_cpu_stall_ticks(struct rcu_data *rdp);
static void increment_cpu_stall_ticks(void);
static bool is_nocb_cpu(int cpu);
static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
			    bool lazy);
static bool rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
				      struct rcu_data *rdp);
static bool nocb_cpu_expendable(int cpu);
static void rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp);
static void rcu_spawn_nocb_kthreads(struct rcu_state *rsp);
static void init_nocb_callback_list(struct rcu_data *rdp);
static void __init rcu_init_nocb(void);

#endif /* #ifndef RCU_TREE_NONCORE */

#ifdef CONFIG_RCU_TRACE
#ifdef CONFIG_RCU_NOCB_CPU
/* Sum up queue lengths for tracing. */
static inline void rcu_nocb_q_lengths(struct rcu_data *rdp, long *ql, long *qll)
{
	*ql = atomic_long_read(&rdp->nocb_q_count) + rdp->nocb_p_count;
	*qll = atomic_long_read(&rdp->nocb_q_count_lazy) + rdp->nocb_p_count_lazy;
}
#else /* #ifdef CONFIG_RCU_NOCB_CPU */
static inline void rcu_nocb_q_lengths(struct rcu_data *rdp, long *ql, long *qll)
{
	*ql = 0;
	*qll = 0;
}
#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
#endif /* #ifdef CONFIG_RCU_TRACE */