rcu: Remove rcu_preempt_remove_callbacks()
kernel/rcutiny_plugin.h (firefly-linux-kernel-4.4.55.git)
/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition
 * Internal non-public definitions that provide either classic
 * or preemptible semantics.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (c) 2010 Linaro
 *
 * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */

#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>

/* Global control variables for rcupdate callback mechanism. */
struct rcu_ctrlblk {
        struct rcu_head *rcucblist;     /* List of pending callbacks (CBs). */
        struct rcu_head **donetail;     /* ->next pointer of last "done" CB. */
        struct rcu_head **curtail;      /* ->next pointer of last CB. */
        RCU_TRACE(long qlen);           /* Number of pending CBs. */
        RCU_TRACE(unsigned long gp_start); /* Start time for stalls. */
        RCU_TRACE(unsigned long ticks_this_gp); /* Statistic for stalls. */
        RCU_TRACE(unsigned long jiffies_stall); /* Jiffies at next stall. */
        RCU_TRACE(char *name);          /* Name of RCU type. */
};

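/*
 * The ->donetail/->curtail pointers partition ->rcucblist: callbacks up
 * to the one whose ->next field ->donetail points at have already waited
 * out a grace period and may be invoked, while later callbacks are still
 * waiting.  For reference, the enqueue path in rcutiny.c appends at the
 * tail roughly as follows (a sketch, not a verbatim copy of that code):
 *
 *      *rcp->curtail = head;
 *      rcp->curtail = &head->next;
 *      RCU_TRACE(rcp->qlen++);
 */
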
/* Definition for rcupdate control block. */
static struct rcu_ctrlblk rcu_sched_ctrlblk = {
        .donetail       = &rcu_sched_ctrlblk.rcucblist,
        .curtail        = &rcu_sched_ctrlblk.rcucblist,
        RCU_TRACE(.name = "rcu_sched")
};

static struct rcu_ctrlblk rcu_bh_ctrlblk = {
        .donetail       = &rcu_bh_ctrlblk.rcucblist,
        .curtail        = &rcu_bh_ctrlblk.rcucblist,
        RCU_TRACE(.name = "rcu_bh")
};

#ifdef CONFIG_DEBUG_LOCK_ALLOC
int rcu_scheduler_active __read_mostly;
EXPORT_SYMBOL_GPL(rcu_scheduler_active);
#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

#ifdef CONFIG_RCU_TRACE

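/*
 * Check for a CPU stall: if the current grace period has run past
 * ->jiffies_stall with callbacks still outstanding, print a one-line
 * report and a stack dump, then defer the next report by several
 * stall-check intervals.
 */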
static void check_cpu_stall(struct rcu_ctrlblk *rcp)
{
        unsigned long j;
        unsigned long js;

        if (rcu_cpu_stall_suppress)
                return;
        rcp->ticks_this_gp++;
        j = jiffies;
        js = rcp->jiffies_stall;
        if (*rcp->curtail && ULONG_CMP_GE(j, js)) {
                pr_err("INFO: %s stall on CPU (%lu ticks this GP) idle=%llx (t=%lu jiffies q=%ld)\n",
                       rcp->name, rcp->ticks_this_gp, rcu_dynticks_nesting,
                       jiffies - rcp->gp_start, rcp->qlen);
                dump_stack();
        }
        if (*rcp->curtail && ULONG_CMP_GE(j, js))
                rcp->jiffies_stall = jiffies +
                        3 * rcu_jiffies_till_stall_check() + 3;
        else if (ULONG_CMP_GE(j, js))
                rcp->jiffies_stall = jiffies + rcu_jiffies_till_stall_check();
}

static void check_cpu_stall_preempt(void);

#endif /* #ifdef CONFIG_RCU_TRACE */

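/*
 * Reset the stall-warning state: zero the per-grace-period tick count,
 * record the grace-period start time, and schedule the next stall check.
 */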
static void reset_cpu_stall_ticks(struct rcu_ctrlblk *rcp)
{
#ifdef CONFIG_RCU_TRACE
        rcp->ticks_this_gp = 0;
        rcp->gp_start = jiffies;
        rcp->jiffies_stall = jiffies + rcu_jiffies_till_stall_check();
#endif /* #ifdef CONFIG_RCU_TRACE */
}

static void check_cpu_stalls(void)
{
        RCU_TRACE(check_cpu_stall(&rcu_bh_ctrlblk));
        RCU_TRACE(check_cpu_stall(&rcu_sched_ctrlblk));
        RCU_TRACE(check_cpu_stall_preempt());
}

/*
 * Because preemptible RCU does not exist, it never has any callbacks
 * to process.
 */
static void rcu_preempt_process_callbacks(void)
{
}

/* Hold off callback invocation until early_initcall() time. */
static int rcu_scheduler_fully_active __read_mostly;

/*
 * Start up softirq processing of callbacks.
 */
void invoke_rcu_callbacks(void)
{
        if (rcu_scheduler_fully_active)
                raise_softirq(RCU_SOFTIRQ);
}

#ifdef CONFIG_RCU_TRACE

/*
 * There is no callback kthread, so this thread is never it.
 */
static bool rcu_is_callbacks_kthread(void)
{
        return false;
}

#endif /* #ifdef CONFIG_RCU_TRACE */

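/*
 * Mark the scheduler as fully active, wire up the RCU softirq handler,
 * and raise it once to invoke any callbacks queued during early boot,
 * before invoke_rcu_callbacks() was willing to raise the softirq.
 */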
static int __init rcu_scheduler_really_started(void)
{
        rcu_scheduler_fully_active = 1;
        open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
        raise_softirq(RCU_SOFTIRQ);  /* Invoke any callbacks from early boot. */
        return 0;
}
early_initcall(rcu_scheduler_really_started);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
#include <linux/kernel_stat.h>

/*
 * During boot, we forgive RCU lockdep issues.  After this function is
 * invoked, we start taking RCU lockdep issues seriously.
 */
void __init rcu_scheduler_starting(void)
{
        WARN_ON(nr_context_switches() > 0);
        rcu_scheduler_active = 1;
}

#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

#ifdef CONFIG_RCU_TRACE

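/* Subtract n from the traced callback count, with interrupts disabled. */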
static void rcu_trace_sub_qlen(struct rcu_ctrlblk *rcp, int n)
{
        unsigned long flags;

        local_irq_save(flags);
        rcp->qlen -= n;
        local_irq_restore(flags);
}

/*
 * Dump statistics for TINY_RCU, such as they are.
 */
static int show_tiny_stats(struct seq_file *m, void *unused)
{
        seq_printf(m, "rcu_sched: qlen: %ld\n", rcu_sched_ctrlblk.qlen);
        seq_printf(m, "rcu_bh: qlen: %ld\n", rcu_bh_ctrlblk.qlen);
        return 0;
}

static int show_tiny_stats_open(struct inode *inode, struct file *file)
{
        return single_open(file, show_tiny_stats, NULL);
}

static const struct file_operations show_tiny_stats_fops = {
        .owner = THIS_MODULE,
        .open = show_tiny_stats_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static struct dentry *rcudir;

static int __init rcutiny_trace_init(void)
{
        struct dentry *retval;

        rcudir = debugfs_create_dir("rcu", NULL);
        if (!rcudir)
                goto free_out;
        retval = debugfs_create_file("rcudata", 0444, rcudir,
                                     NULL, &show_tiny_stats_fops);
        if (!retval)
                goto free_out;
        return 0;
free_out:
        debugfs_remove_recursive(rcudir);
        return 1;
}

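/*
 * With CONFIG_RCU_TRACE=y, the counts above are exported through debugfs.
 * Example output (a sketch; assumes debugfs is mounted at
 * /sys/kernel/debug and that the callback queues happen to be empty):
 *
 *      # cat /sys/kernel/debug/rcu/rcudata
 *      rcu_sched: qlen: 0
 *      rcu_bh: qlen: 0
 */
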
static void __exit rcutiny_trace_cleanup(void)
{
        debugfs_remove_recursive(rcudir);
}

module_init(rcutiny_trace_init);
module_exit(rcutiny_trace_cleanup);

MODULE_AUTHOR("Paul E. McKenney");
MODULE_DESCRIPTION("Read-Copy Update tracing for tiny implementation");
MODULE_LICENSE("GPL");

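/*
 * Because preemptible RCU does not exist, there is no preemptible-RCU
 * CPU stall to check for.
 */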
static void check_cpu_stall_preempt(void)
{
}

#endif /* #ifdef CONFIG_RCU_TRACE */