/*
 * jump label support
 *
 * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
 * Copyright (C) 2011 Peter Zijlstra <pzijlstr@redhat.com>
 *
 */
#include <linux/memory.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/err.h>
#include <linux/static_key.h>

#ifdef HAVE_JUMP_LABEL

/* mutex to protect coming/going of the jump_label table */
static DEFINE_MUTEX(jump_label_mutex);

void jump_label_lock(void)
{
        mutex_lock(&jump_label_mutex);
}

void jump_label_unlock(void)
{
        mutex_unlock(&jump_label_mutex);
}

bool static_key_enabled(struct static_key *key)
{
        return (atomic_read(&key->enabled) > 0);
}
EXPORT_SYMBOL_GPL(static_key_enabled);

static int jump_label_cmp(const void *a, const void *b)
{
        const struct jump_entry *jea = a;
        const struct jump_entry *jeb = b;

        if (jea->key < jeb->key)
                return -1;

        if (jea->key > jeb->key)
                return 1;

        return 0;
}

static void
jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
{
        unsigned long size;

        size = (((unsigned long)stop - (unsigned long)start)
                                        / sizeof(struct jump_entry));
        sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
}

static void jump_label_update(struct static_key *key, int enable);

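/*
 * The first increment on a disabled key takes the mutex and patches the
 * branch sites; subsequent increments only bump the reference count.
 * atomic_inc_not_zero() keeps that common case lock-free.
 */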
void static_key_slow_inc(struct static_key *key)
{
        if (atomic_inc_not_zero(&key->enabled))
                return;

        jump_label_lock();
        if (atomic_read(&key->enabled) == 0) {
                if (!jump_label_get_branch_default(key))
                        jump_label_update(key, JUMP_LABEL_ENABLE);
                else
                        jump_label_update(key, JUMP_LABEL_DISABLE);
        }
        atomic_inc(&key->enabled);
        jump_label_unlock();
}
EXPORT_SYMBOL_GPL(static_key_slow_inc);

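/*
 * Usage sketch (illustrative, not part of this file): a key initialized
 * false keeps the hot path as a straight-line no-op until it is enabled
 * from slow-path code:
 *
 *      static struct static_key my_key = STATIC_KEY_INIT_FALSE;
 *
 *      if (static_key_false(&my_key))          // hot path
 *              do_unlikely_work();
 *
 *      static_key_slow_inc(&my_key);           // patch the branch in
 *      static_key_slow_dec(&my_key);           // and back out
 *
 * my_key and do_unlikely_work() are made-up names for the example.
 */
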
static void __static_key_slow_dec(struct static_key *key,
                unsigned long rate_limit, struct delayed_work *work)
{
        if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) {
                WARN(atomic_read(&key->enabled) < 0,
                     "jump label: negative count!\n");
                return;
        }

        if (rate_limit) {
                atomic_inc(&key->enabled);
                schedule_delayed_work(work, rate_limit);
        } else {
                if (!jump_label_get_branch_default(key))
                        jump_label_update(key, JUMP_LABEL_DISABLE);
                else
                        jump_label_update(key, JUMP_LABEL_ENABLE);
        }
        jump_label_unlock();
}

static void jump_label_update_timeout(struct work_struct *work)
{
        struct static_key_deferred *key =
                container_of(work, struct static_key_deferred, work.work);
        __static_key_slow_dec(&key->key, 0, NULL);
}

void static_key_slow_dec(struct static_key *key)
{
        __static_key_slow_dec(key, 0, NULL);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec);

void static_key_slow_dec_deferred(struct static_key_deferred *key)
{
        __static_key_slow_dec(&key->key, key->timeout, &key->work);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec_deferred);

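/*
 * Deferred keys rate-limit the disable side: the decrement re-increments
 * the count and reschedules itself via a delayed work item. A sketch of
 * the intended use (illustrative only):
 *
 *      static struct static_key_deferred my_dkey;
 *
 *      jump_label_rate_limit(&my_dkey, HZ);     // batch disables, ~1s
 *      static_key_slow_inc(&my_dkey.key);
 *      ...
 *      static_key_slow_dec_deferred(&my_dkey);  // may patch later, from a work item
 */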
void jump_label_rate_limit(struct static_key_deferred *key,
                unsigned long rl)
{
        key->timeout = rl;
        INIT_DELAYED_WORK(&key->work, jump_label_update_timeout);
}

static int addr_conflict(struct jump_entry *entry, void *start, void *end)
{
        if (entry->code <= (unsigned long)end &&
                entry->code + JUMP_LABEL_NOP_SIZE > (unsigned long)start)
                return 1;

        return 0;
}

static int __jump_label_text_reserved(struct jump_entry *iter_start,
                struct jump_entry *iter_stop, void *start, void *end)
{
        struct jump_entry *iter;

        iter = iter_start;
        while (iter < iter_stop) {
                if (addr_conflict(iter, start, end))
                        return 1;
                iter++;
        }

        return 0;
}

/*
 * Update code which is definitely not currently executing.
 * Architectures which need heavyweight synchronization to modify
 * running code can override this to make the non-live update case
 * cheaper.
 */
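/* x86, for instance, implements the non-live case with text_poke_early(). */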
void __weak __init_or_module arch_jump_label_transform_static(struct jump_entry *entry,
                                            enum jump_label_type type)
{
        arch_jump_label_transform(entry, type);
}

static void __jump_label_update(struct static_key *key,
                                struct jump_entry *entry,
                                struct jump_entry *stop, int enable)
{
        for (; (entry < stop) &&
              (entry->key == (jump_label_t)(unsigned long)key);
              entry++) {
                /*
                 * entry->code set to 0 invalidates module init text sections;
                 * kernel_text_address() verifies we are not in core kernel
                 * init code, see jump_label_invalidate_module_init().
                 */
                if (entry->code && kernel_text_address(entry->code))
                        arch_jump_label_transform(entry, enable);
        }
}

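/*
 * A site must be patched to the enabled state exactly when the key's live
 * state differs from its default branch direction: (true_branch != state).
 */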
static enum jump_label_type jump_label_type(struct static_key *key)
{
        bool true_branch = jump_label_get_branch_default(key);
        bool state = static_key_enabled(key);

        if ((!true_branch && state) || (true_branch && !state))
                return JUMP_LABEL_ENABLE;

        return JUMP_LABEL_DISABLE;
}

void __init jump_label_init(void)
{
        struct jump_entry *iter_start = __start___jump_table;
        struct jump_entry *iter_stop = __stop___jump_table;
        struct static_key *key = NULL;
        struct jump_entry *iter;

        jump_label_lock();
        jump_label_sort_entries(iter_start, iter_stop);

        for (iter = iter_start; iter < iter_stop; iter++) {
                struct static_key *iterk;

                iterk = (struct static_key *)(unsigned long)iter->key;
                arch_jump_label_transform_static(iter, jump_label_type(iterk));
                if (iterk == key)
                        continue;

                key = iterk;
                /*
                 * Set key->entries to iter, but preserve JUMP_LABEL_TRUE_BRANCH.
                 */
                *((unsigned long *)&key->entries) += (unsigned long)iter;
#ifdef CONFIG_MODULES
                key->next = NULL;
#endif
        }
        jump_label_unlock();
}

#ifdef CONFIG_MODULES

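/*
 * Per-module bookkeeping: each static_key chains one of these per module
 * that references it, so updates can reach jump entries living outside
 * the key's own object file.
 */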
struct static_key_mod {
        struct static_key_mod *next;
        struct jump_entry *entries;
        struct module *mod;
};

static int __jump_label_mod_text_reserved(void *start, void *end)
{
        struct module *mod;

        mod = __module_text_address((unsigned long)start);
        if (!mod)
                return 0;

        WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);

        return __jump_label_text_reserved(mod->jump_entries,
                                mod->jump_entries + mod->num_jump_entries,
                                start, end);
}

static void __jump_label_mod_update(struct static_key *key, int enable)
{
        struct static_key_mod *mod = key->next;

        while (mod) {
                struct module *m = mod->mod;

                __jump_label_update(key, mod->entries,
                                    m->jump_entries + m->num_jump_entries,
                                    enable);
                mod = mod->next;
        }
}

/***
 * jump_label_apply_nops - patch module jump labels with arch_get_jump_label_nop()
 * @mod: module to patch
 *
 * Allow for run-time selection of the optimal nops. Before the module
 * loads, patch its entries with arch_get_jump_label_nop(), which is
 * provided by the arch-specific jump label code.
 */
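/* (On x86, for example, this runs from module_finalize(), before the module executes.) */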
void jump_label_apply_nops(struct module *mod)
{
        struct jump_entry *iter_start = mod->jump_entries;
        struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
        struct jump_entry *iter;

        /* if the module doesn't have jump label entries, just return */
        if (iter_start == iter_stop)
                return;

        for (iter = iter_start; iter < iter_stop; iter++) {
                arch_jump_label_transform_static(iter, JUMP_LABEL_DISABLE);
        }
}

static int jump_label_add_module(struct module *mod)
{
        struct jump_entry *iter_start = mod->jump_entries;
        struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
        struct jump_entry *iter;
        struct static_key *key = NULL;
        struct static_key_mod *jlm;

        /* if the module doesn't have jump label entries, just return */
        if (iter_start == iter_stop)
                return 0;

        jump_label_sort_entries(iter_start, iter_stop);

        for (iter = iter_start; iter < iter_stop; iter++) {
                struct static_key *iterk;

                iterk = (struct static_key *)(unsigned long)iter->key;
                if (iterk == key)
                        continue;

                key = iterk;
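                /*
                 * A key defined in this module points straight at the
                 * module's entry table; a key owned elsewhere gets a
                 * static_key_mod node chained onto its owner instead.
                 */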
                if (__module_address(iter->key) == mod) {
                        /*
                         * Set key->entries to iter, but preserve JUMP_LABEL_TRUE_BRANCH.
                         */
                        *((unsigned long *)&key->entries) += (unsigned long)iter;
                        key->next = NULL;
                        continue;
                }
                jlm = kzalloc(sizeof(struct static_key_mod), GFP_KERNEL);
                if (!jlm)
                        return -ENOMEM;
                jlm->mod = mod;
                jlm->entries = iter;
                jlm->next = key->next;
                key->next = jlm;

                if (jump_label_type(key) == JUMP_LABEL_ENABLE)
                        __jump_label_update(key, iter, iter_stop, JUMP_LABEL_ENABLE);
        }

        return 0;
}

static void jump_label_del_module(struct module *mod)
{
        struct jump_entry *iter_start = mod->jump_entries;
        struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
        struct jump_entry *iter;
        struct static_key *key = NULL;
        struct static_key_mod *jlm, **prev;

        for (iter = iter_start; iter < iter_stop; iter++) {
                if (iter->key == (jump_label_t)(unsigned long)key)
                        continue;

                key = (struct static_key *)(unsigned long)iter->key;

                if (__module_address(iter->key) == mod)
                        continue;

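                /*
                 * Walk this key's list and unlink the node that refers to
                 * the departing module, keeping the rest of the chain.
                 */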
                prev = &key->next;
                jlm = key->next;

                while (jlm && jlm->mod != mod) {
                        prev = &jlm->next;
                        jlm = jlm->next;
                }

                if (jlm) {
                        *prev = jlm->next;
                        kfree(jlm);
                }
        }
}

static void jump_label_invalidate_module_init(struct module *mod)
{
        struct jump_entry *iter_start = mod->jump_entries;
        struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
        struct jump_entry *iter;

        for (iter = iter_start; iter < iter_stop; iter++) {
                if (within_module_init(iter->code, mod))
                        iter->code = 0;
        }
}

static int
jump_label_module_notify(struct notifier_block *self, unsigned long val,
                         void *data)
{
        struct module *mod = data;
        int ret = 0;

        switch (val) {
        case MODULE_STATE_COMING:
                jump_label_lock();
                ret = jump_label_add_module(mod);
                if (ret)
                        jump_label_del_module(mod);
                jump_label_unlock();
                break;
        case MODULE_STATE_GOING:
                jump_label_lock();
                jump_label_del_module(mod);
                jump_label_unlock();
                break;
        case MODULE_STATE_LIVE:
                jump_label_lock();
                jump_label_invalidate_module_init(mod);
                jump_label_unlock();
                break;
        }

        return notifier_from_errno(ret);
}

struct notifier_block jump_label_module_nb = {
        .notifier_call = jump_label_module_notify,
        .priority = 1, /* higher than tracepoints */
};

static __init int jump_label_init_module(void)
{
        return register_module_notifier(&jump_label_module_nb);
}
early_initcall(jump_label_init_module);

#endif /* CONFIG_MODULES */

/***
 * jump_label_text_reserved - check if addr range is reserved
 * @start: start text addr
 * @end: end text addr
 *
 * Checks if the text address range between @start and @end overlaps with
 * any of the jump label patch addresses. Code that wants to modify kernel
 * text should first verify that it does not overlap with any of the jump
 * label addresses. Caller must hold jump_label_mutex.
 *
 * Returns 1 if there is an overlap, 0 otherwise.
 */
int jump_label_text_reserved(void *start, void *end)
{
        int ret = __jump_label_text_reserved(__start___jump_table,
                        __stop___jump_table, start, end);

        if (ret)
                return ret;

#ifdef CONFIG_MODULES
        ret = __jump_label_mod_text_reserved(start, end);
#endif
        return ret;
}

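/*
 * Example caller (illustrative): kprobes checks a candidate address before
 * arming a probe, so it never patches text that jump labels may rewrite:
 *
 *      if (jump_label_text_reserved(p->addr, p->addr))
 *              return -EINVAL;
 */
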
static void jump_label_update(struct static_key *key, int enable)
{
        struct jump_entry *stop = __stop___jump_table;
        struct jump_entry *entry = jump_label_get_entries(key);

#ifdef CONFIG_MODULES
        struct module *mod = __module_address((unsigned long)key);

        __jump_label_mod_update(key, enable);

        if (mod)
                stop = mod->jump_entries + mod->num_jump_entries;
#endif
        /* if there are no users, entry can be NULL */
        if (entry)
                __jump_label_update(key, entry, stop, enable);
}

#endif /* HAVE_JUMP_LABEL */