2 Copyright (C) 2002 Richard Henderson
3 Copyright (C) 2001 Rusty Russell, 2002, 2010 Rusty Russell IBM.
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License as published by
7 the Free Software Foundation; either version 2 of the License, or
8 (at your option) any later version.
10 This program is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details.
15 You should have received a copy of the GNU General Public License
16 along with this program; if not, write to the Free Software
17 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 #include <linux/export.h>
20 #include <linux/moduleloader.h>
21 #include <linux/ftrace_event.h>
22 #include <linux/init.h>
23 #include <linux/kallsyms.h>
24 #include <linux/file.h>
26 #include <linux/sysfs.h>
27 #include <linux/kernel.h>
28 #include <linux/slab.h>
29 #include <linux/vmalloc.h>
30 #include <linux/elf.h>
31 #include <linux/proc_fs.h>
32 #include <linux/security.h>
33 #include <linux/seq_file.h>
34 #include <linux/syscalls.h>
35 #include <linux/fcntl.h>
36 #include <linux/rcupdate.h>
37 #include <linux/capability.h>
38 #include <linux/cpu.h>
39 #include <linux/moduleparam.h>
40 #include <linux/errno.h>
41 #include <linux/err.h>
42 #include <linux/vermagic.h>
43 #include <linux/notifier.h>
44 #include <linux/sched.h>
45 #include <linux/device.h>
46 #include <linux/string.h>
47 #include <linux/mutex.h>
48 #include <linux/rculist.h>
49 #include <asm/uaccess.h>
50 #include <asm/cacheflush.h>
51 #include <asm/mmu_context.h>
52 #include <linux/license.h>
53 #include <asm/sections.h>
54 #include <linux/tracepoint.h>
55 #include <linux/ftrace.h>
56 #include <linux/async.h>
57 #include <linux/percpu.h>
58 #include <linux/kmemleak.h>
59 #include <linux/jump_label.h>
60 #include <linux/pfn.h>
61 #include <linux/bsearch.h>
62 #include <uapi/linux/module.h>
63 #include "module-internal.h"
65 #define CREATE_TRACE_POINTS
66 #include <trace/events/module.h>
68 #ifndef ARCH_SHF_SMALL
69 #define ARCH_SHF_SMALL 0
73 * Modules' sections will be aligned on page boundaries
74 * to ensure complete separation of code and data, but
75 * only when CONFIG_DEBUG_SET_MODULE_RONX=y
77 #ifdef CONFIG_DEBUG_SET_MODULE_RONX
78 # define debug_align(X) ALIGN(X, PAGE_SIZE)
80 # define debug_align(X) (X)
84 * Given BASE and SIZE this macro calculates the number of pages the
85 memory region occupies
87 #define MOD_NUMBER_OF_PAGES(BASE, SIZE) (((SIZE) > 0) ? \
88 (PFN_DOWN((unsigned long)(BASE) + (SIZE) - 1) - \
89 PFN_DOWN((unsigned long)BASE) + 1) \
90 : (0UL))
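/*
 * Worked example (illustrative values, not from this file): with 4 KiB
 * pages, BASE = 0xffffffffa0000800 and SIZE = 0x2000 give
 *
 *	PFN_DOWN(0xffffffffa00027ff) - PFN_DOWN(0xffffffffa0000800) + 1 = 3
 *
 * i.e. three pages, because the two-page-long region straddles three
 * page frames.
 */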
92 /* If this is set, the section belongs in the init part of the module */
93 #define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1))
95 /*
96  * Mutex protects:
97  * 1) List of modules (also safely readable with preempt_disable),
98 * 2) module_use links,
99 * 3) module_addr_min/module_addr_max.
100 * (delete and add uses RCU list operations). */
101 DEFINE_MUTEX(module_mutex);
102 EXPORT_SYMBOL_GPL(module_mutex);
103 static LIST_HEAD(modules);
105 #ifdef CONFIG_MODULES_TREE_LOOKUP
108 * Use a latched RB-tree for __module_address(); this allows us to use
109 * RCU-sched lookups of the address from any context.
111 * Because modules have two address ranges: init and core, we need two
112 latch_tree_nodes entries. Therefore we need the back-pointer from
113 mod_tree_node.
115 * Because init ranges are short lived we mark them unlikely and have placed
116 * them outside the critical cacheline in struct module.
118 * This is conditional on PERF_EVENTS || TRACING because those can really hit
119 __module_address() hard by doing a lot of stack unwinding; potentially from
120 NMI context.
123 static __always_inline unsigned long __mod_tree_val(struct latch_tree_node *n)
125 struct mod_tree_node *mtn = container_of(n, struct mod_tree_node, node);
126 struct module *mod = mtn->mod;
128 if (unlikely(mtn == &mod->mtn_init))
129 return (unsigned long)mod->module_init;
131 return (unsigned long)mod->module_core;
134 static __always_inline unsigned long __mod_tree_size(struct latch_tree_node *n)
136 struct mod_tree_node *mtn = container_of(n, struct mod_tree_node, node);
137 struct module *mod = mtn->mod;
139 if (unlikely(mtn == &mod->mtn_init))
140 return (unsigned long)mod->init_size;
142 return (unsigned long)mod->core_size;
145 static __always_inline bool
146 mod_tree_less(struct latch_tree_node *a, struct latch_tree_node *b)
148 return __mod_tree_val(a) < __mod_tree_val(b);
151 static __always_inline int
152 mod_tree_comp(void *key, struct latch_tree_node *n)
154 unsigned long val = (unsigned long)key;
155 unsigned long start, end;
157 start = __mod_tree_val(n);
161 end = start + __mod_tree_size(n);
168 static const struct latch_tree_ops mod_tree_ops = {
169 .less = mod_tree_less,
170 .comp = mod_tree_comp,
173 static struct mod_tree_root {
174 struct latch_tree_root root;
175 unsigned long addr_min;
176 unsigned long addr_max;
177 } mod_tree __cacheline_aligned = {
181 #define module_addr_min mod_tree.addr_min
182 #define module_addr_max mod_tree.addr_max
184 static noinline void __mod_tree_insert(struct mod_tree_node *node)
186 latch_tree_insert(&node->node, &mod_tree.root, &mod_tree_ops);
189 static void __mod_tree_remove(struct mod_tree_node *node)
191 latch_tree_erase(&node->node, &mod_tree.root, &mod_tree_ops);
195 * These modifications: insert, remove_init and remove; are serialized by the
196 * module_mutex.
198 static void mod_tree_insert(struct module *mod)
200 mod->mtn_core.mod = mod;
201 mod->mtn_init.mod = mod;
203 __mod_tree_insert(&mod->mtn_core);
205 __mod_tree_insert(&mod->mtn_init);
208 static void mod_tree_remove_init(struct module *mod)
211 __mod_tree_remove(&mod->mtn_init);
214 static void mod_tree_remove(struct module *mod)
216 __mod_tree_remove(&mod->mtn_core);
217 mod_tree_remove_init(mod);
220 static struct module *mod_find(unsigned long addr)
222 struct latch_tree_node *ltn;
224 ltn = latch_tree_find((void *)addr, &mod_tree.root, &mod_tree_ops);
228 return container_of(ltn, struct mod_tree_node, node)->mod;
231 #else /* MODULES_TREE_LOOKUP */
233 static unsigned long module_addr_min = -1UL, module_addr_max = 0;
235 static void mod_tree_insert(struct module *mod) { }
236 static void mod_tree_remove_init(struct module *mod) { }
237 static void mod_tree_remove(struct module *mod) { }
239 static struct module *mod_find(unsigned long addr)
243 list_for_each_entry_rcu(mod, &modules, list) {
244 if (within_module(addr, mod))
251 #endif /* MODULES_TREE_LOOKUP */
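/*
 * Usage sketch (illustrative, not part of the original file): callers such
 * as __module_address() are expected to run with preemption disabled so the
 * RCU-sched read side protects the list/tree walked by mod_find(), roughly:
 *
 *	preempt_disable();
 *	mod = mod_find(addr);
 *	if (mod && mod->state == MODULE_STATE_UNFORMED)
 *		mod = NULL;
 *	preempt_enable();
 */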
254 * Bounds of module text, for speeding up __module_address.
255 * Protected by module_mutex.
257 static void __mod_update_bounds(void *base, unsigned int size)
259 unsigned long min = (unsigned long)base;
260 unsigned long max = min + size;
262 if (min < module_addr_min)
263 module_addr_min = min;
264 if (max > module_addr_max)
265 module_addr_max = max;
268 static void mod_update_bounds(struct module *mod)
270 __mod_update_bounds(mod->module_core, mod->core_size);
272 __mod_update_bounds(mod->module_init, mod->init_size);
275 #ifdef CONFIG_KGDB_KDB
276 struct list_head *kdb_modules = &modules; /* kdb needs the list of modules */
277 #endif /* CONFIG_KGDB_KDB */
279 static void module_assert_mutex(void)
281 lockdep_assert_held(&module_mutex);
284 static void module_assert_mutex_or_preempt(void)
286 #ifdef CONFIG_LOCKDEP
287 if (unlikely(!debug_locks))
290 WARN_ON(!rcu_read_lock_sched_held() &&
291 !lockdep_is_held(&module_mutex));
295 #ifdef CONFIG_MODULE_SIG
296 #ifdef CONFIG_MODULE_SIG_FORCE
297 static bool sig_enforce = true;
299 static bool sig_enforce = false;
301 static int param_set_bool_enable_only(const char *val,
302 const struct kernel_param *kp)
306 struct kernel_param dummy_kp = *kp;
308 dummy_kp.arg = &test;
310 err = param_set_bool(val, &dummy_kp);
314 /* Don't let them unset it once it's set! */
315 if (!test && sig_enforce)
323 static const struct kernel_param_ops param_ops_bool_enable_only = {
324 .flags = KERNEL_PARAM_OPS_FL_NOARG,
325 .set = param_set_bool_enable_only,
326 .get = param_get_bool,
328 #define param_check_bool_enable_only param_check_bool
330 module_param(sig_enforce, bool_enable_only, 0644);
331 #endif /* !CONFIG_MODULE_SIG_FORCE */
332 #endif /* CONFIG_MODULE_SIG */
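/*
 * Behaviour sketch (illustrative): because sig_enforce uses the enable-only
 * ops above, an administrator can switch enforcement on at run time but
 * never back off again, e.g. (path assumed, 0644 permissions as declared):
 *
 *	echo 1 > /sys/module/module/parameters/sig_enforce	accepted
 *	echo 0 > /sys/module/module/parameters/sig_enforce	rejected
 *
 * The same one-way behaviour applies to "module.sig_enforce=1" on the
 * kernel command line.
 */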
334 /* Block module loading/unloading? */
335 int modules_disabled = 0;
336 core_param(nomodule, modules_disabled, bint, 0);
338 /* Waiting for a module to finish initializing? */
339 static DECLARE_WAIT_QUEUE_HEAD(module_wq);
341 static BLOCKING_NOTIFIER_HEAD(module_notify_list);
343 int register_module_notifier(struct notifier_block *nb)
345 return blocking_notifier_chain_register(&module_notify_list, nb);
347 EXPORT_SYMBOL(register_module_notifier);
349 int unregister_module_notifier(struct notifier_block *nb)
351 return blocking_notifier_chain_unregister(&module_notify_list, nb);
353 EXPORT_SYMBOL(unregister_module_notifier);
359 char *secstrings, *strtab;
360 unsigned long symoffs, stroffs;
361 struct _ddebug *debug;
362 unsigned int num_debug;
365 unsigned int sym, str, mod, vers, info, pcpu;
369 /* We require a truly strong try_module_get(): 0 means failure due to
370 ongoing or failed initialization etc. */
371 static inline int strong_try_module_get(struct module *mod)
373 BUG_ON(mod && mod->state == MODULE_STATE_UNFORMED);
374 if (mod && mod->state == MODULE_STATE_COMING)
376 if (try_module_get(mod))
382 static inline void add_taint_module(struct module *mod, unsigned flag,
383 enum lockdep_ok lockdep_ok)
385 add_taint(flag, lockdep_ok);
386 mod->taints |= (1U << flag);
390 * A thread that wants to hold a reference to a module only while it
391 * is running can call this to safely exit. nfsd and lockd use this.
393 void __module_put_and_exit(struct module *mod, long code)
398 EXPORT_SYMBOL(__module_put_and_exit);
400 /* Find a module section: 0 means not found. */
401 static unsigned int find_sec(const struct load_info *info, const char *name)
405 for (i = 1; i < info->hdr->e_shnum; i++) {
406 Elf_Shdr *shdr = &info->sechdrs[i];
407 /* Alloc bit cleared means "ignore it." */
408 if ((shdr->sh_flags & SHF_ALLOC)
409 && strcmp(info->secstrings + shdr->sh_name, name) == 0)
415 /* Find a module section, or NULL. */
416 static void *section_addr(const struct load_info *info, const char *name)
418 /* Section 0 has sh_addr 0. */
419 return (void *)info->sechdrs[find_sec(info, name)].sh_addr;
422 /* Find a module section, or NULL. Fill in number of "objects" in section. */
423 static void *section_objs(const struct load_info *info,
428 unsigned int sec = find_sec(info, name);
430 /* Section 0 has sh_addr 0 and sh_size 0. */
431 *num = info->sechdrs[sec].sh_size / object_size;
432 return (void *)info->sechdrs[sec].sh_addr;
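/*
 * Usage sketch (simplified from the real callers elsewhere in the loader):
 * whole tables are pulled out of the image in one call, e.g.
 *
 *	mod->kp = section_objs(info, "__param",
 *			       sizeof(*mod->kp), &mod->num_kp);
 *
 * A section that is absent resolves to index 0, so the result is simply a
 * NULL pointer with a count of zero.
 */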
435 /* Provided by the linker */
436 extern const struct kernel_symbol __start___ksymtab[];
437 extern const struct kernel_symbol __stop___ksymtab[];
438 extern const struct kernel_symbol __start___ksymtab_gpl[];
439 extern const struct kernel_symbol __stop___ksymtab_gpl[];
440 extern const struct kernel_symbol __start___ksymtab_gpl_future[];
441 extern const struct kernel_symbol __stop___ksymtab_gpl_future[];
442 extern const unsigned long __start___kcrctab[];
443 extern const unsigned long __start___kcrctab_gpl[];
444 extern const unsigned long __start___kcrctab_gpl_future[];
445 #ifdef CONFIG_UNUSED_SYMBOLS
446 extern const struct kernel_symbol __start___ksymtab_unused[];
447 extern const struct kernel_symbol __stop___ksymtab_unused[];
448 extern const struct kernel_symbol __start___ksymtab_unused_gpl[];
449 extern const struct kernel_symbol __stop___ksymtab_unused_gpl[];
450 extern const unsigned long __start___kcrctab_unused[];
451 extern const unsigned long __start___kcrctab_unused_gpl[];
454 #ifndef CONFIG_MODVERSIONS
455 #define symversion(base, idx) NULL
457 #define symversion(base, idx) ((base != NULL) ? ((base) + (idx)) : NULL)
460 static bool each_symbol_in_section(const struct symsearch *arr,
461 unsigned int arrsize,
462 struct module *owner,
463 bool (*fn)(const struct symsearch *syms,
464 struct module *owner,
470 for (j = 0; j < arrsize; j++) {
471 if (fn(&arr[j], owner, data))
478 /* Returns true as soon as fn returns true, otherwise false. */
479 bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
480 struct module *owner,
485 static const struct symsearch arr[] = {
486 { __start___ksymtab, __stop___ksymtab, __start___kcrctab,
487 NOT_GPL_ONLY, false },
488 { __start___ksymtab_gpl, __stop___ksymtab_gpl,
489 __start___kcrctab_gpl,
491 { __start___ksymtab_gpl_future, __stop___ksymtab_gpl_future,
492 __start___kcrctab_gpl_future,
493 WILL_BE_GPL_ONLY, false },
494 #ifdef CONFIG_UNUSED_SYMBOLS
495 { __start___ksymtab_unused, __stop___ksymtab_unused,
496 __start___kcrctab_unused,
497 NOT_GPL_ONLY, true },
498 { __start___ksymtab_unused_gpl, __stop___ksymtab_unused_gpl,
499 __start___kcrctab_unused_gpl,
504 module_assert_mutex_or_preempt();
506 if (each_symbol_in_section(arr, ARRAY_SIZE(arr), NULL, fn, data))
509 list_for_each_entry_rcu(mod, &modules, list) {
510 struct symsearch arr[] = {
511 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
512 NOT_GPL_ONLY, false },
513 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
516 { mod->gpl_future_syms,
517 mod->gpl_future_syms + mod->num_gpl_future_syms,
518 mod->gpl_future_crcs,
519 WILL_BE_GPL_ONLY, false },
520 #ifdef CONFIG_UNUSED_SYMBOLS
522 mod->unused_syms + mod->num_unused_syms,
524 NOT_GPL_ONLY, true },
525 { mod->unused_gpl_syms,
526 mod->unused_gpl_syms + mod->num_unused_gpl_syms,
527 mod->unused_gpl_crcs,
532 if (mod->state == MODULE_STATE_UNFORMED)
535 if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
540 EXPORT_SYMBOL_GPL(each_symbol_section);
542 struct find_symbol_arg {
549 struct module *owner;
550 const unsigned long *crc;
551 const struct kernel_symbol *sym;
554 static bool check_symbol(const struct symsearch *syms,
555 struct module *owner,
556 unsigned int symnum, void *data)
558 struct find_symbol_arg *fsa = data;
561 if (syms->licence == GPL_ONLY)
563 if (syms->licence == WILL_BE_GPL_ONLY && fsa->warn) {
564 pr_warn("Symbol %s is being used by a non-GPL module, "
565 "which will not be allowed in the future\n",
570 #ifdef CONFIG_UNUSED_SYMBOLS
571 if (syms->unused && fsa->warn) {
572 pr_warn("Symbol %s is marked as UNUSED, however this module is "
573 "using it.\n", fsa->name);
574 pr_warn("This symbol will go away in the future.\n");
575 pr_warn("Please evaluate if this is the right api to use and "
576 "if it really is, submit a report to the linux kernel "
577 "mailing list together with submitting your code for "
583 fsa->crc = symversion(syms->crcs, symnum);
584 fsa->sym = &syms->start[symnum];
588 static int cmp_name(const void *va, const void *vb)
591 const struct kernel_symbol *b;
593 return strcmp(a, b->name);
596 static bool find_symbol_in_section(const struct symsearch *syms,
597 struct module *owner,
600 struct find_symbol_arg *fsa = data;
601 struct kernel_symbol *sym;
603 sym = bsearch(fsa->name, syms->start, syms->stop - syms->start,
604 sizeof(struct kernel_symbol), cmp_name);
606 if (sym != NULL && check_symbol(syms, owner, sym - syms->start, data))
612 /* Find a symbol and return it, along with (optional) crc and
613 * (optional) module which owns it. Needs preempt disabled or module_mutex. */
614 const struct kernel_symbol *find_symbol(const char *name,
615 struct module **owner,
616 const unsigned long **crc,
620 struct find_symbol_arg fsa;
626 if (each_symbol_section(find_symbol_in_section, &fsa)) {
634 pr_debug("Failed to find symbol %s\n", name);
637 EXPORT_SYMBOL_GPL(find_symbol);
639 /* Search for module by name: must hold module_mutex. */
640 static struct module *find_module_all(const char *name, size_t len,
645 module_assert_mutex();
647 list_for_each_entry(mod, &modules, list) {
648 if (!even_unformed && mod->state == MODULE_STATE_UNFORMED)
650 if (strlen(mod->name) == len && !memcmp(mod->name, name, len))
656 struct module *find_module(const char *name)
658 return find_module_all(name, strlen(name), false);
660 EXPORT_SYMBOL_GPL(find_module);
664 static inline void __percpu *mod_percpu(struct module *mod)
669 static int percpu_modalloc(struct module *mod, struct load_info *info)
671 Elf_Shdr *pcpusec = &info->sechdrs[info->index.pcpu];
672 unsigned long align = pcpusec->sh_addralign;
674 if (!pcpusec->sh_size)
677 if (align > PAGE_SIZE) {
678 pr_warn("%s: per-cpu alignment %li > %li\n",
679 mod->name, align, PAGE_SIZE);
683 mod->percpu = __alloc_reserved_percpu(pcpusec->sh_size, align);
685 pr_warn("%s: Could not allocate %lu bytes percpu data\n",
686 mod->name, (unsigned long)pcpusec->sh_size);
689 mod->percpu_size = pcpusec->sh_size;
693 static void percpu_modfree(struct module *mod)
695 free_percpu(mod->percpu);
698 static unsigned int find_pcpusec(struct load_info *info)
700 return find_sec(info, ".data..percpu");
703 static void percpu_modcopy(struct module *mod,
704 const void *from, unsigned long size)
708 for_each_possible_cpu(cpu)
709 memcpy(per_cpu_ptr(mod->percpu, cpu), from, size);
713 * is_module_percpu_address - test whether address is from module static percpu
714 * @addr: address to test
716 * Test whether @addr belongs to module static percpu area.
719 * %true if @addr is from module static percpu area
721 bool is_module_percpu_address(unsigned long addr)
728 list_for_each_entry_rcu(mod, &modules, list) {
729 if (mod->state == MODULE_STATE_UNFORMED)
731 if (!mod->percpu_size)
733 for_each_possible_cpu(cpu) {
734 void *start = per_cpu_ptr(mod->percpu, cpu);
736 if ((void *)addr >= start &&
737 (void *)addr < start + mod->percpu_size) {
748 #else /* ... !CONFIG_SMP */
750 static inline void __percpu *mod_percpu(struct module *mod)
754 static int percpu_modalloc(struct module *mod, struct load_info *info)
756 /* UP modules shouldn't have this section: ENOMEM isn't quite right */
757 if (info->sechdrs[info->index.pcpu].sh_size != 0)
761 static inline void percpu_modfree(struct module *mod)
764 static unsigned int find_pcpusec(struct load_info *info)
768 static inline void percpu_modcopy(struct module *mod,
769 const void *from, unsigned long size)
771 /* pcpusec should be 0, and size of that section should be 0. */
774 bool is_module_percpu_address(unsigned long addr)
779 #endif /* CONFIG_SMP */
781 #define MODINFO_ATTR(field) \
782 static void setup_modinfo_##field(struct module *mod, const char *s) \
784 mod->field = kstrdup(s, GFP_KERNEL); \
786 static ssize_t show_modinfo_##field(struct module_attribute *mattr, \
787 struct module_kobject *mk, char *buffer) \
789 return scnprintf(buffer, PAGE_SIZE, "%s\n", mk->mod->field); \
791 static int modinfo_##field##_exists(struct module *mod) \
793 return mod->field != NULL; \
795 static void free_modinfo_##field(struct module *mod) \
800 static struct module_attribute modinfo_##field = { \
801 .attr = { .name = __stringify(field), .mode = 0444 }, \
802 .show = show_modinfo_##field, \
803 .setup = setup_modinfo_##field, \
804 .test = modinfo_##field##_exists, \
805 .free = free_modinfo_##field, \
808 MODINFO_ATTR(version);
809 MODINFO_ATTR(srcversion);
811 static char last_unloaded_module[MODULE_NAME_LEN+1];
813 #ifdef CONFIG_MODULE_UNLOAD
815 EXPORT_TRACEPOINT_SYMBOL(module_get);
817 /* MODULE_REF_BASE is the base reference count held by the module loader. */
818 #define MODULE_REF_BASE 1
820 /* Init the unload section of the module. */
821 static int module_unload_init(struct module *mod)
824 * Initialize reference counter to MODULE_REF_BASE.
825 * refcnt == 0 means module is going.
827 atomic_set(&mod->refcnt, MODULE_REF_BASE);
829 INIT_LIST_HEAD(&mod->source_list);
830 INIT_LIST_HEAD(&mod->target_list);
832 /* Hold reference count during initialization. */
833 atomic_inc(&mod->refcnt);
838 /* Does a already use b? */
839 static int already_uses(struct module *a, struct module *b)
841 struct module_use *use;
843 list_for_each_entry(use, &b->source_list, source_list) {
844 if (use->source == a) {
845 pr_debug("%s uses %s!\n", a->name, b->name);
849 pr_debug("%s does not use %s!\n", a->name, b->name);
854 * Module a uses b:
855 * - we add 'a' as a "source", 'b' as a "target" of module use
856 * - the module_use is added to the list of 'b' sources (so
857 * 'b' can walk the list to see who sourced them), and of 'a'
858 * targets (so 'a' can see what modules it targets).
860 static int add_module_usage(struct module *a, struct module *b)
862 struct module_use *use;
864 pr_debug("Allocating new usage for %s.\n", a->name);
865 use = kmalloc(sizeof(*use), GFP_ATOMIC);
867 pr_warn("%s: out of memory loading\n", a->name);
873 list_add(&use->source_list, &b->source_list);
874 list_add(&use->target_list, &a->target_list);
878 /* Module a uses b: caller must hold module_mutex. */
879 int ref_module(struct module *a, struct module *b)
883 if (b == NULL || already_uses(a, b))
886 /* If module isn't available, we fail. */
887 err = strong_try_module_get(b);
891 err = add_module_usage(a, b);
898 EXPORT_SYMBOL_GPL(ref_module);
900 /* Clear the unload stuff of the module. */
901 static void module_unload_free(struct module *mod)
903 struct module_use *use, *tmp;
905 mutex_lock(&module_mutex);
906 list_for_each_entry_safe(use, tmp, &mod->target_list, target_list) {
907 struct module *i = use->target;
908 pr_debug("%s unusing %s\n", mod->name, i->name);
910 list_del(&use->source_list);
911 list_del(&use->target_list);
914 mutex_unlock(&module_mutex);
917 #ifdef CONFIG_MODULE_FORCE_UNLOAD
918 static inline int try_force_unload(unsigned int flags)
920 int ret = (flags & O_TRUNC);
922 add_taint(TAINT_FORCED_RMMOD, LOCKDEP_NOW_UNRELIABLE);
926 static inline int try_force_unload(unsigned int flags)
930 #endif /* CONFIG_MODULE_FORCE_UNLOAD */
932 /* Try to release refcount of module, 0 means success. */
933 static int try_release_module_ref(struct module *mod)
937 /* Try to decrement refcnt which we set at loading */
938 ret = atomic_sub_return(MODULE_REF_BASE, &mod->refcnt);
941 /* Someone can put this right now, recover with checking */
942 ret = atomic_add_unless(&mod->refcnt, MODULE_REF_BASE, 0);
947 static int try_stop_module(struct module *mod, int flags, int *forced)
949 /* If it's not unused, quit unless we're forcing. */
950 if (try_release_module_ref(mod) != 0) {
951 *forced = try_force_unload(flags);
956 /* Mark it as dying. */
957 mod->state = MODULE_STATE_GOING;
963 * module_refcount - return the refcount or -1 if unloading
965 * @mod: the module we're checking
968 * -1 if the module is in the process of unloading
969 * otherwise the number of references in the kernel to the module
971 int module_refcount(struct module *mod)
973 return atomic_read(&mod->refcnt) - MODULE_REF_BASE;
975 EXPORT_SYMBOL(module_refcount);
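/*
 * Worked example (illustrative): a live module pinned by two users has
 * refcnt == MODULE_REF_BASE + 2, so module_refcount() reports 2.  During
 * unload, try_release_module_ref() subtracts MODULE_REF_BASE; only if the
 * result is 0 may the module be removed, otherwise the base reference is
 * put back with atomic_add_unless().
 */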
977 /* This exists whether we can unload or not */
978 static void free_module(struct module *mod);
980 SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
984 char name[MODULE_NAME_LEN];
987 if (!capable(CAP_SYS_MODULE) || modules_disabled)
990 if (strncpy_from_user(name, name_user, MODULE_NAME_LEN-1) < 0)
992 name[MODULE_NAME_LEN-1] = '\0';
994 if (mutex_lock_interruptible(&module_mutex) != 0)
997 mod = find_module(name);
1003 if (!list_empty(&mod->source_list)) {
1004 /* Other modules depend on us: get rid of them first. */
1009 /* Doing init or already dying? */
1010 if (mod->state != MODULE_STATE_LIVE) {
1011 /* FIXME: if (force), slam module count damn the torpedoes */
1012 pr_debug("%s already dying\n", mod->name);
1017 /* If it has an init func, it must have an exit func to unload */
1018 if (mod->init && !mod->exit) {
1019 forced = try_force_unload(flags);
1021 /* This module can't be removed */
1027 /* Stop the machine so refcounts can't move and disable module. */
1028 ret = try_stop_module(mod, flags, &forced);
1032 mutex_unlock(&module_mutex);
1033 /* Final destruction now no one is using it. */
1034 if (mod->exit != NULL)
1036 blocking_notifier_call_chain(&module_notify_list,
1037 MODULE_STATE_GOING, mod);
1038 async_synchronize_full();
1040 /* Store the name of the last unloaded module for diagnostic purposes */
1041 strlcpy(last_unloaded_module, mod->name, sizeof(last_unloaded_module));
1046 mutex_unlock(&module_mutex);
1050 static inline void print_unload_info(struct seq_file *m, struct module *mod)
1052 struct module_use *use;
1053 int printed_something = 0;
1055 seq_printf(m, " %i ", module_refcount(mod));
1058 * Always include a trailing comma so userspace can differentiate
1059 * between this and the old multi-field proc format.
1061 list_for_each_entry(use, &mod->source_list, source_list) {
1062 printed_something = 1;
1063 seq_printf(m, "%s,", use->source->name);
1066 if (mod->init != NULL && mod->exit == NULL) {
1067 printed_something = 1;
1068 seq_puts(m, "[permanent],");
1071 if (!printed_something)
1075 void __symbol_put(const char *symbol)
1077 struct module *owner;
1080 if (!find_symbol(symbol, &owner, NULL, true, false))
1085 EXPORT_SYMBOL(__symbol_put);
1087 /* Note this assumes addr is a function, which it currently always is. */
1088 void symbol_put_addr(void *addr)
1090 struct module *modaddr;
1091 unsigned long a = (unsigned long)dereference_function_descriptor(addr);
1093 if (core_kernel_text(a))
1096 /* module_text_address is safe here: we're supposed to hold a reference
1097 to the module from symbol_get, so it can't go away. */
1098 modaddr = __module_text_address(a);
1100 module_put(modaddr);
1102 EXPORT_SYMBOL_GPL(symbol_put_addr);
1104 static ssize_t show_refcnt(struct module_attribute *mattr,
1105 struct module_kobject *mk, char *buffer)
1107 return sprintf(buffer, "%i\n", module_refcount(mk->mod));
1110 static struct module_attribute modinfo_refcnt =
1111 __ATTR(refcnt, 0444, show_refcnt, NULL);
1113 void __module_get(struct module *module)
1117 atomic_inc(&module->refcnt);
1118 trace_module_get(module, _RET_IP_);
1122 EXPORT_SYMBOL(__module_get);
1124 bool try_module_get(struct module *module)
1130 /* Note: here, we can fail to get a reference */
1131 if (likely(module_is_live(module) &&
1132 atomic_inc_not_zero(&module->refcnt) != 0))
1133 trace_module_get(module, _RET_IP_);
1141 EXPORT_SYMBOL(try_module_get);
1143 void module_put(struct module *module)
1149 ret = atomic_dec_if_positive(&module->refcnt);
1150 WARN_ON(ret < 0); /* Failed to put refcount */
1151 trace_module_put(module, _RET_IP_);
1155 EXPORT_SYMBOL(module_put);
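/*
 * Usage sketch (illustrative, not taken from this file): a subsystem that
 * calls into another module's ops pins the owner around the call.  "ops"
 * and "do_something" are made-up names:
 *
 *	if (!try_module_get(ops->owner))
 *		return -ENODEV;
 *	ret = ops->do_something(arg);
 *	module_put(ops->owner);
 *
 * try_module_get() fails once the owner has started unloading, so a module
 * cannot gain new users while delete_module() is tearing it down.
 */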
1157 #else /* !CONFIG_MODULE_UNLOAD */
1158 static inline void print_unload_info(struct seq_file *m, struct module *mod)
1160 /* We don't know the usage count, or what modules are using. */
1161 seq_puts(m, " - -");
1164 static inline void module_unload_free(struct module *mod)
1168 int ref_module(struct module *a, struct module *b)
1170 return strong_try_module_get(b);
1172 EXPORT_SYMBOL_GPL(ref_module);
1174 static inline int module_unload_init(struct module *mod)
1178 #endif /* CONFIG_MODULE_UNLOAD */
1180 static size_t module_flags_taint(struct module *mod, char *buf)
1184 if (mod->taints & (1 << TAINT_PROPRIETARY_MODULE))
1186 if (mod->taints & (1 << TAINT_OOT_MODULE))
1188 if (mod->taints & (1 << TAINT_FORCED_MODULE))
1190 if (mod->taints & (1 << TAINT_CRAP))
1192 if (mod->taints & (1 << TAINT_UNSIGNED_MODULE))
1195 * TAINT_FORCED_RMMOD: could be added.
1196 * TAINT_CPU_OUT_OF_SPEC, TAINT_MACHINE_CHECK, TAINT_BAD_PAGE don't
1202 static ssize_t show_initstate(struct module_attribute *mattr,
1203 struct module_kobject *mk, char *buffer)
1205 const char *state = "unknown";
1207 switch (mk->mod->state) {
1208 case MODULE_STATE_LIVE:
1211 case MODULE_STATE_COMING:
1214 case MODULE_STATE_GOING:
1220 return sprintf(buffer, "%s\n", state);
1223 static struct module_attribute modinfo_initstate =
1224 __ATTR(initstate, 0444, show_initstate, NULL);
1226 static ssize_t store_uevent(struct module_attribute *mattr,
1227 struct module_kobject *mk,
1228 const char *buffer, size_t count)
1230 enum kobject_action action;
1232 if (kobject_action_type(buffer, count, &action) == 0)
1233 kobject_uevent(&mk->kobj, action);
1237 struct module_attribute module_uevent =
1238 __ATTR(uevent, 0200, NULL, store_uevent);
1240 static ssize_t show_coresize(struct module_attribute *mattr,
1241 struct module_kobject *mk, char *buffer)
1243 return sprintf(buffer, "%u\n", mk->mod->core_size);
1246 static struct module_attribute modinfo_coresize =
1247 __ATTR(coresize, 0444, show_coresize, NULL);
1249 static ssize_t show_initsize(struct module_attribute *mattr,
1250 struct module_kobject *mk, char *buffer)
1252 return sprintf(buffer, "%u\n", mk->mod->init_size);
1255 static struct module_attribute modinfo_initsize =
1256 __ATTR(initsize, 0444, show_initsize, NULL);
1258 static ssize_t show_taint(struct module_attribute *mattr,
1259 struct module_kobject *mk, char *buffer)
1263 l = module_flags_taint(mk->mod, buffer);
1268 static struct module_attribute modinfo_taint =
1269 __ATTR(taint, 0444, show_taint, NULL);
1271 static struct module_attribute *modinfo_attrs[] = {
1274 &modinfo_srcversion,
1279 #ifdef CONFIG_MODULE_UNLOAD
1285 static const char vermagic[] = VERMAGIC_STRING;
1287 static int try_to_force_load(struct module *mod, const char *reason)
1289 #ifdef CONFIG_MODULE_FORCE_LOAD
1290 if (!test_taint(TAINT_FORCED_MODULE))
1291 pr_warn("%s: %s: kernel tainted.\n", mod->name, reason);
1292 add_taint_module(mod, TAINT_FORCED_MODULE, LOCKDEP_NOW_UNRELIABLE);
1299 #ifdef CONFIG_MODVERSIONS
1300 /* If the arch applies (non-zero) relocations to kernel kcrctab, unapply it. */
1301 static unsigned long maybe_relocated(unsigned long crc,
1302 const struct module *crc_owner)
1304 #ifdef ARCH_RELOCATES_KCRCTAB
1305 if (crc_owner == NULL)
1306 return crc - (unsigned long)reloc_start;
1311 static int check_version(Elf_Shdr *sechdrs,
1312 unsigned int versindex,
1313 const char *symname,
1315 const unsigned long *crc,
1316 const struct module *crc_owner)
1318 unsigned int i, num_versions;
1319 struct modversion_info *versions;
1321 /* Exporting module didn't supply crcs? OK, we're already tainted. */
1325 /* No versions at all? modprobe --force does this. */
1327 return try_to_force_load(mod, symname) == 0;
1329 versions = (void *) sechdrs[versindex].sh_addr;
1330 num_versions = sechdrs[versindex].sh_size
1331 / sizeof(struct modversion_info);
1333 for (i = 0; i < num_versions; i++) {
1334 if (strcmp(versions[i].name, symname) != 0)
1337 if (versions[i].crc == maybe_relocated(*crc, crc_owner))
1339 pr_debug("Found checksum %lX vs module %lX\n",
1340 maybe_relocated(*crc, crc_owner), versions[i].crc);
1344 pr_warn("%s: no symbol version for %s\n", mod->name, symname);
1348 pr_warn("%s: disagrees about version of symbol %s\n",
1349 mod->name, symname);
1353 static inline int check_modstruct_version(Elf_Shdr *sechdrs,
1354 unsigned int versindex,
1357 const unsigned long *crc;
1360 * Since this should be found in kernel (which can't be removed), no
1361 * locking is necessary -- use preempt_disable() to placate lockdep.
1364 if (!find_symbol(VMLINUX_SYMBOL_STR(module_layout), NULL,
1365 &crc, true, false)) {
1370 return check_version(sechdrs, versindex,
1371 VMLINUX_SYMBOL_STR(module_layout), mod, crc,
1375 /* First part is kernel version, which we ignore if module has crcs. */
1376 static inline int same_magic(const char *amagic, const char *bmagic,
1380 amagic += strcspn(amagic, " ");
1381 bmagic += strcspn(bmagic, " ");
1383 return strcmp(amagic, bmagic) == 0;
1386 static inline int check_version(Elf_Shdr *sechdrs,
1387 unsigned int versindex,
1388 const char *symname,
1390 const unsigned long *crc,
1391 const struct module *crc_owner)
1396 static inline int check_modstruct_version(Elf_Shdr *sechdrs,
1397 unsigned int versindex,
1403 static inline int same_magic(const char *amagic, const char *bmagic,
1406 return strcmp(amagic, bmagic) == 0;
1408 #endif /* CONFIG_MODVERSIONS */
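/*
 * Example (strings illustrative): with MODVERSIONS the leading kernel
 * release in the vermagic is skipped before comparing, so
 *
 *	"4.1.0 SMP mod_unload modversions "
 *	"4.1.5 SMP mod_unload modversions "
 *
 * still match, because the per-symbol CRCs carry the real ABI check.
 * Without MODVERSIONS the two strings must be identical in full.
 */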
1410 /* Resolve a symbol for this module. I.e. if we find one, record usage. */
1411 static const struct kernel_symbol *resolve_symbol(struct module *mod,
1412 const struct load_info *info,
1416 struct module *owner;
1417 const struct kernel_symbol *sym;
1418 const unsigned long *crc;
1422 * The module_mutex should not be a heavily contended lock;
1423 * if we get the occasional sleep here, we'll go an extra iteration
1424 * in the wait_event_interruptible(), which is harmless.
1426 sched_annotate_sleep();
1427 mutex_lock(&module_mutex);
1428 sym = find_symbol(name, &owner, &crc,
1429 !(mod->taints & (1 << TAINT_PROPRIETARY_MODULE)), true);
1433 if (!check_version(info->sechdrs, info->index.vers, name, mod, crc,
1435 sym = ERR_PTR(-EINVAL);
1439 err = ref_module(mod, owner);
1446 /* We must make a copy under the lock if we failed to get a reference. */
1447 strncpy(ownername, module_name(owner), MODULE_NAME_LEN);
1449 mutex_unlock(&module_mutex);
1453 static const struct kernel_symbol *
1454 resolve_symbol_wait(struct module *mod,
1455 const struct load_info *info,
1458 const struct kernel_symbol *ksym;
1459 char owner[MODULE_NAME_LEN];
1461 if (wait_event_interruptible_timeout(module_wq,
1462 !IS_ERR(ksym = resolve_symbol(mod, info, name, owner))
1463 || PTR_ERR(ksym) != -EBUSY,
1465 pr_warn("%s: gave up waiting for init of module %s.\n",
1472 * /sys/module/foo/sections stuff
1473 * J. Corbet <corbet@lwn.net>
1477 #ifdef CONFIG_KALLSYMS
1478 static inline bool sect_empty(const Elf_Shdr *sect)
1480 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
1483 struct module_sect_attr {
1484 struct module_attribute mattr;
1486 unsigned long address;
1489 struct module_sect_attrs {
1490 struct attribute_group grp;
1491 unsigned int nsections;
1492 struct module_sect_attr attrs[0];
1495 static ssize_t module_sect_show(struct module_attribute *mattr,
1496 struct module_kobject *mk, char *buf)
1498 struct module_sect_attr *sattr =
1499 container_of(mattr, struct module_sect_attr, mattr);
1500 return sprintf(buf, "0x%pK\n", (void *)sattr->address);
1503 static void free_sect_attrs(struct module_sect_attrs *sect_attrs)
1505 unsigned int section;
1507 for (section = 0; section < sect_attrs->nsections; section++)
1508 kfree(sect_attrs->attrs[section].name);
1512 static void add_sect_attrs(struct module *mod, const struct load_info *info)
1514 unsigned int nloaded = 0, i, size[2];
1515 struct module_sect_attrs *sect_attrs;
1516 struct module_sect_attr *sattr;
1517 struct attribute **gattr;
1519 /* Count loaded sections and allocate structures */
1520 for (i = 0; i < info->hdr->e_shnum; i++)
1521 if (!sect_empty(&info->sechdrs[i]))
1523 size[0] = ALIGN(sizeof(*sect_attrs)
1524 + nloaded * sizeof(sect_attrs->attrs[0]),
1525 sizeof(sect_attrs->grp.attrs[0]));
1526 size[1] = (nloaded + 1) * sizeof(sect_attrs->grp.attrs[0]);
1527 sect_attrs = kzalloc(size[0] + size[1], GFP_KERNEL);
1528 if (sect_attrs == NULL)
1531 /* Setup section attributes. */
1532 sect_attrs->grp.name = "sections";
1533 sect_attrs->grp.attrs = (void *)sect_attrs + size[0];
1535 sect_attrs->nsections = 0;
1536 sattr = &sect_attrs->attrs[0];
1537 gattr = &sect_attrs->grp.attrs[0];
1538 for (i = 0; i < info->hdr->e_shnum; i++) {
1539 Elf_Shdr *sec = &info->sechdrs[i];
1540 if (sect_empty(sec))
1542 sattr->address = sec->sh_addr;
1543 sattr->name = kstrdup(info->secstrings + sec->sh_name,
1545 if (sattr->name == NULL)
1547 sect_attrs->nsections++;
1548 sysfs_attr_init(&sattr->mattr.attr);
1549 sattr->mattr.show = module_sect_show;
1550 sattr->mattr.store = NULL;
1551 sattr->mattr.attr.name = sattr->name;
1552 sattr->mattr.attr.mode = S_IRUGO;
1553 *(gattr++) = &(sattr++)->mattr.attr;
1557 if (sysfs_create_group(&mod->mkobj.kobj, &sect_attrs->grp))
1560 mod->sect_attrs = sect_attrs;
1563 free_sect_attrs(sect_attrs);
1566 static void remove_sect_attrs(struct module *mod)
1568 if (mod->sect_attrs) {
1569 sysfs_remove_group(&mod->mkobj.kobj,
1570 &mod->sect_attrs->grp);
1571 /* We are positive that no one is using any sect attrs
1572 * at this point. Deallocate immediately. */
1573 free_sect_attrs(mod->sect_attrs);
1574 mod->sect_attrs = NULL;
1579 * /sys/module/foo/notes/.section.name gives contents of SHT_NOTE sections.
1582 struct module_notes_attrs {
1583 struct kobject *dir;
1585 struct bin_attribute attrs[0];
1588 static ssize_t module_notes_read(struct file *filp, struct kobject *kobj,
1589 struct bin_attribute *bin_attr,
1590 char *buf, loff_t pos, size_t count)
1593 * The caller checked the pos and count against our size.
1595 memcpy(buf, bin_attr->private + pos, count);
1599 static void free_notes_attrs(struct module_notes_attrs *notes_attrs,
1602 if (notes_attrs->dir) {
1604 sysfs_remove_bin_file(notes_attrs->dir,
1605 &notes_attrs->attrs[i]);
1606 kobject_put(notes_attrs->dir);
1611 static void add_notes_attrs(struct module *mod, const struct load_info *info)
1613 unsigned int notes, loaded, i;
1614 struct module_notes_attrs *notes_attrs;
1615 struct bin_attribute *nattr;
1617 /* failed to create section attributes, so can't create notes */
1618 if (!mod->sect_attrs)
1621 /* Count notes sections and allocate structures. */
1623 for (i = 0; i < info->hdr->e_shnum; i++)
1624 if (!sect_empty(&info->sechdrs[i]) &&
1625 (info->sechdrs[i].sh_type == SHT_NOTE))
1631 notes_attrs = kzalloc(sizeof(*notes_attrs)
1632 + notes * sizeof(notes_attrs->attrs[0]),
1634 if (notes_attrs == NULL)
1637 notes_attrs->notes = notes;
1638 nattr = &notes_attrs->attrs[0];
1639 for (loaded = i = 0; i < info->hdr->e_shnum; ++i) {
1640 if (sect_empty(&info->sechdrs[i]))
1642 if (info->sechdrs[i].sh_type == SHT_NOTE) {
1643 sysfs_bin_attr_init(nattr);
1644 nattr->attr.name = mod->sect_attrs->attrs[loaded].name;
1645 nattr->attr.mode = S_IRUGO;
1646 nattr->size = info->sechdrs[i].sh_size;
1647 nattr->private = (void *) info->sechdrs[i].sh_addr;
1648 nattr->read = module_notes_read;
1654 notes_attrs->dir = kobject_create_and_add("notes", &mod->mkobj.kobj);
1655 if (!notes_attrs->dir)
1658 for (i = 0; i < notes; ++i)
1659 if (sysfs_create_bin_file(notes_attrs->dir,
1660 &notes_attrs->attrs[i]))
1663 mod->notes_attrs = notes_attrs;
1667 free_notes_attrs(notes_attrs, i);
1670 static void remove_notes_attrs(struct module *mod)
1672 if (mod->notes_attrs)
1673 free_notes_attrs(mod->notes_attrs, mod->notes_attrs->notes);
1678 static inline void add_sect_attrs(struct module *mod,
1679 const struct load_info *info)
1683 static inline void remove_sect_attrs(struct module *mod)
1687 static inline void add_notes_attrs(struct module *mod,
1688 const struct load_info *info)
1692 static inline void remove_notes_attrs(struct module *mod)
1695 #endif /* CONFIG_KALLSYMS */
1697 static void add_usage_links(struct module *mod)
1699 #ifdef CONFIG_MODULE_UNLOAD
1700 struct module_use *use;
1703 mutex_lock(&module_mutex);
1704 list_for_each_entry(use, &mod->target_list, target_list) {
1705 nowarn = sysfs_create_link(use->target->holders_dir,
1706 &mod->mkobj.kobj, mod->name);
1708 mutex_unlock(&module_mutex);
1712 static void del_usage_links(struct module *mod)
1714 #ifdef CONFIG_MODULE_UNLOAD
1715 struct module_use *use;
1717 mutex_lock(&module_mutex);
1718 list_for_each_entry(use, &mod->target_list, target_list)
1719 sysfs_remove_link(use->target->holders_dir, mod->name);
1720 mutex_unlock(&module_mutex);
1724 static int module_add_modinfo_attrs(struct module *mod)
1726 struct module_attribute *attr;
1727 struct module_attribute *temp_attr;
1731 mod->modinfo_attrs = kzalloc((sizeof(struct module_attribute) *
1732 (ARRAY_SIZE(modinfo_attrs) + 1)),
1734 if (!mod->modinfo_attrs)
1737 temp_attr = mod->modinfo_attrs;
1738 for (i = 0; (attr = modinfo_attrs[i]) && !error; i++) {
1740 (attr->test && attr->test(mod))) {
1741 memcpy(temp_attr, attr, sizeof(*temp_attr));
1742 sysfs_attr_init(&temp_attr->attr);
1743 error = sysfs_create_file(&mod->mkobj.kobj,
1751 static void module_remove_modinfo_attrs(struct module *mod)
1753 struct module_attribute *attr;
1756 for (i = 0; (attr = &mod->modinfo_attrs[i]); i++) {
1757 /* pick a field to test for end of list */
1758 if (!attr->attr.name)
1760 sysfs_remove_file(&mod->mkobj.kobj, &attr->attr);
1764 kfree(mod->modinfo_attrs);
1767 static void mod_kobject_put(struct module *mod)
1769 DECLARE_COMPLETION_ONSTACK(c);
1770 mod->mkobj.kobj_completion = &c;
1771 kobject_put(&mod->mkobj.kobj);
1772 wait_for_completion(&c);
1775 static int mod_sysfs_init(struct module *mod)
1778 struct kobject *kobj;
1780 if (!module_sysfs_initialized) {
1781 pr_err("%s: module sysfs not initialized\n", mod->name);
1786 kobj = kset_find_obj(module_kset, mod->name);
1788 pr_err("%s: module is already loaded\n", mod->name);
1794 mod->mkobj.mod = mod;
1796 memset(&mod->mkobj.kobj, 0, sizeof(mod->mkobj.kobj));
1797 mod->mkobj.kobj.kset = module_kset;
1798 err = kobject_init_and_add(&mod->mkobj.kobj, &module_ktype, NULL,
1801 mod_kobject_put(mod);
1803 /* delay uevent until full sysfs population */
1808 static int mod_sysfs_setup(struct module *mod,
1809 const struct load_info *info,
1810 struct kernel_param *kparam,
1811 unsigned int num_params)
1815 err = mod_sysfs_init(mod);
1819 mod->holders_dir = kobject_create_and_add("holders", &mod->mkobj.kobj);
1820 if (!mod->holders_dir) {
1825 err = module_param_sysfs_setup(mod, kparam, num_params);
1827 goto out_unreg_holders;
1829 err = module_add_modinfo_attrs(mod);
1831 goto out_unreg_param;
1833 add_usage_links(mod);
1834 add_sect_attrs(mod, info);
1835 add_notes_attrs(mod, info);
1837 kobject_uevent(&mod->mkobj.kobj, KOBJ_ADD);
1841 module_param_sysfs_remove(mod);
1843 kobject_put(mod->holders_dir);
1845 mod_kobject_put(mod);
1850 static void mod_sysfs_fini(struct module *mod)
1852 remove_notes_attrs(mod);
1853 remove_sect_attrs(mod);
1854 mod_kobject_put(mod);
1857 #else /* !CONFIG_SYSFS */
1859 static int mod_sysfs_setup(struct module *mod,
1860 const struct load_info *info,
1861 struct kernel_param *kparam,
1862 unsigned int num_params)
1867 static void mod_sysfs_fini(struct module *mod)
1871 static void module_remove_modinfo_attrs(struct module *mod)
1875 static void del_usage_links(struct module *mod)
1879 #endif /* CONFIG_SYSFS */
1881 static void mod_sysfs_teardown(struct module *mod)
1883 del_usage_links(mod);
1884 module_remove_modinfo_attrs(mod);
1885 module_param_sysfs_remove(mod);
1886 kobject_put(mod->mkobj.drivers_dir);
1887 kobject_put(mod->holders_dir);
1888 mod_sysfs_fini(mod);
1891 #ifdef CONFIG_DEBUG_SET_MODULE_RONX
1893 * LKM RO/NX protection: protect module's text/ro-data
1894 * from modification and any data from execution.
1896 void set_page_attributes(void *start, void *end, int (*set)(unsigned long start, int num_pages))
1898 unsigned long begin_pfn = PFN_DOWN((unsigned long)start);
1899 unsigned long end_pfn = PFN_DOWN((unsigned long)end);
1901 if (end_pfn > begin_pfn)
1902 set(begin_pfn << PAGE_SHIFT, end_pfn - begin_pfn);
1905 static void set_section_ro_nx(void *base,
1906 unsigned long text_size,
1907 unsigned long ro_size,
1908 unsigned long total_size)
1910 /* begin and end PFNs of the current subsection */
1911 unsigned long begin_pfn;
1912 unsigned long end_pfn;
1915 * Set RO for module text and RO-data:
1916 * - Always protect first page.
1917 * - Do not protect last partial page.
1920 set_page_attributes(base, base + ro_size, set_memory_ro);
1923 * Set NX permissions for module data:
1924 * - Do not protect first partial page.
1925 * - Always protect last page.
1927 if (total_size > text_size) {
1928 begin_pfn = PFN_UP((unsigned long)base + text_size);
1929 end_pfn = PFN_UP((unsigned long)base + total_size);
1930 if (end_pfn > begin_pfn)
1931 set_memory_nx(begin_pfn << PAGE_SHIFT, end_pfn - begin_pfn);
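/*
 * Worked example (illustrative, 4 KiB pages, base page-aligned): with
 * text_size = 0x1800, ro_size = 0x2400 and total_size = 0x5000, pages 0-1
 * are made read-only (RO stops at the last full page below ro_size) and
 * pages 2-4 are made non-executable (NX starts at the first full page
 * above text_size).  Partial pages shared by two regions keep the weaker
 * protection, which is why debug_align() page-aligns these sizes at
 * layout time.
 */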
1935 static void unset_module_core_ro_nx(struct module *mod)
1937 set_page_attributes(mod->module_core + mod->core_text_size,
1938 mod->module_core + mod->core_size,
1940 set_page_attributes(mod->module_core,
1941 mod->module_core + mod->core_ro_size,
1945 static void unset_module_init_ro_nx(struct module *mod)
1947 set_page_attributes(mod->module_init + mod->init_text_size,
1948 mod->module_init + mod->init_size,
1950 set_page_attributes(mod->module_init,
1951 mod->module_init + mod->init_ro_size,
1955 /* Iterate through all modules and set each module's text as RW */
1956 void set_all_modules_text_rw(void)
1960 mutex_lock(&module_mutex);
1961 list_for_each_entry_rcu(mod, &modules, list) {
1962 if (mod->state == MODULE_STATE_UNFORMED)
1964 if ((mod->module_core) && (mod->core_text_size)) {
1965 set_page_attributes(mod->module_core,
1966 mod->module_core + mod->core_text_size,
1969 if ((mod->module_init) && (mod->init_text_size)) {
1970 set_page_attributes(mod->module_init,
1971 mod->module_init + mod->init_text_size,
1975 mutex_unlock(&module_mutex);
1978 /* Iterate through all modules and set each module's text as RO */
1979 void set_all_modules_text_ro(void)
1983 mutex_lock(&module_mutex);
1984 list_for_each_entry_rcu(mod, &modules, list) {
1985 if (mod->state == MODULE_STATE_UNFORMED)
1987 if ((mod->module_core) && (mod->core_text_size)) {
1988 set_page_attributes(mod->module_core,
1989 mod->module_core + mod->core_text_size,
1992 if ((mod->module_init) && (mod->init_text_size)) {
1993 set_page_attributes(mod->module_init,
1994 mod->module_init + mod->init_text_size,
1998 mutex_unlock(&module_mutex);
2001 static inline void set_section_ro_nx(void *base, unsigned long text_size, unsigned long ro_size, unsigned long total_size) { }
2002 static void unset_module_core_ro_nx(struct module *mod) { }
2003 static void unset_module_init_ro_nx(struct module *mod) { }
2006 void __weak module_memfree(void *module_region)
2008 vfree(module_region);
2011 void __weak module_arch_cleanup(struct module *mod)
2015 void __weak module_arch_freeing_init(struct module *mod)
2019 /* Free a module, remove from lists, etc. */
2020 static void free_module(struct module *mod)
2022 trace_module_free(mod);
2024 mod_sysfs_teardown(mod);
2026 /* We leave it in the list to prevent duplicate loads, but make sure
2027 that no one uses it while it's being deconstructed. */
2028 mutex_lock(&module_mutex);
2029 mod->state = MODULE_STATE_UNFORMED;
2030 mutex_unlock(&module_mutex);
2032 /* Remove dynamic debug info */
2033 ddebug_remove_module(mod->name);
2035 /* Arch-specific cleanup. */
2036 module_arch_cleanup(mod);
2038 /* Module unload stuff */
2039 module_unload_free(mod);
2041 /* Free any allocated parameters. */
2042 destroy_params(mod->kp, mod->num_kp);
2044 /* Now we can delete it from the lists */
2045 mutex_lock(&module_mutex);
2046 /* Unlink carefully: kallsyms could be walking list. */
2047 list_del_rcu(&mod->list);
2048 mod_tree_remove(mod);
2049 /* Remove this module from bug list, this uses list_del_rcu */
2050 module_bug_cleanup(mod);
2051 /* Wait for RCU-sched synchronization before releasing mod->list and buglist. */
2052 synchronize_sched();
2053 mutex_unlock(&module_mutex);
2055 /* This may be NULL, but that's OK */
2056 unset_module_init_ro_nx(mod);
2057 module_arch_freeing_init(mod);
2058 module_memfree(mod->module_init);
2060 percpu_modfree(mod);
2062 /* Free lock-classes; relies on the preceding sync_rcu(). */
2063 lockdep_free_key_range(mod->module_core, mod->core_size);
2065 /* Finally, free the core (containing the module structure) */
2066 unset_module_core_ro_nx(mod);
2067 module_memfree(mod->module_core);
2070 update_protections(current->mm);
2074 void *__symbol_get(const char *symbol)
2076 struct module *owner;
2077 const struct kernel_symbol *sym;
2080 sym = find_symbol(symbol, &owner, NULL, true, true);
2081 if (sym && strong_try_module_get(owner))
2085 return sym ? (void *)sym->value : NULL;
2087 EXPORT_SYMBOL_GPL(__symbol_get);
2090 * Ensure that an exported symbol [global namespace] does not already exist
2091 * in the kernel or in some other module's exported symbol table.
2093 * You must hold the module_mutex.
2095 static int verify_export_symbols(struct module *mod)
2098 struct module *owner;
2099 const struct kernel_symbol *s;
2101 const struct kernel_symbol *sym;
2104 { mod->syms, mod->num_syms },
2105 { mod->gpl_syms, mod->num_gpl_syms },
2106 { mod->gpl_future_syms, mod->num_gpl_future_syms },
2107 #ifdef CONFIG_UNUSED_SYMBOLS
2108 { mod->unused_syms, mod->num_unused_syms },
2109 { mod->unused_gpl_syms, mod->num_unused_gpl_syms },
2113 for (i = 0; i < ARRAY_SIZE(arr); i++) {
2114 for (s = arr[i].sym; s < arr[i].sym + arr[i].num; s++) {
2115 if (find_symbol(s->name, &owner, NULL, true, false)) {
2116 pr_err("%s: exports duplicate symbol %s"
2118 mod->name, s->name, module_name(owner));
2126 /* Change all symbols so that st_value encodes the pointer directly. */
2127 static int simplify_symbols(struct module *mod, const struct load_info *info)
2129 Elf_Shdr *symsec = &info->sechdrs[info->index.sym];
2130 Elf_Sym *sym = (void *)symsec->sh_addr;
2131 unsigned long secbase;
2134 const struct kernel_symbol *ksym;
2136 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
2137 const char *name = info->strtab + sym[i].st_name;
2139 switch (sym[i].st_shndx) {
2141 /* Ignore common symbols */
2142 if (!strncmp(name, "__gnu_lto", 9))
2145 /* We compiled with -fno-common. These are not
2146 supposed to happen. */
2147 pr_debug("Common symbol: %s\n", name);
2148 pr_warn("%s: please compile with -fno-common\n",
2154 /* Don't need to do anything */
2155 pr_debug("Absolute symbol: 0x%08lx\n",
2156 (long)sym[i].st_value);
2160 ksym = resolve_symbol_wait(mod, info, name);
2161 /* Ok if resolved. */
2162 if (ksym && !IS_ERR(ksym)) {
2163 sym[i].st_value = ksym->value;
2168 if (!ksym && ELF_ST_BIND(sym[i].st_info) == STB_WEAK)
2171 pr_warn("%s: Unknown symbol %s (err %li)\n",
2172 mod->name, name, PTR_ERR(ksym));
2173 ret = PTR_ERR(ksym) ?: -ENOENT;
2177 /* Divert to percpu allocation if a percpu var. */
2178 if (sym[i].st_shndx == info->index.pcpu)
2179 secbase = (unsigned long)mod_percpu(mod);
2181 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
2182 sym[i].st_value += secbase;
2190 static int apply_relocations(struct module *mod, const struct load_info *info)
2195 /* Now do relocations. */
2196 for (i = 1; i < info->hdr->e_shnum; i++) {
2197 unsigned int infosec = info->sechdrs[i].sh_info;
2199 /* Not a valid relocation section? */
2200 if (infosec >= info->hdr->e_shnum)
2203 /* Don't bother with non-allocated sections */
2204 if (!(info->sechdrs[infosec].sh_flags & SHF_ALLOC))
2207 if (info->sechdrs[i].sh_type == SHT_REL)
2208 err = apply_relocate(info->sechdrs, info->strtab,
2209 info->index.sym, i, mod);
2210 else if (info->sechdrs[i].sh_type == SHT_RELA)
2211 err = apply_relocate_add(info->sechdrs, info->strtab,
2212 info->index.sym, i, mod);
2219 /* Additional bytes needed by arch in front of individual sections */
2220 unsigned int __weak arch_mod_section_prepend(struct module *mod,
2221 unsigned int section)
2223 /* default implementation just returns zero */
2227 /* Update size with this section: return offset. */
2228 static long get_offset(struct module *mod, unsigned int *size,
2229 Elf_Shdr *sechdr, unsigned int section)
2233 *size += arch_mod_section_prepend(mod, section);
2234 ret = ALIGN(*size, sechdr->sh_addralign ?: 1);
2235 *size = ret + sechdr->sh_size;
2239 /* Lay out the SHF_ALLOC sections in a way not dissimilar to how ld
2240 might -- code, read-only data, read-write data, small data. Tally
2241 sizes, and place the offsets into sh_entsize fields: high bit means it
2242 belongs in init. */
2243 static void layout_sections(struct module *mod, struct load_info *info)
2245 static unsigned long const masks[][2] = {
2246 /* NOTE: all executable code must be the first section
2247 * in this array; otherwise modify the text_size
2248 * finder in the two loops below */
2249 { SHF_EXECINSTR | SHF_ALLOC, ARCH_SHF_SMALL },
2250 { SHF_ALLOC, SHF_WRITE | ARCH_SHF_SMALL },
2251 { SHF_WRITE | SHF_ALLOC, ARCH_SHF_SMALL },
2252 { ARCH_SHF_SMALL | SHF_ALLOC, 0 }
2256 for (i = 0; i < info->hdr->e_shnum; i++)
2257 info->sechdrs[i].sh_entsize = ~0UL;
2259 pr_debug("Core section allocation order:\n");
2260 for (m = 0; m < ARRAY_SIZE(masks); ++m) {
2261 for (i = 0; i < info->hdr->e_shnum; ++i) {
2262 Elf_Shdr *s = &info->sechdrs[i];
2263 const char *sname = info->secstrings + s->sh_name;
2265 if ((s->sh_flags & masks[m][0]) != masks[m][0]
2266 || (s->sh_flags & masks[m][1])
2267 || s->sh_entsize != ~0UL
2268 || strstarts(sname, ".init"))
2270 s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
2271 pr_debug("\t%s\n", sname);
2274 case 0: /* executable */
2275 mod->core_size = debug_align(mod->core_size);
2276 mod->core_text_size = mod->core_size;
2278 case 1: /* RO: text and ro-data */
2279 mod->core_size = debug_align(mod->core_size);
2280 mod->core_ro_size = mod->core_size;
2282 case 3: /* whole core */
2283 mod->core_size = debug_align(mod->core_size);
2288 pr_debug("Init section allocation order:\n");
2289 for (m = 0; m < ARRAY_SIZE(masks); ++m) {
2290 for (i = 0; i < info->hdr->e_shnum; ++i) {
2291 Elf_Shdr *s = &info->sechdrs[i];
2292 const char *sname = info->secstrings + s->sh_name;
2294 if ((s->sh_flags & masks[m][0]) != masks[m][0]
2295 || (s->sh_flags & masks[m][1])
2296 || s->sh_entsize != ~0UL
2297 || !strstarts(sname, ".init"))
2299 s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
2300 | INIT_OFFSET_MASK);
2301 pr_debug("\t%s\n", sname);
2304 case 0: /* executable */
2305 mod->init_size = debug_align(mod->init_size);
2306 mod->init_text_size = mod->init_size;
2308 case 1: /* RO: text and ro-data */
2309 mod->init_size = debug_align(mod->init_size);
2310 mod->init_ro_size = mod->init_size;
2312 case 3: /* whole init */
2313 mod->init_size = debug_align(mod->init_size);
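/*
 * Sketch of the resulting core layout (illustrative, not to scale):
 *
 *	offset 0 ......... core_text_size ...... core_ro_size ...... core_size
 *	[ executable code ][ read-only data     ][ read-write + small data   ]
 *
 * With CONFIG_DEBUG_SET_MODULE_RONX each boundary is rounded up to a page
 * by debug_align(), so set_section_ro_nx() never has to split a page
 * between two regions.  The init area is laid out the same way, with
 * INIT_OFFSET_MASK set on every sh_entsize.
 */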
2319 static void set_license(struct module *mod, const char *license)
2322 license = "unspecified";
2324 if (!license_is_gpl_compatible(license)) {
2325 if (!test_taint(TAINT_PROPRIETARY_MODULE))
2326 pr_warn("%s: module license '%s' taints kernel.\n",
2327 mod->name, license);
2328 add_taint_module(mod, TAINT_PROPRIETARY_MODULE,
2329 LOCKDEP_NOW_UNRELIABLE);
2333 /* Parse tag=value strings from .modinfo section */
2334 static char *next_string(char *string, unsigned long *secsize)
2336 /* Skip non-zero chars */
2339 if ((*secsize)-- <= 1)
2343 /* Skip any zero padding. */
2344 while (!string[0]) {
2346 if ((*secsize)-- <= 1)
2352 static char *get_modinfo(struct load_info *info, const char *tag)
2355 unsigned int taglen = strlen(tag);
2356 Elf_Shdr *infosec = &info->sechdrs[info->index.info];
2357 unsigned long size = infosec->sh_size;
2359 for (p = (char *)infosec->sh_addr; p; p = next_string(p, &size)) {
2360 if (strncmp(p, tag, taglen) == 0 && p[taglen] == '=')
2361 return p + taglen + 1;
2366 static void setup_modinfo(struct module *mod, struct load_info *info)
2368 struct module_attribute *attr;
2371 for (i = 0; (attr = modinfo_attrs[i]); i++) {
2373 attr->setup(mod, get_modinfo(info, attr->attr.name));
2377 static void free_modinfo(struct module *mod)
2379 struct module_attribute *attr;
2382 for (i = 0; (attr = modinfo_attrs[i]); i++) {
2388 #ifdef CONFIG_KALLSYMS
2390 /* lookup symbol in given range of kernel_symbols */
2391 static const struct kernel_symbol *lookup_symbol(const char *name,
2392 const struct kernel_symbol *start,
2393 const struct kernel_symbol *stop)
2395 return bsearch(name, start, stop - start,
2396 sizeof(struct kernel_symbol), cmp_name);
2399 static int is_exported(const char *name, unsigned long value,
2400 const struct module *mod)
2402 const struct kernel_symbol *ks;
2404 ks = lookup_symbol(name, __start___ksymtab, __stop___ksymtab);
2406 ks = lookup_symbol(name, mod->syms, mod->syms + mod->num_syms);
2407 return ks != NULL && ks->value == value;
2411 static char elf_type(const Elf_Sym *sym, const struct load_info *info)
2413 const Elf_Shdr *sechdrs = info->sechdrs;
2415 if (ELF_ST_BIND(sym->st_info) == STB_WEAK) {
2416 if (ELF_ST_TYPE(sym->st_info) == STT_OBJECT)
2421 if (sym->st_shndx == SHN_UNDEF)
2423 if (sym->st_shndx == SHN_ABS)
2425 if (sym->st_shndx >= SHN_LORESERVE)
2427 if (sechdrs[sym->st_shndx].sh_flags & SHF_EXECINSTR)
2429 if (sechdrs[sym->st_shndx].sh_flags & SHF_ALLOC
2430 && sechdrs[sym->st_shndx].sh_type != SHT_NOBITS) {
2431 if (!(sechdrs[sym->st_shndx].sh_flags & SHF_WRITE))
2433 else if (sechdrs[sym->st_shndx].sh_flags & ARCH_SHF_SMALL)
2438 if (sechdrs[sym->st_shndx].sh_type == SHT_NOBITS) {
2439 if (sechdrs[sym->st_shndx].sh_flags & ARCH_SHF_SMALL)
2444 if (strstarts(info->secstrings + sechdrs[sym->st_shndx].sh_name,
2451 static bool is_core_symbol(const Elf_Sym *src, const Elf_Shdr *sechdrs,
2454 const Elf_Shdr *sec;
2456 if (src->st_shndx == SHN_UNDEF
2457 || src->st_shndx >= shnum
2461 sec = sechdrs + src->st_shndx;
2462 if (!(sec->sh_flags & SHF_ALLOC)
2463 #ifndef CONFIG_KALLSYMS_ALL
2464 || !(sec->sh_flags & SHF_EXECINSTR)
2466 || (sec->sh_entsize & INIT_OFFSET_MASK))
2473 * We only allocate and copy the strings needed by the parts of symtab
2474 * we keep. This is simple, but has the effect of making multiple
2475 * copies of duplicates. We could be more sophisticated, see
2476 * linux-kernel thread starting with
2477 * <73defb5e4bca04a6431392cc341112b1@localhost>.
2479 static void layout_symtab(struct module *mod, struct load_info *info)
2481 Elf_Shdr *symsect = info->sechdrs + info->index.sym;
2482 Elf_Shdr *strsect = info->sechdrs + info->index.str;
2484 unsigned int i, nsrc, ndst, strtab_size = 0;
2486 /* Put symbol section at end of init part of module. */
2487 symsect->sh_flags |= SHF_ALLOC;
2488 symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
2489 info->index.sym) | INIT_OFFSET_MASK;
2490 pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
2492 src = (void *)info->hdr + symsect->sh_offset;
2493 nsrc = symsect->sh_size / sizeof(*src);
2495 /* Compute total space required for the core symbols' strtab. */
2496 for (ndst = i = 0; i < nsrc; i++) {
2498 is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum)) {
2499 strtab_size += strlen(&info->strtab[src[i].st_name])+1;
2504 /* Append room for core symbols at end of core part. */
2505 info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
2506 info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
2507 mod->core_size += strtab_size;
2508 mod->core_size = debug_align(mod->core_size);
2510 /* Put string table section at end of init part of module. */
2511 strsect->sh_flags |= SHF_ALLOC;
2512 strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
2513 info->index.str) | INIT_OFFSET_MASK;
2514 mod->init_size = debug_align(mod->init_size);
2515 pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
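/*
 * Worked example (numbers are illustrative): with core_size == 0x3000,
 * ndst == 4 core symbols and strtab_size == 64, the code above yields
 *
 *	info->symoffs  = ALIGN(0x3000, sh_addralign)
 *	info->stroffs  = info->symoffs + 4 * sizeof(Elf_Sym)
 *	mod->core_size = debug_align(info->stroffs + 64)
 *
 * so the trimmed-down symtab and strtab are appended to the core region
 * and survive after the init region is freed.
 */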
2518 static void add_kallsyms(struct module *mod, const struct load_info *info)
2520 unsigned int i, ndst;
2524 Elf_Shdr *symsec = &info->sechdrs[info->index.sym];
2526 mod->symtab = (void *)symsec->sh_addr;
2527 mod->num_symtab = symsec->sh_size / sizeof(Elf_Sym);
2528 /* Make sure we get permanent strtab: don't use info->strtab. */
2529 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
2531 /* Set types up while we still have access to sections. */
2532 for (i = 0; i < mod->num_symtab; i++)
2533 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
2535 mod->core_symtab = dst = mod->module_core + info->symoffs;
2536 mod->core_strtab = s = mod->module_core + info->stroffs;
2538 for (ndst = i = 0; i < mod->num_symtab; i++) {
2540 is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum)) {
2542 dst[ndst++].st_name = s - mod->core_strtab;
2543 s += strlcpy(s, &mod->strtab[src[i].st_name],
2547 mod->core_num_syms = ndst;
2550 static inline void layout_symtab(struct module *mod, struct load_info *info)
2554 static void add_kallsyms(struct module *mod, const struct load_info *info)
2557 #endif /* CONFIG_KALLSYMS */
2559 static void dynamic_debug_setup(struct _ddebug *debug, unsigned int num)
2563 #ifdef CONFIG_DYNAMIC_DEBUG
2564 if (ddebug_add_module(debug, num, debug->modname))
2565 pr_err("dynamic debug error adding module: %s\n",
2570 static void dynamic_debug_remove(struct _ddebug *debug)
2573 ddebug_remove_module(debug->modname);
2576 void * __weak module_alloc(unsigned long size)
2578 return vmalloc_exec(size);
2581 #ifdef CONFIG_DEBUG_KMEMLEAK
2582 static void kmemleak_load_module(const struct module *mod,
2583 const struct load_info *info)
2587 /* only scan the sections containing data */
2588 kmemleak_scan_area(mod, sizeof(struct module), GFP_KERNEL);
2590 for (i = 1; i < info->hdr->e_shnum; i++) {
2591 /* Scan all writable sections that are not executable */
2592 if (!(info->sechdrs[i].sh_flags & SHF_ALLOC) ||
2593 !(info->sechdrs[i].sh_flags & SHF_WRITE) ||
2594 (info->sechdrs[i].sh_flags & SHF_EXECINSTR))
2597 kmemleak_scan_area((void *)info->sechdrs[i].sh_addr,
2598 info->sechdrs[i].sh_size, GFP_KERNEL);
2602 static inline void kmemleak_load_module(const struct module *mod,
2603 const struct load_info *info)
2608 #ifdef CONFIG_MODULE_SIG
2609 static int module_sig_check(struct load_info *info)
2612 const unsigned long markerlen = sizeof(MODULE_SIG_STRING) - 1;
2613 const void *mod = info->hdr;
2615 if (info->len > markerlen &&
2616 memcmp(mod + info->len - markerlen, MODULE_SIG_STRING, markerlen) == 0) {
2617 /* We truncate the module to discard the signature */
2618 info->len -= markerlen;
2619 err = mod_verify_sig(mod, &info->len);
2623 info->sig_ok = true;
2627 /* Not having a signature is only an error if we're strict. */
2628 if (err == -ENOKEY && !sig_enforce)
2633 #else /* !CONFIG_MODULE_SIG */
2634 static int module_sig_check(struct load_info *info)
2638 #endif /* !CONFIG_MODULE_SIG */
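/*
 * On-disk layout sketch for a signed module (assuming the standard
 * scripts/sign-file output):
 *
 *	[ ELF module payload              ]
 *	[ signature blob                  ]
 *	[ struct module_signature         ]
 *	[ "~Module signature appended~\n" ]   <-- MODULE_SIG_STRING
 *
 * module_sig_check() above strips the trailing marker and lets
 * mod_verify_sig() shrink info->len back down to the bare payload.
 */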
2640 /* Sanity checks against invalid binaries, wrong arch, weird ELF version. */
2641 static int elf_header_check(struct load_info *info)
2643 if (info->len < sizeof(*(info->hdr)))
2646 if (memcmp(info->hdr->e_ident, ELFMAG, SELFMAG) != 0
2647 || info->hdr->e_type != ET_REL
2648 || !elf_check_arch(info->hdr)
2649 || info->hdr->e_shentsize != sizeof(Elf_Shdr))
2652 if (info->hdr->e_shoff >= info->len
2653 || (info->hdr->e_shnum * sizeof(Elf_Shdr) >
2654 info->len - info->hdr->e_shoff))
2660 #define COPY_CHUNK_SIZE (16*PAGE_SIZE)
2662 static int copy_chunked_from_user(void *dst, const void __user *usrc, unsigned long len)
2665 unsigned long n = min(len, COPY_CHUNK_SIZE);
2667 if (copy_from_user(dst, usrc, n) != 0)
2677 /* Sets info->hdr and info->len. */
2678 static int copy_module_from_user(const void __user *umod, unsigned long len,
2679 struct load_info *info)
2684 if (info->len < sizeof(*(info->hdr)))
2687 err = security_kernel_module_from_file(NULL);
2691 /* Suck in entire file: we'll want most of it. */
2692 info->hdr = __vmalloc(info->len,
2693 GFP_KERNEL | __GFP_HIGHMEM | __GFP_NOWARN, PAGE_KERNEL);
2697 if (copy_chunked_from_user(info->hdr, umod, info->len) != 0) {
2705 /* Sets info->hdr and info->len. */
2706 static int copy_module_from_fd(int fd, struct load_info *info)
2708 struct fd f = fdget(fd);
2717 err = security_kernel_module_from_file(f.file);
2721 err = vfs_getattr(&f.file->f_path, &stat);
2725 if (stat.size > INT_MAX) {
2730 /* Don't hand 0 to vmalloc, it whines. */
2731 if (stat.size == 0) {
2736 info->hdr = vmalloc(stat.size);
2743 while (pos < stat.size) {
2744 bytes = kernel_read(f.file, pos, (char *)(info->hdr) + pos,
2762 static void free_copy(struct load_info *info)
2767 static int rewrite_section_headers(struct load_info *info, int flags)
2771 /* This should always be true, but let's be sure. */
2772 info->sechdrs[0].sh_addr = 0;
2774 for (i = 1; i < info->hdr->e_shnum; i++) {
2775 Elf_Shdr *shdr = &info->sechdrs[i];
2776 if (shdr->sh_type != SHT_NOBITS
2777 && info->len < shdr->sh_offset + shdr->sh_size) {
2778 pr_err("Module len %lu truncated\n", info->len);
2782 /* Mark all sections sh_addr with their address in the temporary image. */
2784 shdr->sh_addr = (size_t)info->hdr + shdr->sh_offset;
2786 #ifndef CONFIG_MODULE_UNLOAD
2787 /* Don't load .exit sections */
2788 if (strstarts(info->secstrings+shdr->sh_name, ".exit"))
2789 shdr->sh_flags &= ~(unsigned long)SHF_ALLOC;
2793 /* Track but don't keep modinfo and version sections. */
2794 if (flags & MODULE_INIT_IGNORE_MODVERSIONS)
2795 info->index.vers = 0; /* Pretend no __versions section! */
2797 info->index.vers = find_sec(info, "__versions");
2798 info->index.info = find_sec(info, ".modinfo");
2799 info->sechdrs[info->index.info].sh_flags &= ~(unsigned long)SHF_ALLOC;
2800 info->sechdrs[info->index.vers].sh_flags &= ~(unsigned long)SHF_ALLOC;
2805 * Set up our basic convenience variables (pointers to section headers,
2806 * search for module section index etc), and do some basic section verification.
2809 * Return the temporary module pointer (we'll replace it with the final
2810 * one when we move the module sections around).
2812 static struct module *setup_load_info(struct load_info *info, int flags)
2818 /* Set up the convenience variables */
2819 info->sechdrs = (void *)info->hdr + info->hdr->e_shoff;
2820 info->secstrings = (void *)info->hdr
2821 + info->sechdrs[info->hdr->e_shstrndx].sh_offset;
2823 err = rewrite_section_headers(info, flags);
2825 return ERR_PTR(err);
2827 /* Find internal symbols and strings. */
2828 for (i = 1; i < info->hdr->e_shnum; i++) {
2829 if (info->sechdrs[i].sh_type == SHT_SYMTAB) {
2830 info->index.sym = i;
2831 info->index.str = info->sechdrs[i].sh_link;
2832 info->strtab = (char *)info->hdr
2833 + info->sechdrs[info->index.str].sh_offset;
2838 info->index.mod = find_sec(info, ".gnu.linkonce.this_module");
2839 if (!info->index.mod) {
2840 pr_warn("No module found in object\n");
2841 return ERR_PTR(-ENOEXEC);
2843 /* This is temporary: point mod into copy of data. */
2844 mod = (void *)info->sechdrs[info->index.mod].sh_addr;
2846 if (info->index.sym == 0) {
2847 pr_warn("%s: module has no symbols (stripped?)\n", mod->name);
2848 return ERR_PTR(-ENOEXEC);
2851 info->index.pcpu = find_pcpusec(info);
2853 /* Check module struct version now, before we try to use module. */
2854 if (!check_modstruct_version(info->sechdrs, info->index.vers, mod))
2855 return ERR_PTR(-ENOEXEC);
2860 static int check_modinfo(struct module *mod, struct load_info *info, int flags)
2862 const char *modmagic = get_modinfo(info, "vermagic");
2865 if (flags & MODULE_INIT_IGNORE_VERMAGIC)
2868 /* This is allowed: modprobe --force will invalidate it. */
2870 err = try_to_force_load(mod, "bad vermagic");
2873 } else if (!same_magic(modmagic, vermagic, info->index.vers)) {
2874 pr_err("%s: version magic '%s' should be '%s'\n",
2875 mod->name, modmagic, vermagic);
2879 if (!get_modinfo(info, "intree"))
2880 add_taint_module(mod, TAINT_OOT_MODULE, LOCKDEP_STILL_OK);
2882 if (get_modinfo(info, "staging")) {
2883 add_taint_module(mod, TAINT_CRAP, LOCKDEP_STILL_OK);
2884 pr_warn("%s: module is from the staging directory, the quality "
2885 "is unknown, you have been warned.\n", mod->name);
2888 /* Set up license info based on the info section */
2889 set_license(mod, get_modinfo(info, "license"));
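/*
 * Example (string is illustrative): a vermagic value looks like
 *
 *	"4.1.0 SMP mod_unload modversions "
 *
 * i.e. UTS_RELEASE followed by configuration flags.  With CONFIG_MODVERSIONS
 * and a __versions section present, same_magic() skips the release part and
 * compares only the flags, since the per-symbol CRCs already guard the ABI.
 */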
2894 static int find_module_sections(struct module *mod, struct load_info *info)
2896 mod->kp = section_objs(info, "__param",
2897 sizeof(*mod->kp), &mod->num_kp);
2898 mod->syms = section_objs(info, "__ksymtab",
2899 sizeof(*mod->syms), &mod->num_syms);
2900 mod->crcs = section_addr(info, "__kcrctab");
2901 mod->gpl_syms = section_objs(info, "__ksymtab_gpl",
2902 sizeof(*mod->gpl_syms),
2903 &mod->num_gpl_syms);
2904 mod->gpl_crcs = section_addr(info, "__kcrctab_gpl");
2905 mod->gpl_future_syms = section_objs(info,
2906 "__ksymtab_gpl_future",
2907 sizeof(*mod->gpl_future_syms),
2908 &mod->num_gpl_future_syms);
2909 mod->gpl_future_crcs = section_addr(info, "__kcrctab_gpl_future");
2911 #ifdef CONFIG_UNUSED_SYMBOLS
2912 mod->unused_syms = section_objs(info, "__ksymtab_unused",
2913 sizeof(*mod->unused_syms),
2914 &mod->num_unused_syms);
2915 mod->unused_crcs = section_addr(info, "__kcrctab_unused");
2916 mod->unused_gpl_syms = section_objs(info, "__ksymtab_unused_gpl",
2917 sizeof(*mod->unused_gpl_syms),
2918 &mod->num_unused_gpl_syms);
2919 mod->unused_gpl_crcs = section_addr(info, "__kcrctab_unused_gpl");
2921 #ifdef CONFIG_CONSTRUCTORS
2922 mod->ctors = section_objs(info, ".ctors",
2923 sizeof(*mod->ctors), &mod->num_ctors);
2925 mod->ctors = section_objs(info, ".init_array",
2926 sizeof(*mod->ctors), &mod->num_ctors);
2927 else if (find_sec(info, ".init_array")) {
2929 * This shouldn't happen with the same compiler and binutils
2930 * building all parts of the module.
2932 pr_warn("%s: has both .ctors and .init_array.\n",
2938 #ifdef CONFIG_TRACEPOINTS
2939 mod->tracepoints_ptrs = section_objs(info, "__tracepoints_ptrs",
2940 sizeof(*mod->tracepoints_ptrs),
2941 &mod->num_tracepoints);
2943 #ifdef HAVE_JUMP_LABEL
2944 mod->jump_entries = section_objs(info, "__jump_table",
2945 sizeof(*mod->jump_entries),
2946 &mod->num_jump_entries);
2948 #ifdef CONFIG_EVENT_TRACING
2949 mod->trace_events = section_objs(info, "_ftrace_events",
2950 sizeof(*mod->trace_events),
2951 &mod->num_trace_events);
2952 mod->trace_enums = section_objs(info, "_ftrace_enum_map",
2953 sizeof(*mod->trace_enums),
2954 &mod->num_trace_enums);
2956 #ifdef CONFIG_TRACING
2957 mod->trace_bprintk_fmt_start = section_objs(info, "__trace_printk_fmt",
2958 sizeof(*mod->trace_bprintk_fmt_start),
2959 &mod->num_trace_bprintk_fmt);
2961 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
2962 /* sechdrs[0].sh_size is always zero */
2963 mod->ftrace_callsites = section_objs(info, "__mcount_loc",
2964 sizeof(*mod->ftrace_callsites),
2965 &mod->num_ftrace_callsites);
2968 mod->extable = section_objs(info, "__ex_table",
2969 sizeof(*mod->extable), &mod->num_exentries);
2971 if (section_addr(info, "__obsparm"))
2972 pr_warn("%s: Ignoring obsolete parameters\n", mod->name);
2974 info->debug = section_objs(info, "__verbose",
2975 sizeof(*info->debug), &info->num_debug);
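/*
 * Sketch of where these sections come from (module source is illustrative):
 *
 *	static int debug;
 *	module_param(debug, int, 0644);		/* record in "__param" */
 *	EXPORT_SYMBOL_GPL(foo_do_thing);	/* record in "__ksymtab_gpl" */
 *
 * Each macro emits a fixed-size object into a named section, which is why
 * section_objs() can divide sh_size by the element size to count entries.
 */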
2980 static int move_module(struct module *mod, struct load_info *info)
2985 /* Do the allocs. */
2986 ptr = module_alloc(mod->core_size);
2988 * The pointer to this block is stored in the module structure
2989 * which is inside the block. Just mark it as not being a leak.
2992 kmemleak_not_leak(ptr);
2996 memset(ptr, 0, mod->core_size);
2997 mod->module_core = ptr;
2999 if (mod->init_size) {
3000 ptr = module_alloc(mod->init_size);
3002 * The pointer to this block is stored in the module structure
3003 * which is inside the block. This block doesn't need to be
3004 * scanned as it contains data and code that will be freed
3005 * after the module is initialized.
3007 kmemleak_ignore(ptr);
3009 module_memfree(mod->module_core);
3012 memset(ptr, 0, mod->init_size);
3013 mod->module_init = ptr;
3015 mod->module_init = NULL;
3017 /* Transfer each section which specifies SHF_ALLOC */
3018 pr_debug("final section addresses:\n");
3019 for (i = 0; i < info->hdr->e_shnum; i++) {
3021 Elf_Shdr *shdr = &info->sechdrs[i];
3023 if (!(shdr->sh_flags & SHF_ALLOC))
3026 if (shdr->sh_entsize & INIT_OFFSET_MASK)
3027 dest = mod->module_init
3028 + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
3030 dest = mod->module_core + shdr->sh_entsize;
3032 if (shdr->sh_type != SHT_NOBITS)
3033 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
3034 /* Update sh_addr to point to copy in image. */
3035 shdr->sh_addr = (unsigned long)dest;
3036 pr_debug("\t0x%lx %s\n",
3037 (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
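/*
 * Note on the sh_entsize encoding (values are illustrative): layout_sections()
 * stored each section's destination offset in sh_entsize, with
 * INIT_OFFSET_MASK set for init-only sections.  So an entry such as
 *
 *	shdr->sh_entsize == (0x200 | INIT_OFFSET_MASK)
 *
 * is copied to mod->module_init + 0x200, while a plain 0x200 lands at
 * mod->module_core + 0x200.
 */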
3043 static int check_module_license_and_versions(struct module *mod)
3046 * ndiswrapper is under GPL by itself, but loads proprietary modules.
3047 * Don't use add_taint_module(), as it would prevent ndiswrapper from
3048 * using GPL-only symbols it needs.
3050 if (strcmp(mod->name, "ndiswrapper") == 0)
3051 add_taint(TAINT_PROPRIETARY_MODULE, LOCKDEP_NOW_UNRELIABLE);
3053 /* driverloader was caught wrongly pretending to be under GPL */
3054 if (strcmp(mod->name, "driverloader") == 0)
3055 add_taint_module(mod, TAINT_PROPRIETARY_MODULE,
3056 LOCKDEP_NOW_UNRELIABLE);
3058 /* lve claims to be GPL but upstream won't provide source */
3059 if (strcmp(mod->name, "lve") == 0)
3060 add_taint_module(mod, TAINT_PROPRIETARY_MODULE,
3061 LOCKDEP_NOW_UNRELIABLE);
3063 #ifdef CONFIG_MODVERSIONS
3064 if ((mod->num_syms && !mod->crcs)
3065 || (mod->num_gpl_syms && !mod->gpl_crcs)
3066 || (mod->num_gpl_future_syms && !mod->gpl_future_crcs)
3067 #ifdef CONFIG_UNUSED_SYMBOLS
3068 || (mod->num_unused_syms && !mod->unused_crcs)
3069 || (mod->num_unused_gpl_syms && !mod->unused_gpl_crcs)
3072 return try_to_force_load(mod,
3073 "no versions for exported symbols");
3079 static void flush_module_icache(const struct module *mod)
3081 mm_segment_t old_fs;
3083 /* flush the icache in correct context */
3088 * Flush the instruction cache, since we've played with text.
3089 * Do it before processing of module parameters, so the module
3090 * can provide parameter accessor functions of its own.
3092 if (mod->module_init)
3093 flush_icache_range((unsigned long)mod->module_init,
3094 (unsigned long)mod->module_init
3096 flush_icache_range((unsigned long)mod->module_core,
3097 (unsigned long)mod->module_core + mod->core_size);
3102 int __weak module_frob_arch_sections(Elf_Ehdr *hdr,
3110 static struct module *layout_and_allocate(struct load_info *info, int flags)
3112 /* Module within temporary copy. */
3116 mod = setup_load_info(info, flags);
3120 err = check_modinfo(mod, info, flags);
3122 return ERR_PTR(err);
3124 /* Allow arches to frob section contents and sizes. */
3125 err = module_frob_arch_sections(info->hdr, info->sechdrs,
3126 info->secstrings, mod);
3128 return ERR_PTR(err);
3130 /* We will do a special allocation for per-cpu sections later. */
3131 info->sechdrs[info->index.pcpu].sh_flags &= ~(unsigned long)SHF_ALLOC;
3133 /* Determine total sizes, and put offsets in sh_entsize. For now
3134 this is done generically; there don't appear to be any
3135 special cases for the architectures. */
3136 layout_sections(mod, info);
3137 layout_symtab(mod, info);
3139 /* Allocate and move to the final place */
3140 err = move_module(mod, info);
3142 return ERR_PTR(err);
3144 /* Module has been copied to its final place now: return it. */
3145 mod = (void *)info->sechdrs[info->index.mod].sh_addr;
3146 kmemleak_load_module(mod, info);
3150 /* mod is no longer valid after this! */
3151 static void module_deallocate(struct module *mod, struct load_info *info)
3153 percpu_modfree(mod);
3154 module_arch_freeing_init(mod);
3155 module_memfree(mod->module_init);
3156 module_memfree(mod->module_core);
3159 int __weak module_finalize(const Elf_Ehdr *hdr,
3160 const Elf_Shdr *sechdrs,
3166 static int post_relocation(struct module *mod, const struct load_info *info)
3168 /* Sort exception table now that relocations are done. */
3169 sort_extable(mod->extable, mod->extable + mod->num_exentries);
3171 /* Copy relocated percpu area over. */
3172 percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
3173 info->sechdrs[info->index.pcpu].sh_size);
3175 /* Set up kallsyms-specific fields. */
3176 add_kallsyms(mod, info);
3178 /* Arch-specific module finalizing. */
3179 return module_finalize(info->hdr, info->sechdrs, mod);
3182 /* Has the module with this name finished loading? No locks held. */
3183 static bool finished_loading(const char *name)
3189 * The module_mutex should not be a heavily contended lock;
3190 * if we get the occasional sleep here, we'll go an extra iteration
3191 * in the wait_event_interruptible(), which is harmless.
3193 sched_annotate_sleep();
3194 mutex_lock(&module_mutex);
3195 mod = find_module_all(name, strlen(name), true);
3196 ret = !mod || mod->state == MODULE_STATE_LIVE
3197 || mod->state == MODULE_STATE_GOING;
3198 mutex_unlock(&module_mutex);
3203 /* Call module constructors. */
3204 static void do_mod_ctors(struct module *mod)
3206 #ifdef CONFIG_CONSTRUCTORS
3209 for (i = 0; i < mod->num_ctors; i++)
3214 /* For freeing module_init on success, in case kallsyms is still traversing it */
3215 struct mod_initfree {
3216 struct rcu_head rcu;
3220 static void do_free_init(struct rcu_head *head)
3222 struct mod_initfree *m = container_of(head, struct mod_initfree, rcu);
3223 module_memfree(m->module_init);
3228 * This is where the real work happens.
3230 * Keep it uninlined to provide a reliable breakpoint target, e.g. for the gdb
3231 * helper command 'lx-symbols'.
3233 static noinline int do_init_module(struct module *mod)
3236 struct mod_initfree *freeinit;
3238 freeinit = kmalloc(sizeof(*freeinit), GFP_KERNEL);
3243 freeinit->module_init = mod->module_init;
3246 * We want to find out whether @mod uses async during init. Clear
3247 * PF_USED_ASYNC. async_schedule*() will set it.
3249 current->flags &= ~PF_USED_ASYNC;
3252 /* Start the module */
3253 if (mod->init != NULL)
3254 ret = do_one_initcall(mod->init);
3256 goto fail_free_freeinit;
3259 pr_warn("%s: '%s'->init suspiciously returned %d, it should "
3260 "follow 0/-E convention\n"
3261 "%s: loading module anyway...\n",
3262 __func__, mod->name, ret, __func__);
3266 /* Now it's a first class citizen! */
3267 mod->state = MODULE_STATE_LIVE;
3268 blocking_notifier_call_chain(&module_notify_list,
3269 MODULE_STATE_LIVE, mod);
3272 * We need to finish all async code before the module init sequence
3273 * is done. This has potential to deadlock. For example, a newly
3274 * detected block device can trigger request_module() of the
3275 * default iosched from async probing task. Once userland helper
3276 * reaches here, async_synchronize_full() will wait on the async
3277 * task waiting on request_module() and deadlock.
3279 * This deadlock is avoided by performing async_synchronize_full()
3280 * iff module init queued any async jobs. This isn't a full
3281 * solution as it will deadlock in the same way if module loading from
3282 * async jobs nests more than once; however, due to the various
3283 * constraints, this hack seems to be the best option for now.
3284 * Please refer to the following thread for details.
3286 * http://thread.gmane.org/gmane.linux.kernel/1420814
3288 if (current->flags & PF_USED_ASYNC)
3289 async_synchronize_full();
3291 mutex_lock(&module_mutex);
3292 /* Drop initial reference. */
3294 trim_init_extable(mod);
3295 #ifdef CONFIG_KALLSYMS
3296 mod->num_symtab = mod->core_num_syms;
3297 mod->symtab = mod->core_symtab;
3298 mod->strtab = mod->core_strtab;
3300 mod_tree_remove_init(mod);
3301 unset_module_init_ro_nx(mod);
3302 module_arch_freeing_init(mod);
3303 mod->module_init = NULL;
3305 mod->init_ro_size = 0;
3306 mod->init_text_size = 0;
3308 * We want to free module_init, but be aware that kallsyms may be
3309 * walking this with preempt disabled. In all the failure paths, we
3310 * call synchronize_sched(), but we don't want to slow down the success
3311 * path, so use actual RCU here.
3313 call_rcu_sched(&freeinit->rcu, do_free_init);
3314 mutex_unlock(&module_mutex);
3315 wake_up_all(&module_wq);
3322 /* Try to protect us from buggy refcounters. */
3323 mod->state = MODULE_STATE_GOING;
3324 synchronize_sched();
3326 blocking_notifier_call_chain(&module_notify_list,
3327 MODULE_STATE_GOING, mod);
3329 wake_up_all(&module_wq);
3333 static int may_init_module(void)
3335 if (!capable(CAP_SYS_MODULE) || modules_disabled)
3342 * We try to place it in the list now to make sure it's unique before
3343 * we dedicate too many resources; in particular, this avoids temporary
3344 * percpu memory exhaustion.
3346 static int add_unformed_module(struct module *mod)
3351 mod->state = MODULE_STATE_UNFORMED;
3354 mutex_lock(&module_mutex);
3355 old = find_module_all(mod->name, strlen(mod->name), true);
3357 if (old->state == MODULE_STATE_COMING
3358 || old->state == MODULE_STATE_UNFORMED) {
3359 /* Wait in case it fails to load. */
3360 mutex_unlock(&module_mutex);
3361 err = wait_event_interruptible(module_wq,
3362 finished_loading(mod->name));
3370 mod_update_bounds(mod);
3371 list_add_rcu(&mod->list, &modules);
3372 mod_tree_insert(mod);
3376 mutex_unlock(&module_mutex);
3381 static int complete_formation(struct module *mod, struct load_info *info)
3385 mutex_lock(&module_mutex);
3387 /* Find duplicate symbols (must be called under lock). */
3388 err = verify_export_symbols(mod);
3392 /* This relies on module_mutex for list integrity. */
3393 module_bug_finalize(info->hdr, info->sechdrs, mod);
3395 /* Set RO and NX regions for core */
3396 set_section_ro_nx(mod->module_core,
3397 mod->core_text_size,
3401 /* Set RO and NX regions for init */
3402 set_section_ro_nx(mod->module_init,
3403 mod->init_text_size,
3407 /* Mark state as coming so strong_try_module_get() ignores us,
3408 * but kallsyms etc. can see us. */
3409 mod->state = MODULE_STATE_COMING;
3410 mutex_unlock(&module_mutex);
3412 blocking_notifier_call_chain(&module_notify_list,
3413 MODULE_STATE_COMING, mod);
3417 mutex_unlock(&module_mutex);
3421 static int unknown_module_param_cb(char *param, char *val, const char *modname)
3423 /* Check for magic 'dyndbg' arg */
3424 int ret = ddebug_dyndbg_module_param_cb(param, val, modname);
3426 pr_warn("%s: unknown parameter '%s' ignored\n", modname, param);
3430 /* Allocate and load the module: note that size of section 0 is always
3431 zero, and we rely on this for optional sections. */
3432 static int load_module(struct load_info *info, const char __user *uargs,
3439 err = module_sig_check(info);
3443 err = elf_header_check(info);
3447 /* Figure out module layout, and allocate all the memory. */
3448 mod = layout_and_allocate(info, flags);
3454 /* Reserve our place in the list. */
3455 err = add_unformed_module(mod);
3459 #ifdef CONFIG_MODULE_SIG
3460 mod->sig_ok = info->sig_ok;
3462 pr_notice_once("%s: module verification failed: signature "
3463 "and/or required key missing - tainting "
3464 "kernel\n", mod->name);
3465 add_taint_module(mod, TAINT_UNSIGNED_MODULE, LOCKDEP_STILL_OK);
3469 /* To avoid stressing the percpu allocator, do this once we're unique. */
3470 err = percpu_modalloc(mod, info);
3474 /* Now module is in final location, initialize linked lists, etc. */
3475 err = module_unload_init(mod);
3479 /* Now we've got everything in the final locations, we can
3480 * find optional sections. */
3481 err = find_module_sections(mod, info);
3485 err = check_module_license_and_versions(mod);
3489 /* Set up MODINFO_ATTR fields */
3490 setup_modinfo(mod, info);
3492 /* Fix up syms, so that st_value is a pointer to the symbol's location. */
3493 err = simplify_symbols(mod, info);
3497 err = apply_relocations(mod, info);
3501 err = post_relocation(mod, info);
3505 flush_module_icache(mod);
3507 /* Now copy in args */
3508 mod->args = strndup_user(uargs, ~0UL >> 1);
3509 if (IS_ERR(mod->args)) {
3510 err = PTR_ERR(mod->args);
3511 goto free_arch_cleanup;
3514 dynamic_debug_setup(info->debug, info->num_debug);
3516 /* Ftrace init must be called in the MODULE_STATE_UNFORMED state */
3517 ftrace_module_init(mod);
3519 /* Finally it's fully formed, ready to start executing. */
3520 err = complete_formation(mod, info);
3522 goto ddebug_cleanup;
3524 /* Module is ready to execute: parsing args may do that. */
3525 after_dashes = parse_args(mod->name, mod->args, mod->kp, mod->num_kp,
3526 -32768, 32767, unknown_module_param_cb);
3527 if (IS_ERR(after_dashes)) {
3528 err = PTR_ERR(after_dashes);
3530 } else if (after_dashes) {
3531 pr_warn("%s: parameters '%s' after `--' ignored\n",
3532 mod->name, after_dashes);
3535 /* Link in to sysfs. */
3536 err = mod_sysfs_setup(mod, info, mod->kp, mod->num_kp);
3540 /* Get rid of temporary copy. */
3544 trace_module_load(mod);
3546 return do_init_module(mod);
3549 /* module_bug_cleanup needs module_mutex protection */
3550 mutex_lock(&module_mutex);
3551 module_bug_cleanup(mod);
3552 mutex_unlock(&module_mutex);
3554 /* we can't deallocate the module until we clear memory protection */
3555 unset_module_init_ro_nx(mod);
3556 unset_module_core_ro_nx(mod);
3559 dynamic_debug_remove(info->debug);
3560 synchronize_sched();
3563 module_arch_cleanup(mod);
3567 module_unload_free(mod);
3569 mutex_lock(&module_mutex);
3570 /* Unlink carefully: kallsyms could be walking the list. */
3571 list_del_rcu(&mod->list);
3572 wake_up_all(&module_wq);
3573 /* Wait for an RCU-sched grace period before releasing mod->list. */
3574 synchronize_sched();
3575 mutex_unlock(&module_mutex);
3577 /* Free lock-classes; relies on the preceding sync_rcu() */
3578 lockdep_free_key_range(mod->module_core, mod->core_size);
3580 module_deallocate(mod, info);
3586 SYSCALL_DEFINE3(init_module, void __user *, umod,
3587 unsigned long, len, const char __user *, uargs)
3590 struct load_info info = { };
3592 err = may_init_module();
3596 pr_debug("init_module: umod=%p, len=%lu, uargs=%p\n",
3599 err = copy_module_from_user(umod, len, &info);
3603 return load_module(&info, uargs, 0);
3606 SYSCALL_DEFINE3(finit_module, int, fd, const char __user *, uargs, int, flags)
3609 struct load_info info = { };
3611 err = may_init_module();
3615 pr_debug("finit_module: fd=%d, uargs=%p, flags=%i\n", fd, uargs, flags);
3617 if (flags & ~(MODULE_INIT_IGNORE_MODVERSIONS
3618 |MODULE_INIT_IGNORE_VERMAGIC))
3621 err = copy_module_from_fd(fd, &info);
3625 return load_module(&info, uargs, flags);
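/*
 * Userspace usage sketch (path and parameters are illustrative; error
 * handling trimmed):
 *
 *	int fd = open("/lib/modules/4.1.0/extra/foo.ko", O_RDONLY | O_CLOEXEC);
 *	if (syscall(__NR_finit_module, fd, "debug=1", 0) != 0)
 *		perror("finit_module");
 *
 * MODULE_INIT_IGNORE_MODVERSIONS and MODULE_INIT_IGNORE_VERMAGIC may be
 * OR'ed into the flags argument to relax the corresponding checks above.
 */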
3628 static inline int within(unsigned long addr, void *start, unsigned long size)
3630 return ((void *)addr >= start && (void *)addr < start + size);
3633 #ifdef CONFIG_KALLSYMS
3635 * This ignores the intensely annoying "mapping symbols" found
3636 * in ARM ELF files: $a, $t and $d.
3638 static inline int is_arm_mapping_symbol(const char *str)
3640 if (str[0] == '.' && str[1] == 'L')
3642 return str[0] == '$' && strchr("axtd", str[1])
3643 && (str[2] == '\0' || str[2] == '.');
3646 static const char *get_ksymbol(struct module *mod,
3648 unsigned long *size,
3649 unsigned long *offset)
3651 unsigned int i, best = 0;
3652 unsigned long nextval;
3654 /* At worst, the next value is at the end of the module */
3655 if (within_module_init(addr, mod))
3656 nextval = (unsigned long)mod->module_init+mod->init_text_size;
3658 nextval = (unsigned long)mod->module_core+mod->core_text_size;
3660 /* Scan for closest preceding symbol, and next symbol. (ELF
3661 starts real symbols at 1). */
3662 for (i = 1; i < mod->num_symtab; i++) {
3663 if (mod->symtab[i].st_shndx == SHN_UNDEF)
3666 /* We ignore unnamed symbols: they're uninformative
3667 * and inserted at a whim. */
3668 if (mod->symtab[i].st_value <= addr
3669 && mod->symtab[i].st_value > mod->symtab[best].st_value
3670 && *(mod->strtab + mod->symtab[i].st_name) != '\0'
3671 && !is_arm_mapping_symbol(mod->strtab + mod->symtab[i].st_name))
3673 if (mod->symtab[i].st_value > addr
3674 && mod->symtab[i].st_value < nextval
3675 && *(mod->strtab + mod->symtab[i].st_name) != '\0'
3676 && !is_arm_mapping_symbol(mod->strtab + mod->symtab[i].st_name))
3677 nextval = mod->symtab[i].st_value;
3684 *size = nextval - mod->symtab[best].st_value;
3686 *offset = addr - mod->symtab[best].st_value;
3687 return mod->strtab + mod->symtab[best].st_name;
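/*
 * Example (values are illustrative): for an address 0x10 bytes into a
 * 0x80-byte module function foo_func, get_ksymbol() returns "foo_func"
 * with *offset == 0x10 and *size == 0x80, which callers print in the
 * familiar backtrace form "foo_func+0x10/0x80 [modname]".
 */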
3690 /* For kallsyms to ask for address resolution. NULL means not found. Careful
3691 * not to take locks (to avoid deadlock on oopses); we simply disable preemption. */
3692 const char *module_address_lookup(unsigned long addr,
3693 unsigned long *size,
3694 unsigned long *offset,
3698 const char *ret = NULL;
3702 mod = __module_address(addr);
3705 *modname = mod->name;
3706 ret = get_ksymbol(mod, addr, size, offset);
3708 /* Make a copy in here where it's safe */
3710 strncpy(namebuf, ret, KSYM_NAME_LEN - 1);
3718 int lookup_module_symbol_name(unsigned long addr, char *symname)
3723 list_for_each_entry_rcu(mod, &modules, list) {
3724 if (mod->state == MODULE_STATE_UNFORMED)
3726 if (within_module(addr, mod)) {
3729 sym = get_ksymbol(mod, addr, NULL, NULL);
3732 strlcpy(symname, sym, KSYM_NAME_LEN);
3742 int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size,
3743 unsigned long *offset, char *modname, char *name)
3748 list_for_each_entry_rcu(mod, &modules, list) {
3749 if (mod->state == MODULE_STATE_UNFORMED)
3751 if (within_module(addr, mod)) {
3754 sym = get_ksymbol(mod, addr, size, offset);
3758 strlcpy(modname, mod->name, MODULE_NAME_LEN);
3760 strlcpy(name, sym, KSYM_NAME_LEN);
3770 int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
3771 char *name, char *module_name, int *exported)
3776 list_for_each_entry_rcu(mod, &modules, list) {
3777 if (mod->state == MODULE_STATE_UNFORMED)
3779 if (symnum < mod->num_symtab) {
3780 *value = mod->symtab[symnum].st_value;
3781 *type = mod->symtab[symnum].st_info;
3782 strlcpy(name, mod->strtab + mod->symtab[symnum].st_name,
3784 strlcpy(module_name, mod->name, MODULE_NAME_LEN);
3785 *exported = is_exported(name, *value, mod);
3789 symnum -= mod->num_symtab;
3795 static unsigned long mod_find_symname(struct module *mod, const char *name)
3799 for (i = 0; i < mod->num_symtab; i++)
3800 if (strcmp(name, mod->strtab+mod->symtab[i].st_name) == 0 &&
3801 mod->symtab[i].st_info != 'U')
3802 return mod->symtab[i].st_value;
3806 /* Look for this name: can be of form module:name. */
3807 unsigned long module_kallsyms_lookup_name(const char *name)
3811 unsigned long ret = 0;
3813 /* Don't lock: we're in enough trouble already. */
3815 if ((colon = strchr(name, ':')) != NULL) {
3816 if ((mod = find_module_all(name, colon - name, false)) != NULL)
3817 ret = mod_find_symname(mod, colon+1);
3819 list_for_each_entry_rcu(mod, &modules, list) {
3820 if (mod->state == MODULE_STATE_UNFORMED)
3822 if ((ret = mod_find_symname(mod, name)) != 0)
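/*
 * Usage sketch (module and symbol names are illustrative): callers may
 * qualify the symbol with a module name, e.g. via kallsyms_lookup_name():
 *
 *	addr = module_kallsyms_lookup_name("e1000:e1000_up");
 *
 * A bare "e1000_up" instead scans the symtab of every live module.
 */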
3830 int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
3831 struct module *, unsigned long),
3838 module_assert_mutex();
3840 list_for_each_entry(mod, &modules, list) {
3841 if (mod->state == MODULE_STATE_UNFORMED)
3843 for (i = 0; i < mod->num_symtab; i++) {
3844 ret = fn(data, mod->strtab + mod->symtab[i].st_name,
3845 mod, mod->symtab[i].st_value);
3852 #endif /* CONFIG_KALLSYMS */
3854 static char *module_flags(struct module *mod, char *buf)
3858 BUG_ON(mod->state == MODULE_STATE_UNFORMED);
3860 mod->state == MODULE_STATE_GOING ||
3861 mod->state == MODULE_STATE_COMING) {
3863 bx += module_flags_taint(mod, buf + bx);
3864 /* Show a - for module-is-being-unloaded */
3865 if (mod->state == MODULE_STATE_GOING)
3867 /* Show a + for module-is-being-loaded */
3868 if (mod->state == MODULE_STATE_COMING)
3877 #ifdef CONFIG_PROC_FS
3878 /* Called by the /proc file system to return a list of modules. */
3879 static void *m_start(struct seq_file *m, loff_t *pos)
3881 mutex_lock(&module_mutex);
3882 return seq_list_start(&modules, *pos);
3885 static void *m_next(struct seq_file *m, void *p, loff_t *pos)
3887 return seq_list_next(p, &modules, pos);
3890 static void m_stop(struct seq_file *m, void *p)
3892 mutex_unlock(&module_mutex);
3895 static int m_show(struct seq_file *m, void *p)
3897 struct module *mod = list_entry(p, struct module, list);
3900 /* We always ignore unformed modules. */
3901 if (mod->state == MODULE_STATE_UNFORMED)
3904 seq_printf(m, "%s %u",
3905 mod->name, mod->init_size + mod->core_size);
3906 print_unload_info(m, mod);
3908 /* Informative for users. */
3909 seq_printf(m, " %s",
3910 mod->state == MODULE_STATE_GOING ? "Unloading" :
3911 mod->state == MODULE_STATE_COMING ? "Loading" :
3913 /* Used by oprofile and other similar tools. */
3914 seq_printf(m, " 0x%pK", mod->module_core);
3918 seq_printf(m, " %s", module_flags(mod, buf));
3924 /* Format: modulename size refcount deps address
3926 Where refcount is a number or -, and deps is a comma-separated list of depends or -. */
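/*
 * Example line (illustrative):
 *
 *	e1000 122880 1 - Live 0xffffffffa0010000
 *
 * i.e. name, init+core size in bytes, reference count, dependent modules
 * (or "-"), state, and the core address printed with %pK (shown as zeroes
 * to readers lacking the required privileges when kptr_restrict is set).
 */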
3929 static const struct seq_operations modules_op = {
3936 static int modules_open(struct inode *inode, struct file *file)
3938 return seq_open(file, &modules_op);
3941 static const struct file_operations proc_modules_operations = {
3942 .open = modules_open,
3944 .llseek = seq_lseek,
3945 .release = seq_release,
3948 static int __init proc_modules_init(void)
3950 proc_create("modules", 0, NULL, &proc_modules_operations);
3953 module_init(proc_modules_init);
3956 /* Given an address, look for it in the module exception tables. */
3957 const struct exception_table_entry *search_module_extables(unsigned long addr)
3959 const struct exception_table_entry *e = NULL;
3963 list_for_each_entry_rcu(mod, &modules, list) {
3964 if (mod->state == MODULE_STATE_UNFORMED)
3966 if (mod->num_exentries == 0)
3969 e = search_extable(mod->extable,
3970 mod->extable + mod->num_exentries - 1,
3977 /* If we found one, we are currently running inside it, hence
3978 we cannot unload the module, hence no refcnt needed. */
3983 * is_module_address - is this address inside a module?
3984 * @addr: the address to check.
3986 * See is_module_text_address() if you simply want to see if the address
3987 * is code (not data).
3989 bool is_module_address(unsigned long addr)
3994 ret = __module_address(addr) != NULL;
4001 * __module_address - get the module which contains an address.
4002 * @addr: the address.
4004 * Must be called with preempt disabled or module mutex held so that
4005 * the module doesn't get freed during this.
4007 struct module *__module_address(unsigned long addr)
4011 if (addr < module_addr_min || addr > module_addr_max)
4014 module_assert_mutex_or_preempt();
4016 mod = mod_find(addr);
4018 BUG_ON(!within_module(addr, mod));
4019 if (mod->state == MODULE_STATE_UNFORMED)
4024 EXPORT_SYMBOL_GPL(__module_address);
4027 * is_module_text_address - is this address inside module code?
4028 * @addr: the address to check.
4030 * See is_module_address() if you simply want to see if the address is
4031 * anywhere in a module. See kernel_text_address() for testing if an
4032 * address corresponds to kernel or module code.
4034 bool is_module_text_address(unsigned long addr)
4039 ret = __module_text_address(addr) != NULL;
4046 * __module_text_address - get the module whose code contains an address.
4047 * @addr: the address.
4049 * Must be called with preempt disabled or module mutex held so that
4050 * the module doesn't get freed during this.
4052 struct module *__module_text_address(unsigned long addr)
4054 struct module *mod = __module_address(addr);
4056 /* Make sure it's within the text section. */
4057 if (!within(addr, mod->module_init, mod->init_text_size)
4058 && !within(addr, mod->module_core, mod->core_text_size))
4063 EXPORT_SYMBOL_GPL(__module_text_address);
4065 /* Don't grab lock, we're oopsing. */
4066 void print_modules(void)
4071 printk(KERN_DEFAULT "Modules linked in:");
4072 /* Most callers should already have preempt disabled, but make sure */
4074 list_for_each_entry_rcu(mod, &modules, list) {
4075 if (mod->state == MODULE_STATE_UNFORMED)
4077 pr_cont(" %s%s", mod->name, module_flags(mod, buf));
4080 if (last_unloaded_module[0])
4081 pr_cont(" [last unloaded: %s]", last_unloaded_module);
4085 #ifdef CONFIG_MODVERSIONS
4086 /* Generate the signature for all relevant module structures here.
4087 * If these change, we don't want to try to parse the module. */
4088 void module_layout(struct module *mod,
4089 struct modversion_info *ver,
4090 struct kernel_param *kp,
4091 struct kernel_symbol *ks,
4092 struct tracepoint * const *tp)
4095 EXPORT_SYMBOL(module_layout);