/*
 *  Kernel Probes (KProbes)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes suggestions from
 *		Rusty Russell).
 * 2004-Aug	Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
 *		hlists and exceptions notifier as suggested by Andi Kleen.
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Sep	Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
 *		exceptions notifier to be first on the priority list.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 */
#include <linux/kprobes.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/moduleloader.h>
#include <linux/kallsyms.h>
#include <linux/freezer.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kdebug.h>
#include <linux/memory.h>

#include <asm-generic/sections.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <asm/uaccess.h>
#define KPROBE_HASH_BITS 6
#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)

/*
 * Some oddball architectures like 64bit powerpc have function descriptors,
 * so this must be overridable.
 */
#ifndef kprobe_lookup_name
#define kprobe_lookup_name(name, addr) \
	addr = ((kprobe_opcode_t *)(kallsyms_lookup_name(name)))
#endif
static int kprobes_initialized;
static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];

/* NOTE: change this value only with kprobe_mutex held */
static bool kprobes_all_disarmed;

static DEFINE_MUTEX(kprobe_mutex);	/* Protects kprobe_table */
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
static struct {
	spinlock_t lock ____cacheline_aligned_in_smp;
} kretprobe_table_locks[KPROBE_TABLE_SIZE];

static spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
{
	return &(kretprobe_table_locks[hash].lock);
}
/*
 * Normally, functions that we'd want to prohibit kprobes in are marked
 * __kprobes. But there are cases where such functions already belong to
 * a different section (__sched for preempt_schedule).
 *
 * For such cases, we now have a blacklist.
 */
static struct kprobe_blackpoint kprobe_blacklist[] = {
	{"preempt_schedule",},
	{"native_get_debugreg",},
	{"irq_entries_start",},
	{"common_interrupt",},
	{"mcount",},	/* mcount can be called from everywhere */
	{NULL}		/* Terminator */
};
#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
/*
 * kprobe->ainsn.insn points to the copy of the instruction to be
 * single-stepped. x86_64, POWER4 and above have no-exec support, and
 * stepping on the instruction on a vmalloced/kmalloced/data page
 * is a recipe for disaster.
 */
#define INSNS_PER_PAGE	(PAGE_SIZE/(MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))

struct kprobe_insn_page {
	struct list_head list;
	kprobe_opcode_t *insns;		/* Page of instruction slots */
	char slot_used[INSNS_PER_PAGE];
	int nused;
	int ngarbage;
};

enum kprobe_slot_state {
	SLOT_CLEAN = 0,
	SLOT_DIRTY = 1,
	SLOT_USED = 2,
};
static DEFINE_MUTEX(kprobe_insn_mutex);	/* Protects kprobe_insn_pages */
static LIST_HEAD(kprobe_insn_pages);
static int kprobe_garbage_slots;
static int collect_garbage_slots(void);
static int __kprobes check_safety(void)
{
	int ret = 0;
#if defined(CONFIG_PREEMPT) && defined(CONFIG_FREEZER)
	ret = freeze_processes();
	if (ret == 0) {
		struct task_struct *p, *q;
		do_each_thread(p, q) {
			if (p != current && p->state == TASK_RUNNING &&
			    p->pid != 0) {
				printk("Check failed: %s is running\n", p->comm);
				ret = -1;
				goto loop_end;
			}
		} while_each_thread(p, q);
	}
loop_end:
	thaw_processes();
#else
	synchronize_sched();
#endif
	return ret;
}
/**
 * __get_insn_slot() - Find a slot on an executable page for an instruction.
 * We allocate an executable page if there's no room on existing ones.
 */
static kprobe_opcode_t __kprobes *__get_insn_slot(void)
{
	struct kprobe_insn_page *kip;

 retry:
	list_for_each_entry(kip, &kprobe_insn_pages, list) {
		if (kip->nused < INSNS_PER_PAGE) {
			int i;
			for (i = 0; i < INSNS_PER_PAGE; i++) {
				if (kip->slot_used[i] == SLOT_CLEAN) {
					kip->slot_used[i] = SLOT_USED;
					kip->nused++;
					return kip->insns + (i * MAX_INSN_SIZE);
				}
			}
			/* Surprise!  No unused slots.  Fix kip->nused. */
			kip->nused = INSNS_PER_PAGE;
		}
	}

	/* If there are any garbage slots, collect them and try again. */
	if (kprobe_garbage_slots && collect_garbage_slots() == 0)
		goto retry;

	/* All out of space.  Need to allocate a new page.  Use slot 0. */
	kip = kmalloc(sizeof(struct kprobe_insn_page), GFP_KERNEL);
	if (!kip)
		return NULL;

	/*
	 * Use module_alloc so this page is within +/- 2GB of where the
	 * kernel image and loaded module images reside. This is required
	 * so x86_64 can correctly handle the %rip-relative fixups.
	 */
	kip->insns = module_alloc(PAGE_SIZE);
	if (!kip->insns) {
		kfree(kip);
		return NULL;
	}
	INIT_LIST_HEAD(&kip->list);
	list_add(&kip->list, &kprobe_insn_pages);
	memset(kip->slot_used, SLOT_CLEAN, INSNS_PER_PAGE);
	kip->slot_used[0] = SLOT_USED;
	kip->nused = 1;
	kip->ngarbage = 0;
	return kip->insns;
}
kprobe_opcode_t __kprobes *get_insn_slot(void)
{
	kprobe_opcode_t *ret;

	mutex_lock(&kprobe_insn_mutex);
	ret = __get_insn_slot();
	mutex_unlock(&kprobe_insn_mutex);
	return ret;
}
/* Return 1 if the slot's whole page became unused (and was possibly freed),
 * otherwise 0. */
static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
{
	kip->slot_used[idx] = SLOT_CLEAN;
	kip->nused--;
	if (kip->nused == 0) {
		/*
		 * Page is no longer in use.  Free it unless
		 * it's the last one.  We keep the last one
		 * so as not to have to set it up again the
		 * next time somebody inserts a probe.
		 */
		if (!list_is_singular(&kprobe_insn_pages)) {
			list_del(&kip->list);
			module_free(NULL, kip->insns);
			kfree(kip);
		}
		return 1;
	}
	return 0;
}
static int __kprobes collect_garbage_slots(void)
{
	struct kprobe_insn_page *kip, *next;

	/* Ensure no one is preempted on the garbage slots */
	if (check_safety())
		return -EAGAIN;

	list_for_each_entry_safe(kip, next, &kprobe_insn_pages, list) {
		int i;
		if (kip->ngarbage == 0)
			continue;
		kip->ngarbage = 0;	/* we will collect all garbage slots */
		for (i = 0; i < INSNS_PER_PAGE; i++) {
			if (kip->slot_used[i] == SLOT_DIRTY &&
			    collect_one_slot(kip, i))
				break;
		}
	}
	kprobe_garbage_slots = 0;
	return 0;
}
void __kprobes free_insn_slot(kprobe_opcode_t *slot, int dirty)
{
	struct kprobe_insn_page *kip;

	mutex_lock(&kprobe_insn_mutex);
	list_for_each_entry(kip, &kprobe_insn_pages, list) {
		if (kip->insns <= slot &&
		    slot < kip->insns + (INSNS_PER_PAGE * MAX_INSN_SIZE)) {
			int i = (slot - kip->insns) / MAX_INSN_SIZE;
			if (dirty) {
				kip->slot_used[i] = SLOT_DIRTY;
				kip->ngarbage++;
			} else
				collect_one_slot(kip, i);
			break;
		}
	}

	if (dirty && ++kprobe_garbage_slots > INSNS_PER_PAGE)
		collect_garbage_slots();

	mutex_unlock(&kprobe_insn_mutex);
}
#endif /* __ARCH_WANT_KPROBES_INSN_SLOT */
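
/*
 * Illustration (not part of the original file): a sketch of how arch
 * code typically consumes the slot API above. The function names and
 * the ainsn/memcpy details are assumptions modeled loosely on
 * arch/x86/kernel/kprobes.c, not a copy of any real implementation.
 */
#if 0	/* example only */
static int arch_prepare_kprobe_sketch(struct kprobe *p)
{
	p->ainsn.insn = get_insn_slot();	/* executable slot */
	if (!p->ainsn.insn)
		return -ENOMEM;
	/* Copy the original instruction so it can be single-stepped. */
	memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
	p->opcode = *p->addr;
	return 0;
}

static void arch_remove_kprobe_sketch(struct kprobe *p)
{
	if (p->ainsn.insn)
		free_insn_slot(p->ainsn.insn, 1);	/* dirty: collected lazily */
}
#endif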
/* We have preemption disabled, so it is safe to use __ versions */
static inline void set_kprobe_instance(struct kprobe *kp)
{
	__get_cpu_var(kprobe_instance) = kp;
}

static inline void reset_kprobe_instance(void)
{
	__get_cpu_var(kprobe_instance) = NULL;
}
/*
 * This routine is called either:
 *	- under the kprobe_mutex - during kprobe_[un]register()
 *				OR
 *	- with preemption disabled - from arch/xxx/kernel/kprobes.c
 */
struct kprobe __kprobes *get_kprobe(void *addr)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;

	head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
	hlist_for_each_entry_rcu(p, node, head, hlist) {
		if (p->addr == addr)
			return p;
	}
	return NULL;
}
/* Arm a kprobe with text_mutex */
static void __kprobes arm_kprobe(struct kprobe *kp)
{
	mutex_lock(&text_mutex);
	arch_arm_kprobe(kp);
	mutex_unlock(&text_mutex);
}

/* Disarm a kprobe with text_mutex */
static void __kprobes disarm_kprobe(struct kprobe *kp)
{
	mutex_lock(&text_mutex);
	arch_disarm_kprobe(kp);
	mutex_unlock(&text_mutex);
}
/*
 * Aggregate handlers for multiple kprobes support - these handlers
 * take care of invoking the individual kprobe handlers on p->list
 */
static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
			set_kprobe_instance(kp);
			if (kp->pre_handler(kp, regs))
				return 1;
		}
		reset_kprobe_instance();
	}
	return 0;
}
static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
					unsigned long flags)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->post_handler && likely(!kprobe_disabled(kp))) {
			set_kprobe_instance(kp);
			kp->post_handler(kp, regs, flags);
			reset_kprobe_instance();
		}
	}
}
static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
					int trapnr)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);

	/*
	 * If we faulted "during" the execution of a user specified
	 * probe handler, invoke just that probe's fault handler.
	 */
	if (cur && cur->fault_handler) {
		if (cur->fault_handler(cur, regs, trapnr))
			return 1;
	}
	return 0;
}
static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);
	int ret = 0;

	if (cur && cur->break_handler) {
		if (cur->break_handler(cur, regs))
			ret = 1;
	}
	reset_kprobe_instance();
	return ret;
}
/* Walks the list and increments nmissed count for multiprobe case */
void __kprobes kprobes_inc_nmissed_count(struct kprobe *p)
{
	struct kprobe *kp;
	if (p->pre_handler != aggr_pre_handler) {
		p->nmissed++;
	} else {
		list_for_each_entry_rcu(kp, &p->list, list)
			kp->nmissed++;
	}
	return;
}
void __kprobes recycle_rp_inst(struct kretprobe_instance *ri,
			       struct hlist_head *head)
{
	struct kretprobe *rp = ri->rp;

	/* remove rp inst off the kretprobe_inst_table */
	hlist_del(&ri->hlist);
	INIT_HLIST_NODE(&ri->hlist);
	if (likely(rp)) {
		spin_lock(&rp->lock);
		hlist_add_head(&ri->hlist, &rp->free_instances);
		spin_unlock(&rp->lock);
	} else
		/* Unregistering */
		hlist_add_head(&ri->hlist, head);
}
void __kprobes kretprobe_hash_lock(struct task_struct *tsk,
			 struct hlist_head **head, unsigned long *flags)
{
	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
	spinlock_t *hlist_lock;

	*head = &kretprobe_inst_table[hash];
	hlist_lock = kretprobe_table_lock_ptr(hash);
	spin_lock_irqsave(hlist_lock, *flags);
}
static void __kprobes kretprobe_table_lock(unsigned long hash,
	 unsigned long *flags)
{
	spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
	spin_lock_irqsave(hlist_lock, *flags);
}
void __kprobes kretprobe_hash_unlock(struct task_struct *tsk,
	 unsigned long *flags)
{
	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
	spinlock_t *hlist_lock;

	hlist_lock = kretprobe_table_lock_ptr(hash);
	spin_unlock_irqrestore(hlist_lock, *flags);
}
void __kprobes kretprobe_table_unlock(unsigned long hash, unsigned long *flags)
{
	spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
	spin_unlock_irqrestore(hlist_lock, *flags);
}
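
/*
 * Illustration (assumption, not from this file): how an architecture's
 * return-trampoline handler pairs the helpers above around the per-task
 * bucket of kretprobe instances. The loop body is condensed.
 */
#if 0	/* example only */
	struct kretprobe_instance *ri;
	struct hlist_node *node, *tmp;
	struct hlist_head *head;
	unsigned long flags;

	kretprobe_hash_lock(current, &head, &flags);
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task != current)
			continue;	/* other tasks can hash to this bucket */
		/* ... invoke ri->rp->handler(ri, regs), then recycle ri ... */
	}
	kretprobe_hash_unlock(current, &flags);
#endif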
/*
 * This function is called from finish_task_switch() when task tk becomes
 * dead, so that we can recycle any function-return probe instances
 * associated with this task. These left over instances represent probed
 * functions that have been called but will never return.
 */
void __kprobes kprobe_flush_task(struct task_struct *tk)
{
	struct kretprobe_instance *ri;
	struct hlist_head *head, empty_rp;
	struct hlist_node *node, *tmp;
	unsigned long hash, flags = 0;

	if (unlikely(!kprobes_initialized))
		/* Early boot.  kretprobe_table_locks not yet initialized. */
		return;

	/* Initialize empty_rp before recycle_rp_inst() can add to it. */
	INIT_HLIST_HEAD(&empty_rp);
	hash = hash_ptr(tk, KPROBE_HASH_BITS);
	head = &kretprobe_inst_table[hash];
	kretprobe_table_lock(hash, &flags);
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task == tk)
			recycle_rp_inst(ri, &empty_rp);
	}
	kretprobe_table_unlock(hash, &flags);
	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
}
static inline void free_rp_inst(struct kretprobe *rp)
{
	struct kretprobe_instance *ri;
	struct hlist_node *pos, *next;

	hlist_for_each_entry_safe(ri, pos, next, &rp->free_instances, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
}
static void __kprobes cleanup_rp_inst(struct kretprobe *rp)
{
	unsigned long flags, hash;
	struct kretprobe_instance *ri;
	struct hlist_node *pos, *next;
	struct hlist_head *head;

	/* No race here */
	for (hash = 0; hash < KPROBE_TABLE_SIZE; hash++) {
		kretprobe_table_lock(hash, &flags);
		head = &kretprobe_inst_table[hash];
		hlist_for_each_entry_safe(ri, pos, next, head, hlist) {
			if (ri->rp == rp)
				ri->rp = NULL;
		}
		kretprobe_table_unlock(hash, &flags);
	}
	free_rp_inst(rp);
}
/*
 * Keep all fields in the kprobe consistent
 */
static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
{
	memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t));
	memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn));
}
/*
 * Add the new probe to ap->list. Fail if this is the
 * second jprobe at the address - two jprobes can't coexist.
 */
static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p)
{
	BUG_ON(kprobe_gone(ap) || kprobe_gone(p));
	if (p->break_handler) {
		if (ap->break_handler)
			return -EEXIST;
		list_add_tail_rcu(&p->list, &ap->list);
		ap->break_handler = aggr_break_handler;
	} else
		list_add_rcu(&p->list, &ap->list);
	if (p->post_handler && !ap->post_handler)
		ap->post_handler = aggr_post_handler;

	if (kprobe_disabled(ap) && !kprobe_disabled(p)) {
		ap->flags &= ~KPROBE_FLAG_DISABLED;
		if (!kprobes_all_disarmed)
			/* Arm the breakpoint again. */
			arm_kprobe(ap);
	}
	return 0;
}
/*
 * Fill in the required fields of the "manager kprobe". Replace the
 * earlier kprobe in the hlist with the manager kprobe.
 */
static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
{
	/* Copy p's insn slot to ap */
	copy_kprobe(p, ap);
	flush_insn_slot(ap);
	ap->addr = p->addr;
	ap->flags = p->flags;
	ap->pre_handler = aggr_pre_handler;
	ap->fault_handler = aggr_fault_handler;
	/* We don't care about a kprobe which has gone. */
	if (p->post_handler && !kprobe_gone(p))
		ap->post_handler = aggr_post_handler;
	if (p->break_handler && !kprobe_gone(p))
		ap->break_handler = aggr_break_handler;

	INIT_LIST_HEAD(&ap->list);
	list_add_rcu(&p->list, &ap->list);

	hlist_replace_rcu(&p->hlist, &ap->hlist);
}
/*
 * This is the second or subsequent kprobe at the address - handle
 * the intricacies.
 */
static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
					  struct kprobe *p)
{
	int ret = 0;
	struct kprobe *ap = old_p;

	if (old_p->pre_handler != aggr_pre_handler) {
		/* If old_p is not an aggr_kprobe, create a new aggr_kprobe. */
		ap = kzalloc(sizeof(struct kprobe), GFP_KERNEL);
		if (!ap)
			return -ENOMEM;
		add_aggr_kprobe(ap, old_p);
	}

	if (kprobe_gone(ap)) {
		/*
		 * We are attempting to insert a new probe at the same
		 * location as a probe whose module vaddr area has already
		 * been freed, so its instruction slot has already been
		 * released. We need a new slot for the new probe.
		 */
		ret = arch_prepare_kprobe(ap);
		if (ret)
			/*
			 * Even if we fail to allocate a new slot, we don't
			 * need to free the aggr_kprobe. It will be used next
			 * time, or freed by unregister_kprobe().
			 */
			return ret;

		/*
		 * Clear the gone flag to prevent allocating a new slot again,
		 * and set the disabled flag because it is not armed yet.
		 */
		ap->flags = (ap->flags & ~KPROBE_FLAG_GONE)
			    | KPROBE_FLAG_DISABLED;
	}

	copy_kprobe(ap, p);
	return add_new_kprobe(ap, p);
}
/* Try to disable an aggr_kprobe, and return 1 if succeeded. */
static int __kprobes try_to_disable_aggr_kprobe(struct kprobe *p)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (!kprobe_disabled(kp))
			/*
			 * There is an active probe on the list.
			 * We can't disable the aggr_kprobe.
			 */
			return 0;
	}
	p->flags |= KPROBE_FLAG_DISABLED;
	return 1;
}
static int __kprobes in_kprobes_functions(unsigned long addr)
{
	struct kprobe_blackpoint *kb;

	if (addr >= (unsigned long)__kprobes_text_start &&
	    addr < (unsigned long)__kprobes_text_end)
		return -EINVAL;
	/*
	 * If there exists a kprobe_blacklist, verify and
	 * fail any probe registration in the prohibited area.
	 */
	for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
		if (kb->start_addr) {
			if (addr >= kb->start_addr &&
			    addr < (kb->start_addr + kb->range))
				return -EINVAL;
		}
	}
	return 0;
}
/*
 * If we have a symbol_name argument, look it up and add the offset field
 * to it. This way, we can specify a relative address to a symbol.
 */
static kprobe_opcode_t __kprobes *kprobe_addr(struct kprobe *p)
{
	kprobe_opcode_t *addr = p->addr;
	if (p->symbol_name) {
		if (addr)
			return NULL;
		kprobe_lookup_name(p->symbol_name, addr);
	}

	if (!addr)
		return NULL;
	return (kprobe_opcode_t *)(((char *)addr) + p->offset);
}
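
/*
 * Illustration (assumption, not part of the original file): a probe
 * point may be given either as a raw address in .addr or, as below, as
 * .symbol_name plus .offset. Setting both .symbol_name and .addr makes
 * kprobe_addr() return NULL, so registration fails with -EINVAL. The
 * symbol and offset here are made up for the example.
 */
#if 0	/* example only */
static struct kprobe example_sym_probe = {
	.symbol_name	= "do_fork",	/* resolved via kprobe_lookup_name() */
	.offset		= 0x4,		/* bytes added to the symbol address */
};
#endif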
/* Check that the passed kprobe is valid and return the kprobe in kprobe_table. */
static struct kprobe * __kprobes __get_valid_kprobe(struct kprobe *p)
{
	struct kprobe *old_p, *list_p;

	old_p = get_kprobe(p->addr);
	if (unlikely(!old_p))
		return NULL;

	if (p != old_p) {
		list_for_each_entry_rcu(list_p, &old_p->list, list)
			if (list_p == p)
			/* kprobe p is a valid probe */
				goto valid;
		return NULL;
	}
valid:
	return old_p;
}
/* Return an error if the kprobe is being re-registered */
static inline int check_kprobe_rereg(struct kprobe *p)
{
	int ret = 0;
	struct kprobe *old_p;

	mutex_lock(&kprobe_mutex);
	old_p = __get_valid_kprobe(p);
	if (old_p)
		ret = -EINVAL;
	mutex_unlock(&kprobe_mutex);
	return ret;
}
int __kprobes register_kprobe(struct kprobe *p)
{
	int ret = 0;
	struct kprobe *old_p;
	struct module *probed_mod;
	kprobe_opcode_t *addr;

	addr = kprobe_addr(p);
	if (!addr)
		return -EINVAL;
	p->addr = addr;

	ret = check_kprobe_rereg(p);
	if (ret)
		return ret;

	preempt_disable();
	if (!kernel_text_address((unsigned long) p->addr) ||
	    in_kprobes_functions((unsigned long) p->addr)) {
		preempt_enable();
		return -EINVAL;
	}

	/* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
	p->flags &= KPROBE_FLAG_DISABLED;

	/*
	 * Check if we are probing a module.
	 */
	probed_mod = __module_text_address((unsigned long) p->addr);
	if (probed_mod) {
		/*
		 * We must hold a refcount of the probed module while updating
		 * its code to prohibit unexpected unloading.
		 */
		if (unlikely(!try_module_get(probed_mod))) {
			preempt_enable();
			return -EINVAL;
		}
		/*
		 * If the module has freed .init.text, we can't insert
		 * kprobes there.
		 */
		if (within_module_init((unsigned long)p->addr, probed_mod) &&
		    probed_mod->state != MODULE_STATE_COMING) {
			module_put(probed_mod);
			preempt_enable();
			return -EINVAL;
		}
	}
	preempt_enable();

	p->nmissed = 0;
	INIT_LIST_HEAD(&p->list);
	mutex_lock(&kprobe_mutex);
	old_p = get_kprobe(p->addr);
	if (old_p) {
		ret = register_aggr_kprobe(old_p, p);
		goto out;
	}

	mutex_lock(&text_mutex);
	ret = arch_prepare_kprobe(p);
	if (ret)
		goto out_unlock_text;

	INIT_HLIST_NODE(&p->hlist);
	hlist_add_head_rcu(&p->hlist,
		       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);

	if (!kprobes_all_disarmed && !kprobe_disabled(p))
		arch_arm_kprobe(p);

out_unlock_text:
	mutex_unlock(&text_mutex);
out:
	mutex_unlock(&kprobe_mutex);

	if (probed_mod)
		module_put(probed_mod);

	return ret;
}
EXPORT_SYMBOL_GPL(register_kprobe);
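
/*
 * A minimal usage sketch (modeled on samples/kprobes/kprobe_example.c;
 * the probed symbol "do_fork" and the counter are illustrative
 * assumptions). Build as a separate module; it is not part of this file.
 */
#if 0	/* example only */
static unsigned long example_hits;

static int example_pre(struct kprobe *p, struct pt_regs *regs)
{
	example_hits++;		/* runs just before the probed instruction */
	return 0;		/* 0: proceed with single-stepping */
}

static struct kprobe example_kp = {
	.symbol_name	= "do_fork",
	.pre_handler	= example_pre,
};

static int __init example_init(void)
{
	return register_kprobe(&example_kp);	/* negative errno on failure */
}

static void __exit example_exit(void)
{
	unregister_kprobe(&example_kp);
}
module_init(example_init);
module_exit(example_exit);
#endif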
/*
 * Unregister a kprobe without a scheduler synchronization.
 */
static int __kprobes __unregister_kprobe_top(struct kprobe *p)
{
	struct kprobe *old_p, *list_p;

	old_p = __get_valid_kprobe(p);
	if (old_p == NULL)
		return -EINVAL;

	if (old_p == p ||
	    (old_p->pre_handler == aggr_pre_handler &&
	     list_is_singular(&old_p->list))) {
		/*
		 * Only probe on the hash list. Disarm only if kprobes are
		 * enabled and not gone - otherwise, the breakpoint would
		 * already have been removed. We save on flushing icache.
		 */
		if (!kprobes_all_disarmed && !kprobe_disabled(old_p))
			disarm_kprobe(p);
		hlist_del_rcu(&old_p->hlist);
	} else {
		if (p->break_handler && !kprobe_gone(p))
			old_p->break_handler = NULL;
		if (p->post_handler && !kprobe_gone(p)) {
			list_for_each_entry_rcu(list_p, &old_p->list, list) {
				if ((list_p != p) && (list_p->post_handler))
					goto noclean;
			}
			old_p->post_handler = NULL;
		}
noclean:
		list_del_rcu(&p->list);
		if (!kprobe_disabled(old_p)) {
			try_to_disable_aggr_kprobe(old_p);
			if (!kprobes_all_disarmed && kprobe_disabled(old_p))
				disarm_kprobe(old_p);
		}
	}
	return 0;
}
static void __kprobes __unregister_kprobe_bottom(struct kprobe *p)
{
	struct kprobe *old_p;

	if (list_empty(&p->list))
		arch_remove_kprobe(p);
	else if (list_is_singular(&p->list)) {
		/* "p" is the last child of an aggr_kprobe */
		old_p = list_entry(p->list.next, struct kprobe, list);
		list_del(&p->list);
		arch_remove_kprobe(old_p);
		kfree(old_p);
	}
}
int __kprobes register_kprobes(struct kprobe **kps, int num)
{
	int i, ret = 0;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		ret = register_kprobe(kps[i]);
		if (ret < 0) {
			if (i > 0)
				unregister_kprobes(kps, i);
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(register_kprobes);
void __kprobes unregister_kprobe(struct kprobe *p)
{
	unregister_kprobes(&p, 1);
}
EXPORT_SYMBOL_GPL(unregister_kprobe);

void __kprobes unregister_kprobes(struct kprobe **kps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(kps[i]) < 0)
			kps[i]->addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++)
		if (kps[i]->addr)
			__unregister_kprobe_bottom(kps[i]);
}
EXPORT_SYMBOL_GPL(unregister_kprobes);
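
/*
 * Batch-API sketch (example_kp1/example_kp2 are hypothetical probes, not
 * defined in this file): register_kprobes() stops at the first failure
 * and unregisters the probes it had already registered, so the caller
 * needs no manual rollback.
 */
#if 0	/* example only */
static struct kprobe *example_probes[] = { &example_kp1, &example_kp2 };

static int __init example_batch_init(void)
{
	/* Stops at the first failure and rolls back what was registered. */
	return register_kprobes(example_probes, ARRAY_SIZE(example_probes));
}

static void __exit example_batch_exit(void)
{
	unregister_kprobes(example_probes, ARRAY_SIZE(example_probes));
}
#endif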
static struct notifier_block kprobe_exceptions_nb = {
	.notifier_call = kprobe_exceptions_notify,
	.priority = 0x7fffffff /* we need to be notified first */
};
unsigned long __weak arch_deref_entry_point(void *entry)
{
	return (unsigned long)entry;
}
int __kprobes register_jprobes(struct jprobe **jps, int num)
{
	struct jprobe *jp;
	int ret = 0, i;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		unsigned long addr;
		jp = jps[i];
		addr = arch_deref_entry_point(jp->entry);

		if (!kernel_text_address(addr))
			ret = -EINVAL;
		else {
			/* Todo: Verify probepoint is a function entry point */
			jp->kp.pre_handler = setjmp_pre_handler;
			jp->kp.break_handler = longjmp_break_handler;
			ret = register_kprobe(&jp->kp);
		}
		if (ret < 0) {
			if (i > 0)
				unregister_jprobes(jps, i);
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(register_jprobes);
int __kprobes register_jprobe(struct jprobe *jp)
{
	return register_jprobes(&jp, 1);
}
EXPORT_SYMBOL_GPL(register_jprobe);

void __kprobes unregister_jprobe(struct jprobe *jp)
{
	unregister_jprobes(&jp, 1);
}
EXPORT_SYMBOL_GPL(unregister_jprobe);
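
/*
 * A minimal jprobe sketch (modeled on samples/kprobes/jprobe_example.c;
 * the do_fork signature matches kernels of this vintage, but treat it as
 * an assumption). A jprobe handler must mirror the probed function's
 * signature and must always end with jprobe_return().
 */
#if 0	/* example only */
static long example_jdo_fork(unsigned long clone_flags,
			     unsigned long stack_start,
			     struct pt_regs *regs, unsigned long stack_size,
			     int __user *parent_tidptr,
			     int __user *child_tidptr)
{
	printk(KERN_INFO "jprobe: clone_flags = 0x%lx\n", clone_flags);
	jprobe_return();	/* mandatory; transfers control back */
	return 0;		/* never reached */
}

static struct jprobe example_jp = {
	.entry	= example_jdo_fork,
	.kp	= { .symbol_name = "do_fork" },
};

/* register_jprobe(&example_jp); ... unregister_jprobe(&example_jp); */
#endif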
void __kprobes unregister_jprobes(struct jprobe **jps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(&jps[i]->kp) < 0)
			jps[i]->kp.addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++) {
		if (jps[i]->kp.addr)
			__unregister_kprobe_bottom(&jps[i]->kp);
	}
}
EXPORT_SYMBOL_GPL(unregister_jprobes);
#ifdef CONFIG_KRETPROBES
/*
 * This kprobe pre_handler is registered with every kretprobe. When the
 * probe hits, it will set up the return probe.
 */
static int __kprobes pre_handler_kretprobe(struct kprobe *p,
					   struct pt_regs *regs)
{
	struct kretprobe *rp = container_of(p, struct kretprobe, kp);
	unsigned long hash, flags = 0;
	struct kretprobe_instance *ri;

	/* TODO: consider swapping the return address only after the last
	 * pre_handler has fired */
	hash = hash_ptr(current, KPROBE_HASH_BITS);
	spin_lock_irqsave(&rp->lock, flags);
	if (!hlist_empty(&rp->free_instances)) {
		ri = hlist_entry(rp->free_instances.first,
				struct kretprobe_instance, hlist);
		hlist_del(&ri->hlist);
		spin_unlock_irqrestore(&rp->lock, flags);

		ri->rp = rp;
		ri->task = current;

		if (rp->entry_handler && rp->entry_handler(ri, regs))
			return 0;

		arch_prepare_kretprobe(ri, regs);

		/* XXX(hch): why is there no hlist_move_head? */
		INIT_HLIST_NODE(&ri->hlist);
		kretprobe_table_lock(hash, &flags);
		hlist_add_head(&ri->hlist, &kretprobe_inst_table[hash]);
		kretprobe_table_unlock(hash, &flags);
	} else {
		rp->nmissed++;
		spin_unlock_irqrestore(&rp->lock, flags);
	}
	return 0;
}
int __kprobes register_kretprobe(struct kretprobe *rp)
{
	int ret = 0;
	struct kretprobe_instance *inst;
	int i;
	void *addr;

	if (kretprobe_blacklist_size) {
		addr = kprobe_addr(&rp->kp);
		if (!addr)
			return -EINVAL;

		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
			if (kretprobe_blacklist[i].addr == addr)
				return -EINVAL;
		}
	}

	rp->kp.pre_handler = pre_handler_kretprobe;
	rp->kp.post_handler = NULL;
	rp->kp.fault_handler = NULL;
	rp->kp.break_handler = NULL;

	/* Pre-allocate memory for max kretprobe instances */
	if (rp->maxactive <= 0) {
#ifdef CONFIG_PREEMPT
		rp->maxactive = max_t(unsigned int, 10, 2*num_possible_cpus());
#else
		rp->maxactive = num_possible_cpus();
#endif
	}
	spin_lock_init(&rp->lock);
	INIT_HLIST_HEAD(&rp->free_instances);
	for (i = 0; i < rp->maxactive; i++) {
		inst = kmalloc(sizeof(struct kretprobe_instance) +
			       rp->data_size, GFP_KERNEL);
		if (inst == NULL) {
			free_rp_inst(rp);
			return -ENOMEM;
		}
		INIT_HLIST_NODE(&inst->hlist);
		hlist_add_head(&inst->hlist, &rp->free_instances);
	}

	rp->nmissed = 0;
	/* Establish function entry probe point */
	ret = register_kprobe(&rp->kp);
	if (ret != 0)
		free_rp_inst(rp);
	return ret;
}
EXPORT_SYMBOL_GPL(register_kretprobe);
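
/*
 * A minimal kretprobe sketch (modeled on
 * samples/kprobes/kretprobe_example.c; the symbol and handler body are
 * illustrative assumptions). The handler runs as the probed function
 * returns; regs_return_value() extracts its return value in an
 * arch-independent way.
 */
#if 0	/* example only */
static int example_ret_handler(struct kretprobe_instance *ri,
			       struct pt_regs *regs)
{
	printk(KERN_INFO "do_fork returned %ld\n",
	       (long)regs_return_value(regs));
	return 0;
}

static struct kretprobe example_rp = {
	.handler	= example_ret_handler,
	.kp		= { .symbol_name = "do_fork" },
	.maxactive	= 20,	/* tolerate 20 concurrent invocations */
};

/*
 * register_kretprobe(&example_rp); ... unregister_kretprobe(&example_rp);
 * example_rp.nmissed counts entries that found no free instance.
 */
#endif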
int __kprobes register_kretprobes(struct kretprobe **rps, int num)
{
	int ret = 0, i;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		ret = register_kretprobe(rps[i]);
		if (ret < 0) {
			if (i > 0)
				unregister_kretprobes(rps, i);
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(register_kretprobes);

void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
	unregister_kretprobes(&rp, 1);
}
EXPORT_SYMBOL_GPL(unregister_kretprobe);

void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(&rps[i]->kp) < 0)
			rps[i]->kp.addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++) {
		if (rps[i]->kp.addr) {
			__unregister_kprobe_bottom(&rps[i]->kp);
			cleanup_rp_inst(rps[i]);
		}
	}
}
EXPORT_SYMBOL_GPL(unregister_kretprobes);
#else /* CONFIG_KRETPROBES */
int __kprobes register_kretprobe(struct kretprobe *rp)
{
	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(register_kretprobe);

int __kprobes register_kretprobes(struct kretprobe **rps, int num)
{
	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(register_kretprobes);

void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
}
EXPORT_SYMBOL_GPL(unregister_kretprobe);

void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
{
}
EXPORT_SYMBOL_GPL(unregister_kretprobes);

static int __kprobes pre_handler_kretprobe(struct kprobe *p,
					   struct pt_regs *regs)
{
	return 0;
}
#endif /* CONFIG_KRETPROBES */
/* Set the kprobe gone and remove its instruction buffer. */
static void __kprobes kill_kprobe(struct kprobe *p)
{
	struct kprobe *kp;

	p->flags |= KPROBE_FLAG_GONE;
	if (p->pre_handler == aggr_pre_handler) {
		/*
		 * If this is an aggr_kprobe, we have to list all the
		 * chained probes and mark them GONE.
		 */
		list_for_each_entry_rcu(kp, &p->list, list)
			kp->flags |= KPROBE_FLAG_GONE;
		p->post_handler = NULL;
		p->break_handler = NULL;
	}
	/*
	 * Here, we can remove the insn_slot safely, because no thread calls
	 * the original probed function (which will be freed soon) any more.
	 */
	arch_remove_kprobe(p);
}
void __kprobes dump_kprobe(struct kprobe *kp)
{
	printk(KERN_WARNING "Dumping kprobe:\n");
	printk(KERN_WARNING "Name: %s\nAddress: %p\nOffset: %x\n",
	       kp->symbol_name, kp->addr, kp->offset);
}
/* Module notifier callback, checking kprobes on the module */
static int __kprobes kprobes_module_callback(struct notifier_block *nb,
					     unsigned long val, void *data)
{
	struct module *mod = data;
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;
	int checkcore = (val == MODULE_STATE_GOING);

	if (val != MODULE_STATE_GOING && val != MODULE_STATE_LIVE)
		return NOTIFY_DONE;

	/*
	 * When MODULE_STATE_GOING is notified, both the module's .text and
	 * .init.text sections will be freed. When MODULE_STATE_LIVE is
	 * notified, only the .init.text section will be freed. We need to
	 * disable kprobes which have been inserted in those sections.
	 */
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist)
			if (within_module_init((unsigned long)p->addr, mod) ||
			    (checkcore &&
			     within_module_core((unsigned long)p->addr, mod))) {
				/*
				 * The vaddr this probe is installed at will
				 * soon be vfreed but not synced to disk.
				 * Hence, disarming the breakpoint isn't
				 * needed.
				 */
				kill_kprobe(p);
			}
	}
	mutex_unlock(&kprobe_mutex);
	return NOTIFY_DONE;
}
static struct notifier_block kprobe_module_nb = {
	.notifier_call = kprobes_module_callback,
	.priority = 0
};
static int __init init_kprobes(void)
{
	int i, err = 0;
	unsigned long offset = 0, size = 0;
	char *modname, namebuf[128];
	const char *symbol_name;
	void *addr;
	struct kprobe_blackpoint *kb;

	/* FIXME allocate the probe table, currently defined statically */
	/* initialize all list heads */
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		INIT_HLIST_HEAD(&kprobe_table[i]);
		INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
		spin_lock_init(&(kretprobe_table_locks[i].lock));
	}

	/*
	 * Lookup and populate the kprobe_blacklist.
	 *
	 * Unlike the kretprobe blacklist, we'll need to determine
	 * the range of addresses that belong to the said functions,
	 * since a kprobe need not necessarily be at the beginning
	 * of a function.
	 */
	for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
		kprobe_lookup_name(kb->name, addr);
		if (!addr)
			continue;

		kb->start_addr = (unsigned long)addr;
		symbol_name = kallsyms_lookup(kb->start_addr,
				&size, &offset, &modname, namebuf);
		if (!symbol_name)
			kb->range = 0;
		else
			kb->range = size;
	}

	if (kretprobe_blacklist_size) {
		/* lookup the function address from its name */
		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
			kprobe_lookup_name(kretprobe_blacklist[i].name,
					   kretprobe_blacklist[i].addr);
			if (!kretprobe_blacklist[i].addr)
				printk("kretprobe: lookup failed: %s\n",
				       kretprobe_blacklist[i].name);
		}
	}

	/* By default, kprobes are armed */
	kprobes_all_disarmed = false;

	err = arch_init_kprobes();
	if (!err)
		err = register_die_notifier(&kprobe_exceptions_nb);
	if (!err)
		err = register_module_notifier(&kprobe_module_nb);

	kprobes_initialized = (err == 0);

	if (!err)
		init_test_probes();
	return err;
}
#ifdef CONFIG_DEBUG_FS
static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
		const char *sym, int offset, char *modname)
{
	char *kprobe_type;

	if (p->pre_handler == pre_handler_kretprobe)
		kprobe_type = "r";
	else if (p->pre_handler == setjmp_pre_handler)
		kprobe_type = "j";
	else
		kprobe_type = "k";
	if (sym)
		seq_printf(pi, "%p  %s  %s+0x%x  %s %s%s\n",
			p->addr, kprobe_type, sym, offset,
			(modname ? modname : " "),
			(kprobe_gone(p) ? "[GONE]" : ""),
			((kprobe_disabled(p) && !kprobe_gone(p)) ?
			 "[DISABLED]" : ""));
	else
		seq_printf(pi, "%p  %s  %p %s%s\n",
			p->addr, kprobe_type, p->addr,
			(kprobe_gone(p) ? "[GONE]" : ""),
			((kprobe_disabled(p) && !kprobe_gone(p)) ?
			 "[DISABLED]" : ""));
}
static void __kprobes *kprobe_seq_start(struct seq_file *f, loff_t *pos)
{
	return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
}

static void __kprobes *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos)
{
	(*pos)++;
	if (*pos >= KPROBE_TABLE_SIZE)
		return NULL;
	return pos;
}

static void __kprobes kprobe_seq_stop(struct seq_file *f, void *v)
{
	/* Nothing to do */
}
static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p, *kp;
	const char *sym = NULL;
	unsigned int i = *(loff_t *) v;
	unsigned long offset = 0;
	char *modname, namebuf[128];

	head = &kprobe_table[i];
	preempt_disable();
	hlist_for_each_entry_rcu(p, node, head, hlist) {
		sym = kallsyms_lookup((unsigned long)p->addr, NULL,
					&offset, &modname, namebuf);
		if (p->pre_handler == aggr_pre_handler) {
			list_for_each_entry_rcu(kp, &p->list, list)
				report_probe(pi, kp, sym, offset, modname);
		} else
			report_probe(pi, p, sym, offset, modname);
	}
	preempt_enable();
	return 0;
}
static const struct seq_operations kprobes_seq_ops = {
	.start = kprobe_seq_start,
	.next  = kprobe_seq_next,
	.stop  = kprobe_seq_stop,
	.show  = show_kprobe_addr
};

static int __kprobes kprobes_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &kprobes_seq_ops);
}

static const struct file_operations debugfs_kprobes_operations = {
	.open           = kprobes_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = seq_release,
};
/* Disable one kprobe */
int __kprobes disable_kprobe(struct kprobe *kp)
{
	int ret = 0;
	struct kprobe *p;

	mutex_lock(&kprobe_mutex);

	/* Check whether the specified probe is valid. */
	p = __get_valid_kprobe(kp);
	if (unlikely(p == NULL)) {
		ret = -EINVAL;
		goto out;
	}

	/* If the probe is already disabled (or gone), just return */
	if (kprobe_disabled(kp))
		goto out;

	kp->flags |= KPROBE_FLAG_DISABLED;
	if (p != kp)
		/* When kp != p, p is always enabled. */
		try_to_disable_aggr_kprobe(p);

	if (!kprobes_all_disarmed && kprobe_disabled(p))
		disarm_kprobe(p);
out:
	mutex_unlock(&kprobe_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(disable_kprobe);
/* Enable one kprobe */
int __kprobes enable_kprobe(struct kprobe *kp)
{
	int ret = 0;
	struct kprobe *p;

	mutex_lock(&kprobe_mutex);

	/* Check whether the specified probe is valid. */
	p = __get_valid_kprobe(kp);
	if (unlikely(p == NULL)) {
		ret = -EINVAL;
		goto out;
	}

	if (kprobe_gone(kp)) {
		/* This kprobe has gone; we can't enable it. */
		ret = -EINVAL;
		goto out;
	}

	if (!kprobes_all_disarmed && kprobe_disabled(p))
		arm_kprobe(p);

	p->flags &= ~KPROBE_FLAG_DISABLED;
	if (p != kp)
		kp->flags &= ~KPROBE_FLAG_DISABLED;
out:
	mutex_unlock(&kprobe_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(enable_kprobe);
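
/*
 * Usage sketch (example_kp is a hypothetical registered probe): a probe
 * can be parked without tearing it down. disable_kprobe() removes the
 * breakpoint if it was armed; enable_kprobe() re-arms it in place.
 */
#if 0	/* example only */
	disable_kprobe(&example_kp);	/* handlers stop firing */
	/* ... quiescent section ... */
	enable_kprobe(&example_kp);	/* breakpoint re-armed */
#endif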
static void __kprobes arm_all_kprobes(void)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);

	/* If kprobes are armed, just return */
	if (!kprobes_all_disarmed)
		goto already_enabled;

	mutex_lock(&text_mutex);
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist)
			if (!kprobe_disabled(p))
				arch_arm_kprobe(p);
	}
	mutex_unlock(&text_mutex);

	kprobes_all_disarmed = false;
	printk(KERN_INFO "Kprobes globally enabled\n");

already_enabled:
	mutex_unlock(&kprobe_mutex);
	return;
}
static void __kprobes disarm_all_kprobes(void)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);

	/* If kprobes are already disarmed, just return */
	if (kprobes_all_disarmed)
		goto already_disabled;

	kprobes_all_disarmed = true;
	printk(KERN_INFO "Kprobes globally disabled\n");
	mutex_lock(&text_mutex);
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist) {
			if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p))
				arch_disarm_kprobe(p);
		}
	}

	mutex_unlock(&text_mutex);
	mutex_unlock(&kprobe_mutex);
	/* Allow all currently running kprobes to complete */
	synchronize_sched();
	return;

already_disabled:
	mutex_unlock(&kprobe_mutex);
	return;
}
/*
 * XXX: The debugfs bool file interface doesn't allow for callbacks
 * when the bool state is switched. We can reuse that facility when
 * available.
 */
static ssize_t read_enabled_file_bool(struct file *file,
	       char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[3];

	if (!kprobes_all_disarmed)
		buf[0] = '1';
	else
		buf[0] = '0';
	buf[1] = '\n';
	buf[2] = 0x00;
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}
static ssize_t write_enabled_file_bool(struct file *file,
	       const char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[32];
	int buf_size;

	buf_size = min(count, (sizeof(buf)-1));
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	switch (buf[0]) {
	case 'y': case 'Y': case '1':
		arm_all_kprobes();
		break;
	case 'n': case 'N': case '0':
		disarm_all_kprobes();
		break;
	}

	return count;
}
static const struct file_operations fops_kp = {
	.read =         read_enabled_file_bool,
	.write =        write_enabled_file_bool,
};
static int __kprobes debugfs_kprobe_init(void)
{
	struct dentry *dir, *file;
	unsigned int value = 1;

	dir = debugfs_create_dir("kprobes", NULL);
	if (!dir)
		return -ENOMEM;

	file = debugfs_create_file("list", 0444, dir, NULL,
				&debugfs_kprobes_operations);
	if (!file) {
		debugfs_remove(dir);
		return -ENOMEM;
	}

	file = debugfs_create_file("enabled", 0600, dir,
					&value, &fops_kp);
	if (!file) {
		debugfs_remove(dir);
		return -ENOMEM;
	}

	return 0;
}

late_initcall(debugfs_kprobe_init);
#endif /* CONFIG_DEBUG_FS */
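
/*
 * Usage sketch for the debugfs interface above (assuming debugfs is
 * mounted at /sys/kernel/debug):
 *
 *	cat /sys/kernel/debug/kprobes/list
 *		# one line per probe: address, type (k/r/j),
 *		# symbol+offset [module] and [GONE]/[DISABLED] flags
 *	echo 0 > /sys/kernel/debug/kprobes/enabled	# disarm_all_kprobes()
 *	echo 1 > /sys/kernel/debug/kprobes/enabled	# arm_all_kprobes()
 */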
module_init(init_kprobes);

/* defined in arch/.../kernel/kprobes.c */
EXPORT_SYMBOL_GPL(jprobe_return);