kprobes: Jump optimization sysctl interface
Author: Masami Hiramatsu <mhiramat@redhat.com>
Thu, 25 Feb 2010 13:34:15 +0000 (08:34 -0500)
Committer: Ingo Molnar <mingo@elte.hu>
Thu, 25 Feb 2010 16:49:25 +0000 (17:49 +0100)
Add /proc/sys/debug/kprobes-optimization sysctl which enables
and disables kprobes jump optimization on the fly for debugging.

Changes in v7:
 - Remove ctl_name = CTL_UNNUMBERED for upstream compatibility.

Changes in v6:
 - Update comments and coding style.

Signed-off-by: Masami Hiramatsu <mhiramat@redhat.com>
Cc: systemtap <systemtap@sources.redhat.com>
Cc: DLE <dle-develop@lists.sourceforge.net>
Cc: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Cc: Jim Keniston <jkenisto@us.ibm.com>
Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Anders Kaseorg <andersk@ksplice.com>
Cc: Tim Abbott <tabbott@ksplice.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Jason Baron <jbaron@redhat.com>
Cc: Mathieu Desnoyers <compudj@krystal.dyndns.org>
LKML-Reference: <20100225133415.6725.8274.stgit@localhost6.localdomain6>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
include/linux/kprobes.h
kernel/kprobes.c
kernel/sysctl.c

index aed1f95c582f54f7ad236dad714bcda59504aaf0..e7d1b2e0070d3570b7022877a79fa2f0ed081507 100644 (file)
@@ -283,6 +283,14 @@ extern int arch_within_optimized_kprobe(struct optimized_kprobe *op,
                                        unsigned long addr);
 
 extern void opt_pre_handler(struct kprobe *p, struct pt_regs *regs);
+
+#ifdef CONFIG_SYSCTL
+extern int sysctl_kprobes_optimization;
+extern int proc_kprobes_optimization_handler(struct ctl_table *table,
+                                            int write, void __user *buffer,
+                                            size_t *length, loff_t *ppos);
+#endif
+
 #endif /* CONFIG_OPTPROBES */
 
 /* Get the kprobe at this addr (if any) - called with preemption disabled */
index 612af2d616141b35beb410a5adc58a9be5fecd5b..fa034d29cf73d5730f531b7bfd98682278d077b4 100644 (file)
@@ -42,6 +42,7 @@
 #include <linux/freezer.h>
 #include <linux/seq_file.h>
 #include <linux/debugfs.h>
+#include <linux/sysctl.h>
 #include <linux/kdebug.h>
 #include <linux/memory.h>
 #include <linux/ftrace.h>
@@ -360,6 +361,9 @@ static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
 }
 
 #ifdef CONFIG_OPTPROBES
+/* NOTE: change this value only with kprobe_mutex held */
+static bool kprobes_allow_optimization;
+
 /*
  * Call all pre_handler on the list, but ignores its return value.
  * This must be called from arch-dep optimized caller.
@@ -428,7 +432,7 @@ static __kprobes void kprobe_optimizer(struct work_struct *work)
        /* Lock modules while optimizing kprobes */
        mutex_lock(&module_mutex);
        mutex_lock(&kprobe_mutex);
-       if (kprobes_all_disarmed)
+       if (kprobes_all_disarmed || !kprobes_allow_optimization)
                goto end;
 
        /*
@@ -471,7 +475,7 @@ static __kprobes void optimize_kprobe(struct kprobe *p)
        struct optimized_kprobe *op;
 
        /* Check if the kprobe is disabled or not ready for optimization. */
-       if (!kprobe_optready(p) ||
+       if (!kprobe_optready(p) || !kprobes_allow_optimization ||
            (kprobe_disabled(p) || kprobes_all_disarmed))
                return;
 
@@ -588,6 +592,80 @@ static __kprobes void try_to_optimize_kprobe(struct kprobe *p)
        optimize_kprobe(ap);
 }
 
+#ifdef CONFIG_SYSCTL
+static void __kprobes optimize_all_kprobes(void)
+{
+       struct hlist_head *head;
+       struct hlist_node *node;
+       struct kprobe *p;
+       unsigned int i;
+
+       /* If optimization is already allowed, just return */
+       if (kprobes_allow_optimization)
+               return;
+
+       kprobes_allow_optimization = true;
+       mutex_lock(&text_mutex);
+       for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
+               head = &kprobe_table[i];
+               hlist_for_each_entry_rcu(p, node, head, hlist)
+                       if (!kprobe_disabled(p))
+                               optimize_kprobe(p);
+       }
+       mutex_unlock(&text_mutex);
+       printk(KERN_INFO "Kprobes globally optimized\n");
+}
+
+static void __kprobes unoptimize_all_kprobes(void)
+{
+       struct hlist_head *head;
+       struct hlist_node *node;
+       struct kprobe *p;
+       unsigned int i;
+
+       /* If optimization is already prohibited, just return */
+       if (!kprobes_allow_optimization)
+               return;
+
+       kprobes_allow_optimization = false;
+       printk(KERN_INFO "Kprobes globally unoptimized\n");
+       get_online_cpus();      /* For avoiding text_mutex deadlock */
+       mutex_lock(&text_mutex);
+       for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
+               head = &kprobe_table[i];
+               hlist_for_each_entry_rcu(p, node, head, hlist) {
+                       if (!kprobe_disabled(p))
+                               unoptimize_kprobe(p);
+               }
+       }
+
+       mutex_unlock(&text_mutex);
+       put_online_cpus();
+       /* Allow all currently running kprobes to complete */
+       synchronize_sched();
+}
+
+int sysctl_kprobes_optimization;
+int proc_kprobes_optimization_handler(struct ctl_table *table, int write,
+                                     void __user *buffer, size_t *length,
+                                     loff_t *ppos)
+{
+       int ret;
+
+       mutex_lock(&kprobe_mutex);
+       sysctl_kprobes_optimization = kprobes_allow_optimization ? 1 : 0;
+       ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
+
+       if (sysctl_kprobes_optimization)
+               optimize_all_kprobes();
+       else
+               unoptimize_all_kprobes();
+       mutex_unlock(&kprobe_mutex);
+
+       return ret;
+}
+#endif /* CONFIG_SYSCTL */
+
 static void __kprobes __arm_kprobe(struct kprobe *p)
 {
        struct kprobe *old_p;
@@ -1610,10 +1688,14 @@ static int __init init_kprobes(void)
                }
        }
 
-#if defined(CONFIG_OPTPROBES) && defined(__ARCH_WANT_KPROBES_INSN_SLOT)
+#if defined(CONFIG_OPTPROBES)
+#if defined(__ARCH_WANT_KPROBES_INSN_SLOT)
        /* Init kprobe_optinsn_slots */
        kprobe_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;
 #endif
+       /* By default, kprobes can be optimized */
+       kprobes_allow_optimization = true;
+#endif
 
        /* By default, kprobes are armed */
        kprobes_all_disarmed = false;
index 8a68b244846805869e90f242fb563913ff485d5c..40d791d616b521d9ed974e8a5be7c9cba2ec3660 100644 (file)
@@ -50,6 +50,7 @@
 #include <linux/ftrace.h>
 #include <linux/slow-work.h>
 #include <linux/perf_event.h>
+#include <linux/kprobes.h>
 
 #include <asm/uaccess.h>
 #include <asm/processor.h>
@@ -1449,6 +1450,17 @@ static struct ctl_table debug_table[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec
        },
+#endif
+#if defined(CONFIG_OPTPROBES)
+       {
+               .procname       = "kprobes-optimization",
+               .data           = &sysctl_kprobes_optimization,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_kprobes_optimization_handler,
+               .extra1         = &zero,
+               .extra2         = &one,
+       },
 #endif
        { }
 };