uprobes/x86: Emulate unconditional relative jmp's
author Oleg Nesterov <oleg@redhat.com>
Sat, 5 Apr 2014 18:05:02 +0000 (20:05 +0200)
committer Oleg Nesterov <oleg@redhat.com>
Thu, 17 Apr 2014 19:58:22 +0000 (21:58 +0200)
Currently we always execute all insns out-of-line, including relative
jmp's and call's. This assumes that even if regs->ip points to nowhere
after the single-step, default_post_xol_op(UPROBE_FIX_IP) logic will
update it correctly.

However, this doesn't work if this regs->ip == xol_vaddr + insn_offset
is not canonical. In this case CPU generates #GP and general_protection()
kills the task which tries to execute this insn out-of-line.

Now that we have uprobe_xol_ops we can teach uprobes to emulate these
insns and solve the problem. This patch adds branch_xol_ops which has
a single branch_emulate_op() hook, so far it can only handle rel8/32
relative jmp's.

TODO: move ->fixup into the union along with rip_rela_target_address.

Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Reported-by: Jonathan Lebon <jlebon@redhat.com>
Reviewed-by: Jim Keniston <jkenisto@us.ibm.com>
arch/x86/include/asm/uprobes.h
arch/x86/kernel/uprobes.c

index 9f8210bcbb49d9716061eb52a8a49019cee2afb4..e9fd4d5537edc4676f266f21483ce4afef37d4e0 100644 (file)
@@ -44,9 +44,15 @@ struct arch_uprobe {
        u16                             fixups;
        const struct uprobe_xol_ops     *ops;
 
+       union {
 #ifdef CONFIG_X86_64
-       unsigned long                   rip_rela_target_address;
+               unsigned long                   rip_rela_target_address;
 #endif
+               struct {
+                       s32     offs;
+                       u8      ilen;
+               }                               branch;
+       };
 };
 
 struct arch_uprobe_task {
index aecc220543843dc387675323951b2d7fa59a533c..c3baeaacf1b6c0a77f101b16bfb315365b9c7dc5 100644 (file)
@@ -461,6 +461,40 @@ static struct uprobe_xol_ops default_xol_ops = {
        .post_xol = default_post_xol_op,
 };
 
+static bool branch_emulate_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
+{
+       regs->ip += auprobe->branch.ilen + auprobe->branch.offs;
+       return true;
+}
+
+static struct uprobe_xol_ops branch_xol_ops = {
+       .emulate  = branch_emulate_op,
+};
+
+/* Returns -ENOSYS if branch_xol_ops doesn't handle this insn */
+static int branch_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn)
+{
+
+       switch (OPCODE1(insn)) {
+       case 0xeb:      /* jmp 8 */
+       case 0xe9:      /* jmp 32 */
+               break;
+       default:
+               return -ENOSYS;
+       }
+
+       /* has the side-effect of processing the entire instruction */
+       insn_get_length(insn);
+       if (WARN_ON_ONCE(!insn_complete(insn)))
+               return -ENOEXEC;
+
+       auprobe->branch.ilen = insn->length;
+       auprobe->branch.offs = insn->immediate.value;
+
+       auprobe->ops = &branch_xol_ops;
+       return 0;
+}
+
 /**
  * arch_uprobe_analyze_insn - instruction analysis including validity and fixups.
  * @mm: the probed address space.
@@ -478,6 +512,10 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
        if (ret)
                return ret;
 
+       ret = branch_setup_xol_ops(auprobe, &insn);
+       if (ret != -ENOSYS)
+               return ret;
+
        /*
         * Figure out which fixups arch_uprobe_post_xol() will need to perform,
         * and annotate arch_uprobe->fixups accordingly. To start with, ->fixups