[PATCH] kprobe: boost 2byte-opcodes on i386
/*
 *  Kernel Probes (KProbes)
 *  arch/i386/kernel/kprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct     Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *              Probes initial implementation (includes contributions from
 *              Rusty Russell).
 * 2004-July    Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *              interface to access function arguments.
 * 2005-May     Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *              <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *              <prasanna@in.ibm.com> added function-return probes.
 */

#include <linux/config.h>
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <asm/cacheflush.h>
#include <asm/kdebug.h>
#include <asm/desc.h>
#include <asm/uaccess.h>

void jprobe_return_end(void);

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

/* insert a jmp instruction */
static __always_inline void set_jmp_op(void *from, void *to)
{
        struct __arch_jmp_op {
                char op;
                long raddr;
        } __attribute__((packed)) *jop;
        jop = (struct __arch_jmp_op *)from;
        jop->raddr = (long)(to) - ((long)(from) + 5);
        jop->op = RELATIVEJUMP_INSTRUCTION;
}
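
/*
 * Illustrative example: with from == 0xc0100000 and to == 0xc0100020,
 * raddr = 0xc0100020 - (0xc0100000 + 5) = 0x1b, so the five bytes written
 * are "e9 1b 00 00 00": a near jmp whose rel32 displacement is counted
 * from the end of the 5-byte instruction.
 */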

/*
 * returns non-zero if the opcode sequence can be boosted.
 */
static __always_inline int can_boost(kprobe_opcode_t *opcodes)
{
#define W(row,b0,b1,b2,b3,b4,b5,b6,b7,b8,b9,ba,bb,bc,bd,be,bf)                \
        (((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) |   \
          (b4##UL << 0x4)|(b5##UL << 0x5)|(b6##UL << 0x6)|(b7##UL << 0x7) |   \
          (b8##UL << 0x8)|(b9##UL << 0x9)|(ba##UL << 0xa)|(bb##UL << 0xb) |   \
          (bc##UL << 0xc)|(bd##UL << 0xd)|(be##UL << 0xe)|(bf##UL << 0xf))    \
         << (row % 32))
        /*
         * Undefined/reserved opcodes, conditional jumps, Opcode Extension
         * Groups, and some special opcodes cannot be boosted.
         */
        static const unsigned long twobyte_is_boostable[256 / 32] = {
                /*      0 1 2 3 4 5 6 7 8 9 a b c d e f         */
                /*      -------------------------------         */
                W(0x00, 0,0,1,1,0,0,1,0,1,1,0,0,0,0,0,0)| /* 00 */
                W(0x10, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0), /* 10 */
                W(0x20, 1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0)| /* 20 */
                W(0x30, 0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0), /* 30 */
                W(0x40, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)| /* 40 */
                W(0x50, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0), /* 50 */
                W(0x60, 1,1,1,1,1,1,1,1,1,1,1,1,0,0,1,1)| /* 60 */
                W(0x70, 0,0,0,0,1,1,1,1,0,0,0,0,0,0,1,1), /* 70 */
                W(0x80, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0)| /* 80 */
                W(0x90, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1), /* 90 */
                W(0xa0, 1,1,0,1,1,1,0,0,1,1,0,1,1,1,0,1)| /* a0 */
                W(0xb0, 1,1,1,1,1,1,1,1,0,0,0,1,1,1,1,1), /* b0 */
                W(0xc0, 1,1,0,0,0,0,0,0,1,1,1,1,1,1,1,1)| /* c0 */
                W(0xd0, 0,1,1,1,0,1,0,0,1,1,0,1,1,1,0,1), /* d0 */
                W(0xe0, 0,1,1,0,0,1,0,0,1,1,0,1,1,1,0,1)| /* e0 */
                W(0xf0, 0,1,1,1,0,1,0,0,1,1,1,0,1,1,1,0)  /* f0 */
                /*      -------------------------------         */
                /*      0 1 2 3 4 5 6 7 8 9 a b c d e f         */
        };
#undef W
        kprobe_opcode_t opcode;
        kprobe_opcode_t *orig_opcodes = opcodes;
retry:
        if (opcodes - orig_opcodes > MAX_INSN_SIZE - 1)
                return 0;
        opcode = *(opcodes++);

        /* 2nd-byte opcode */
        if (opcode == 0x0f) {
                if (opcodes - orig_opcodes > MAX_INSN_SIZE - 1)
                        return 0;
                return test_bit(*opcodes, twobyte_is_boostable);
        }

        switch (opcode & 0xf0) {
        case 0x60:
                if (0x63 < opcode && opcode < 0x67)
                        goto retry; /* prefixes */
                /* can't boost Address-size override and bound */
                return (opcode != 0x62 && opcode != 0x67);
        case 0x70:
                return 0; /* can't boost conditional jump */
        case 0xc0:
                /* can't boost software interrupts */
                return (0xc1 < opcode && opcode < 0xcc) || opcode == 0xcf;
        case 0xd0:
                /* can boost AA* and XLAT */
                return (opcode == 0xd4 || opcode == 0xd5 || opcode == 0xd7);
        case 0xe0:
                /* can boost in/out and absolute jmps */
                return ((opcode & 0x04) || opcode == 0xea);
        case 0xf0:
                if ((opcode & 0x0c) == 0 && opcode != 0xf1)
                        goto retry; /* lock/rep(ne) prefix */
                /* clearing and setting flags can be boosted */
                return (opcode == 0xf5 || (0xf7 < opcode && opcode < 0xfe));
        default:
                if (opcode == 0x26 || opcode == 0x36 || opcode == 0x3e)
                        goto retry; /* prefixes */
                /* can't boost CS override and call */
                return (opcode != 0x2e && opcode != 0x9a);
        }
}
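
/*
 * Illustrative walk-through: for the byte sequence "f0 01 08"
 * (lock add %ecx,(%eax)), the 0xf0 lock prefix matches the 0xf0 case
 * and is skipped via the retry loop; 0x01 then falls into the default
 * case and the function returns 1, so the instruction is boostable.
 * A "74 xx" (je rel8), by contrast, hits the 0x70 case and returns 0.
 */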

/*
 * returns non-zero if opcode modifies the interrupt flag.
 */
static int __kprobes is_IF_modifier(kprobe_opcode_t opcode)
{
        switch (opcode) {
        case 0xfa:              /* cli */
        case 0xfb:              /* sti */
        case 0xcf:              /* iret/iretd */
        case 0x9d:              /* popf/popfd */
                return 1;
        }
        return 0;
}

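/*
 * Note on ainsn.boostable, per the boosting scheme this patch implements:
 *   -1  the instruction can never be boosted,
 *    0  the instruction is a boost candidate; resume_execution() will
 *       try to attach a jump back to the original code stream,
 *    1  the copied slot already ends with a jump back (or needs no
 *       fix-up), so later hits can skip single-stepping entirely.
 */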
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
        /* insn: must be on special executable page on i386. */
        p->ainsn.insn = get_insn_slot();
        if (!p->ainsn.insn)
                return -ENOMEM;

        memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
        p->opcode = *p->addr;
        if (can_boost(p->addr)) {
                p->ainsn.boostable = 0;
        } else {
                p->ainsn.boostable = -1;
        }
        return 0;
}

void __kprobes arch_arm_kprobe(struct kprobe *p)
{
        *p->addr = BREAKPOINT_INSTRUCTION;
        flush_icache_range((unsigned long) p->addr,
                           (unsigned long) p->addr + sizeof(kprobe_opcode_t));
}

void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
        *p->addr = p->opcode;
        flush_icache_range((unsigned long) p->addr,
                           (unsigned long) p->addr + sizeof(kprobe_opcode_t));
}

void __kprobes arch_remove_kprobe(struct kprobe *p)
{
        mutex_lock(&kprobe_mutex);
        free_insn_slot(p->ainsn.insn);
        mutex_unlock(&kprobe_mutex);
}

static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
        kcb->prev_kprobe.kp = kprobe_running();
        kcb->prev_kprobe.status = kcb->kprobe_status;
        kcb->prev_kprobe.old_eflags = kcb->kprobe_old_eflags;
        kcb->prev_kprobe.saved_eflags = kcb->kprobe_saved_eflags;
}

static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
        __get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
        kcb->kprobe_status = kcb->prev_kprobe.status;
        kcb->kprobe_old_eflags = kcb->prev_kprobe.old_eflags;
        kcb->kprobe_saved_eflags = kcb->prev_kprobe.saved_eflags;
}

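/*
 * If the probed instruction itself modifies IF (cli/sti/iret/popf), the
 * saved eflags must not re-assert IF after single-stepping; otherwise we
 * would undo the effect of the just-executed instruction when eflags is
 * restored in post_kprobe_handler().
 */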
static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
                                struct kprobe_ctlblk *kcb)
{
        __get_cpu_var(current_kprobe) = p;
        kcb->kprobe_saved_eflags = kcb->kprobe_old_eflags
                = (regs->eflags & (TF_MASK | IF_MASK));
        if (is_IF_modifier(p->opcode))
                kcb->kprobe_saved_eflags &= ~IF_MASK;
}

static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
{
        regs->eflags |= TF_MASK;
        regs->eflags &= ~IF_MASK;
        /* single step inline if the instruction is an int3 */
        if (p->opcode == BREAKPOINT_INSTRUCTION)
                regs->eip = (unsigned long)p->addr;
        else
                regs->eip = (unsigned long)p->ainsn.insn;
}
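
/*
 * Setting TF arranges a debug trap (do_debug -> post_kprobe_handler)
 * after exactly one instruction; IF is cleared so no interrupt can be
 * taken while the out-of-line copy executes with TF set.
 */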

/* Called with kretprobe_lock held */
void __kprobes arch_prepare_kretprobe(struct kretprobe *rp,
                                      struct pt_regs *regs)
{
        unsigned long *sara = (unsigned long *)&regs->esp;
        struct kretprobe_instance *ri;

        if ((ri = get_free_rp_inst(rp)) != NULL) {
                ri->rp = rp;
                ri->task = current;
                ri->ret_addr = (kprobe_opcode_t *) *sara;

                /* Replace the return addr with trampoline addr */
                *sara = (unsigned long) &kretprobe_trampoline;

                add_rp_inst(ri);
        } else {
                rp->nmissed++;
        }
}

/*
 * Interrupts are disabled on entry as trap3 is an interrupt gate and they
 * remain disabled throughout this function.
 */
static int __kprobes kprobe_handler(struct pt_regs *regs)
{
        struct kprobe *p;
        int ret = 0;
        kprobe_opcode_t *addr;
        struct kprobe_ctlblk *kcb;
#ifdef CONFIG_PREEMPT
        unsigned pre_preempt_count = preempt_count();
#endif /* CONFIG_PREEMPT */

        addr = (kprobe_opcode_t *)(regs->eip - sizeof(kprobe_opcode_t));

        /*
         * We don't want to be preempted for the entire
         * duration of kprobe processing
         */
        preempt_disable();
        kcb = get_kprobe_ctlblk();

        /* Check that we're not actually recursing */
        if (kprobe_running()) {
                p = get_kprobe(addr);
                if (p) {
                        if (kcb->kprobe_status == KPROBE_HIT_SS &&
                                *p->ainsn.insn == BREAKPOINT_INSTRUCTION) {
                                regs->eflags &= ~TF_MASK;
                                regs->eflags |= kcb->kprobe_saved_eflags;
                                goto no_kprobe;
                        }
                        /* We have reentered kprobe_handler(): another probe
                         * was hit while we were inside the handler.  Save
                         * the original kprobe variables and just single-step
                         * the instruction of the new probe without calling
                         * any user handlers.
                         */
                        save_previous_kprobe(kcb);
                        set_current_kprobe(p, regs, kcb);
                        kprobes_inc_nmissed_count(p);
                        prepare_singlestep(p, regs);
                        kcb->kprobe_status = KPROBE_REENTER;
                        return 1;
                } else {
                        if (*addr != BREAKPOINT_INSTRUCTION) {
                        /* The breakpoint instruction was removed by
                         * another cpu right after we hit it; no further
                         * handling of this interrupt is appropriate.
                         */
                                regs->eip -= sizeof(kprobe_opcode_t);
                                ret = 1;
                                goto no_kprobe;
                        }
                        p = __get_cpu_var(current_kprobe);
                        if (p->break_handler && p->break_handler(p, regs)) {
                                goto ss_probe;
                        }
                }
                goto no_kprobe;
        }

        p = get_kprobe(addr);
        if (!p) {
                if (*addr != BREAKPOINT_INSTRUCTION) {
                        /*
                         * The breakpoint instruction was removed right
                         * after we hit it.  Another cpu has removed
                         * either a probepoint or a debugger breakpoint
                         * at this address.  In either case, no further
                         * handling of this interrupt is appropriate.
                         * Back up over the (now missing) int3 and run
                         * the original instruction.
                         */
                        regs->eip -= sizeof(kprobe_opcode_t);
                        ret = 1;
                }
                /* Not one of ours: let the kernel handle it */
                goto no_kprobe;
        }

        set_current_kprobe(p, regs, kcb);
        kcb->kprobe_status = KPROBE_HIT_ACTIVE;

        if (p->pre_handler && p->pre_handler(p, regs))
                /* handler has already set things up, so skip ss setup */
                return 1;

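        /*
         * Boosted path: when no post_handler or break_handler is
         * registered and the copied instruction is known to be safe,
         * resume directly in the instruction slot (resume_execution()
         * has either appended a jump back to the original stream or
         * verified that none is needed) rather than paying for a second
         * debug trap to single-step it.
         */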
        if (p->ainsn.boostable == 1 &&
#ifdef CONFIG_PREEMPT
            !(pre_preempt_count) && /*
                                       * This enables boosting when the direct
                                       * execution path isn't preempted.
                                       */
#endif /* CONFIG_PREEMPT */
            !p->post_handler && !p->break_handler) {
                /* Boost up -- we can execute copied instructions directly */
                reset_current_kprobe();
                regs->eip = (unsigned long)p->ainsn.insn;
                preempt_enable_no_resched();
                return 1;
        }

ss_probe:
        prepare_singlestep(p, regs);
        kcb->kprobe_status = KPROBE_HIT_SS;
        return 1;

no_kprobe:
        preempt_enable_no_resched();
        return ret;
}

/*
 * For function-return probes, init_kprobes() establishes a probepoint
 * here. When a retprobed function returns, this probe is hit and
 * trampoline_handler() runs, calling the kretprobe's handler.
 */
void __kprobes kretprobe_trampoline_holder(void)
{
        asm volatile ( ".global kretprobe_trampoline\n"
                        "kretprobe_trampoline: \n"
                        "       pushf\n"
                        /* skip cs, eip, orig_eax, es, ds */
                        "       subl $20, %esp\n"
                        "       pushl %eax\n"
                        "       pushl %ebp\n"
                        "       pushl %edi\n"
                        "       pushl %esi\n"
                        "       pushl %edx\n"
                        "       pushl %ecx\n"
                        "       pushl %ebx\n"
                        "       movl %esp, %eax\n"
                        "       call trampoline_handler\n"
                        /* move eflags to cs */
                        "       movl 48(%esp), %edx\n"
                        "       movl %edx, 44(%esp)\n"
                        /* save true return address on eflags */
                        "       movl %eax, 48(%esp)\n"
                        "       popl %ebx\n"
                        "       popl %ecx\n"
                        "       popl %edx\n"
                        "       popl %esi\n"
                        "       popl %edi\n"
                        "       popl %ebp\n"
                        "       popl %eax\n"
                        /* skip eip, orig_eax, es, ds */
                        "       addl $16, %esp\n"
                        "       popf\n"
                        "       ret\n");
}
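
/*
 * The fake pt_regs frame above lets trampoline_handler() be written in C.
 * On return, %eax holds the real return address; sliding eflags down into
 * the cs slot frees the top word so that address can be stored where the
 * final "ret" will consume it, after popf restores the saved flags.
 */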

/*
 * Called from kretprobe_trampoline
 */
fastcall void *__kprobes trampoline_handler(struct pt_regs *regs)
{
        struct kretprobe_instance *ri = NULL;
        struct hlist_head *head;
        struct hlist_node *node, *tmp;
        unsigned long flags, orig_ret_address = 0;
        unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;

        spin_lock_irqsave(&kretprobe_lock, flags);
        head = kretprobe_inst_table_head(current);

        /*
         * It is possible to have multiple instances associated with a given
         * task either because multiple functions in the call path have
         * return probes installed on them, and/or more than one return
         * probe was registered for a target function.
         *
         * We can handle this because:
         *     - instances are always inserted at the head of the list
         *     - when multiple return probes are registered for the same
         *       function, the first instance's ret_addr will point to the
         *       real return address, and all the rest will point to
         *       kretprobe_trampoline
         */
        hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
                if (ri->task != current)
                        /* another task is sharing our hash bucket */
                        continue;

                if (ri->rp && ri->rp->handler) {
                        __get_cpu_var(current_kprobe) = &ri->rp->kp;
                        ri->rp->handler(ri, regs);
                        __get_cpu_var(current_kprobe) = NULL;
                }

                orig_ret_address = (unsigned long)ri->ret_addr;
                recycle_rp_inst(ri);

                if (orig_ret_address != trampoline_address)
                        /*
                         * This is the real return address. Any other
                         * instances associated with this task are for
                         * other calls deeper on the call stack
                         */
                        break;
        }

        BUG_ON(!orig_ret_address || (orig_ret_address == trampoline_address));

        spin_unlock_irqrestore(&kretprobe_lock, flags);

        return (void *)orig_ret_address;
}

/*
 * Called after single-stepping.  p->addr is the address of the
 * instruction whose first byte has been replaced by the "int 3"
 * instruction.  To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction.  The address of this
 * copy is p->ainsn.insn.
 *
 * This function prepares to return from the post-single-step
 * interrupt.  We have to fix up the stack as follows:
 *
 * 0) Except in the case of absolute or indirect jump or call instructions,
 * the new eip is relative to the copied instruction.  We need to make
 * it relative to the original instruction.
 *
 * 1) If the single-stepped instruction was pushfl, then the TF and IF
 * flags are set in the just-pushed eflags, and may need to be cleared.
 *
 * 2) If the single-stepped instruction was a call, the return address
 * that is atop the stack is the address following the copied instruction.
 * We need to make it the address following the original instruction.
 *
 * This function also checks instruction size for preparing direct execution.
 */
static void __kprobes resume_execution(struct kprobe *p,
                struct pt_regs *regs, struct kprobe_ctlblk *kcb)
{
        unsigned long *tos = (unsigned long *)&regs->esp;
        unsigned long copy_eip = (unsigned long)p->ainsn.insn;
        unsigned long orig_eip = (unsigned long)p->addr;

        regs->eflags &= ~TF_MASK;
        switch (p->ainsn.insn[0]) {
        case 0x9c:              /* pushfl */
                *tos &= ~(TF_MASK | IF_MASK);
                *tos |= kcb->kprobe_old_eflags;
                break;
        case 0xc2:              /* iret/ret/lret */
        case 0xc3:
        case 0xca:
        case 0xcb:
        case 0xcf:
        case 0xea:              /* jmp absolute -- eip is correct */
                /* eip is already adjusted, no more changes required */
                p->ainsn.boostable = 1;
                goto no_change;
        case 0xe8:              /* call relative - Fix return addr */
                *tos = orig_eip + (*tos - copy_eip);
                break;
        case 0x9a:              /* call absolute -- same as call absolute, indirect */
                *tos = orig_eip + (*tos - copy_eip);
                goto no_change;
        case 0xff:
                if ((p->ainsn.insn[1] & 0x30) == 0x10) {
                        /*
                         * call absolute, indirect
                         * Fix return addr; eip is correct.
                         * But this is not boostable
                         */
                        *tos = orig_eip + (*tos - copy_eip);
                        goto no_change;
                } else if (((p->ainsn.insn[1] & 0x31) == 0x20) ||       /* jmp near, absolute indirect */
                           ((p->ainsn.insn[1] & 0x31) == 0x21)) {       /* jmp far, absolute indirect */
                        /* eip is correct. And this is boostable */
                        p->ainsn.boostable = 1;
                        goto no_change;
                }
        default:
                break;
        }

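        /*
         * First time through for a boost candidate (boostable == 0): if
         * the single-step ended inside the copy and a 5-byte jmp still
         * fits in the slot, write one that jumps back to the
         * corresponding address in the original stream; later hits can
         * then run the slot directly.
         */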
        if (p->ainsn.boostable == 0) {
                if ((regs->eip > copy_eip) &&
                    (regs->eip - copy_eip) + 5 < MAX_INSN_SIZE) {
                        /*
                         * This instruction can be executed directly if it
                         * jumps back to the correct address.
                         */
                        set_jmp_op((void *)regs->eip,
                                   (void *)orig_eip + (regs->eip - copy_eip));
                        p->ainsn.boostable = 1;
                } else {
                        p->ainsn.boostable = -1;
                }
        }

        regs->eip = orig_eip + (regs->eip - copy_eip);

no_change:
        return;
}

/*
 * Interrupts are disabled on entry as trap1 is an interrupt gate and they
 * remain disabled throughout this function.
 */
static int __kprobes post_kprobe_handler(struct pt_regs *regs)
{
        struct kprobe *cur = kprobe_running();
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

        if (!cur)
                return 0;

        if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
                kcb->kprobe_status = KPROBE_HIT_SSDONE;
                cur->post_handler(cur, regs, 0);
        }

        resume_execution(cur, regs, kcb);
        regs->eflags |= kcb->kprobe_saved_eflags;

        /* Restore the original saved kprobes variables and continue. */
        if (kcb->kprobe_status == KPROBE_REENTER) {
                restore_previous_kprobe(kcb);
                goto out;
        }
        reset_current_kprobe();
out:
        preempt_enable_no_resched();

        /*
         * If somebody else is single-stepping across a probe point, eflags
         * will have TF set, in which case, continue the remaining processing
         * of do_debug, as if this is not a probe hit.
         */
        if (regs->eflags & TF_MASK)
                return 0;

        return 1;
}

static int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
        struct kprobe *cur = kprobe_running();
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

        switch (kcb->kprobe_status) {
        case KPROBE_HIT_SS:
        case KPROBE_REENTER:
                /*
                 * We are here because the instruction being single-
                 * stepped caused a page fault. We reset the current
                 * kprobe so that the eip points back to the probe
                 * address, and allow the page fault handler to continue
                 * as a normal page fault.
                 */
                regs->eip = (unsigned long)cur->addr;
                regs->eflags |= kcb->kprobe_old_eflags;
                if (kcb->kprobe_status == KPROBE_REENTER)
                        restore_previous_kprobe(kcb);
                else
                        reset_current_kprobe();
                preempt_enable_no_resched();
                break;
        case KPROBE_HIT_ACTIVE:
        case KPROBE_HIT_SSDONE:
                /*
                 * We increment the nmissed count for accounting;
                 * the npre/npostfault counts could also be used to
                 * account for these specific fault cases.
                 */
                kprobes_inc_nmissed_count(cur);

                /*
                 * We come here because instructions in the pre/post
                 * handler caused the page fault. This could happen if
                 * the handler tries to access user space, e.g. via
                 * copy_from_user() or get_user(). Let the
                 * user-specified handler try to fix it first.
                 */
                if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
                        return 1;

                /*
                 * In case the user-specified fault handler returned
                 * zero, try to fix up.
                 */
                if (fixup_exception(regs))
                        return 1;

                /*
                 * fixup_exception() could not handle it;
                 * let do_page_fault() fix it.
                 */
                break;
        default:
                break;
        }
        return 0;
}

/*
 * Wrapper routine for handling exceptions.
 */
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
                                       unsigned long val, void *data)
{
        struct die_args *args = (struct die_args *)data;
        int ret = NOTIFY_DONE;

        if (args->regs && user_mode_vm(args->regs))
                return ret;

        switch (val) {
        case DIE_INT3:
                if (kprobe_handler(args->regs))
                        ret = NOTIFY_STOP;
                break;
        case DIE_DEBUG:
                if (post_kprobe_handler(args->regs))
                        ret = NOTIFY_STOP;
                break;
        case DIE_GPF:
        case DIE_PAGE_FAULT:
                /* kprobe_running() needs smp_processor_id() */
                preempt_disable();
                if (kprobe_running() &&
                    kprobe_fault_handler(args->regs, args->trapnr))
                        ret = NOTIFY_STOP;
                preempt_enable();
                break;
        default:
                break;
        }
        return ret;
}

int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
        struct jprobe *jp = container_of(p, struct jprobe, kp);
        unsigned long addr;
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

        kcb->jprobe_saved_regs = *regs;
        kcb->jprobe_saved_esp = &regs->esp;
        addr = (unsigned long)(kcb->jprobe_saved_esp);

        /*
         * TBD: As Linus pointed out, gcc assumes that the callee
         * owns the argument space and could overwrite it, e.g.
         * tailcall optimization. So, to be absolutely safe
         * we also save and restore enough stack bytes to cover
         * the argument area.
         */
        memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr,
                        MIN_STACK_SIZE(addr));
        regs->eflags &= ~IF_MASK;
        regs->eip = (unsigned long)(jp->entry);
        return 1;
}

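/*
 * Flow of a jprobe hit: setjmp_pre_handler() above redirects eip to the
 * user's entry stub, which runs with the probed function's arguments and
 * must finish by calling jprobe_return().  The int3 below then lands in
 * longjmp_break_handler(), which restores the saved registers and stack
 * so execution continues with the original probed instruction.
 */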
void __kprobes jprobe_return(void)
{
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

        asm volatile ("       xchgl   %%ebx,%%esp     \n"
                      "       int3                      \n"
                      "       .globl jprobe_return_end  \n"
                      "       jprobe_return_end:        \n"
                      "       nop                       \n"::"b"
                      (kcb->jprobe_saved_esp):"memory");
}

int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
        u8 *addr = (u8 *) (regs->eip - 1);
        unsigned long stack_addr = (unsigned long)(kcb->jprobe_saved_esp);
        struct jprobe *jp = container_of(p, struct jprobe, kp);

        if ((addr > (u8 *) jprobe_return) && (addr < (u8 *) jprobe_return_end)) {
                if (&regs->esp != kcb->jprobe_saved_esp) {
                        struct pt_regs *saved_regs =
                            container_of(kcb->jprobe_saved_esp,
                                            struct pt_regs, esp);
                        printk("current esp %p does not match saved esp %p\n",
                               &regs->esp, kcb->jprobe_saved_esp);
                        printk("Saved registers for jprobe %p\n", jp);
                        show_registers(saved_regs);
                        printk("Current registers\n");
                        show_registers(regs);
                        BUG();
                }
                *regs = kcb->jprobe_saved_regs;
                memcpy((kprobe_opcode_t *) stack_addr, kcb->jprobes_stack,
                       MIN_STACK_SIZE(stack_addr));
                preempt_enable_no_resched();
                return 1;
        }
        return 0;
}

int __init arch_init_kprobes(void)
{
        return 0;
}
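
For reference, a minimal user of this interface, in the style of the era's
Documentation/kprobes.txt example, might look like the sketch below. It is a
hypothetical module, not part of this file: "do_fork" is a placeholder probe
point and kallsyms_lookup_name() is assumed to be available (CONFIG_KALLSYMS).
Probes like this one, registering only a pre_handler, are exactly the case
the boosting in this patch speeds up.

#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/kallsyms.h>

static struct kprobe kp;

/* Called just before the probed instruction executes. */
static int handler_pre(struct kprobe *p, struct pt_regs *regs)
{
        printk(KERN_INFO "kprobe hit at %p, eip = %08lx\n", p->addr, regs->eip);
        return 0;       /* let kprobes continue normally */
}

static int __init kprobe_example_init(void)
{
        kp.pre_handler = handler_pre;
        kp.addr = (kprobe_opcode_t *) kallsyms_lookup_name("do_fork");
        if (!kp.addr) {
                printk(KERN_ERR "couldn't resolve probe point\n");
                return -EINVAL;
        }
        return register_kprobe(&kp);
}

static void __exit kprobe_example_exit(void)
{
        unregister_kprobe(&kp);
}

module_init(kprobe_example_init);
module_exit(kprobe_example_exit);
MODULE_LICENSE("GPL");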