/*
 * User-space Probes (UProbes) for x86
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2008-2011
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/uprobes.h>
#include <linux/uaccess.h>

#include <linux/kdebug.h>
#include <asm/processor.h>
#include <asm/insn.h>

/* Post-execution fixups. */

/* Adjust IP back to vicinity of actual insn */
#define UPROBE_FIX_IP		0x01

/* Adjust the return address of a call insn */
#define UPROBE_FIX_CALL		0x02

/* Instruction will modify TF, don't change it */
#define UPROBE_FIX_SETF		0x04

#define UPROBE_FIX_RIP_AX	0x08
#define UPROBE_FIX_RIP_CX	0x10

#define UPROBE_TRAP_NR		UINT_MAX

/* Adaptations for mhiramat x86 decoder v14. */
#define OPCODE1(insn)		((insn)->opcode.bytes[0])
#define OPCODE2(insn)		((insn)->opcode.bytes[1])
#define OPCODE3(insn)		((insn)->opcode.bytes[2])
#define MODRM_REG(insn)		X86_MODRM_REG((insn)->modrm.value)

#define W(row, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf)\
	(((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) |   \
	  (b4##UL << 0x4)|(b5##UL << 0x5)|(b6##UL << 0x6)|(b7##UL << 0x7) |   \
	  (b8##UL << 0x8)|(b9##UL << 0x9)|(ba##UL << 0xa)|(bb##UL << 0xb) |   \
	  (bc##UL << 0xc)|(bd##UL << 0xd)|(be##UL << 0xe)|(bf##UL << 0xf))    \
	 << (row % 32))
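
/*
 * Annotation (editorial, not from the original source): each W() row
 * packs sixteen 0/1 flags, bit n set iff opcode (row + n) is good, and
 * "<< (row % 32)" places the row in the low or high half of its u32.
 * The "|" joins the two rows sharing one u32; the "," ends the element.
 * E.g. in good_insns_32 below, row 0xc0 has 0 at column c (opcode 0xcc,
 * int3), so a test_bit(0xcc, ...) lookup fails and int3 cannot be probed.
 */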

/*
 * Good-instruction tables for 32-bit apps.  This is non-const and volatile
 * to keep gcc from statically optimizing it out, as variable_test_bit makes
 * some versions of gcc think only *(unsigned long*) is used.
 */
#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
static volatile u32 good_insns_32[256 / 32] = {
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
	/*      ----------------------------------------------         */
	W(0x00, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0) | /* 00 */
	W(0x10, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0) , /* 10 */
	W(0x20, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1) | /* 20 */
	W(0x30, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1) , /* 30 */
	W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
	W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 50 */
	W(0x60, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) | /* 60 */
	W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 70 */
	W(0x80, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */
	W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
	W(0xa0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* a0 */
	W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* b0 */
	W(0xc0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* c0 */
	W(0xd0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */
	W(0xe0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) | /* e0 */
	W(0xf0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1)   /* f0 */
	/*      ----------------------------------------------         */
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
};
#else
#define good_insns_32	NULL
#endif
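
/*
 * Annotation (editorial): uprobe_init_insn() consults this table with
 * test_bit(OPCODE1(insn), (unsigned long *)good_insns_32), treating the
 * u32 array as one 256-bit bitmap; e.g. opcode 0x90 (nop) maps to row
 * 0x90, column 0 above, which is 1, so a probe on a nop is accepted.
 */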

/* Good-instruction tables for 64-bit apps */
#if defined(CONFIG_X86_64)
static volatile u32 good_insns_64[256 / 32] = {
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
	/*      ----------------------------------------------         */
	W(0x00, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0) | /* 00 */
	W(0x10, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0) , /* 10 */
	W(0x20, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0) | /* 20 */
	W(0x30, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0) , /* 30 */
	W(0x40, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 40 */
	W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 50 */
	W(0x60, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) | /* 60 */
	W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 70 */
	W(0x80, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */
	W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
	W(0xa0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* a0 */
	W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* b0 */
	W(0xc0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* c0 */
	W(0xd0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */
	W(0xe0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) | /* e0 */
	W(0xf0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1)   /* f0 */
	/*      ----------------------------------------------         */
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
};
#else
#define good_insns_64	NULL
#endif

/* Using this for both 64-bit and 32-bit apps */
static volatile u32 good_2byte_insns[256 / 32] = {
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
	/*      ----------------------------------------------         */
	W(0x00, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1) | /* 00 */
	W(0x10, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1) , /* 10 */
	W(0x20, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1) | /* 20 */
	W(0x30, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 30 */
	W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
	W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 50 */
	W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 60 */
	W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1) , /* 70 */
	W(0x80, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */
	W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
	W(0xa0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1) | /* a0 */
	W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1) , /* b0 */
	W(0xc0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* c0 */
	W(0xd0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */
	W(0xe0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* e0 */
	W(0xf0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0)   /* f0 */
	/*      ----------------------------------------------         */
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
};

#undef W

/*
 * opcodes we'll probably never support:
 *
 *  6c-6d, e4-e5, ec-ed - in
 *  6e-6f, e6-e7, ee-ef - out
 *  cc, cd - int3, int
 *  cf - iret
 *  d6 - illegal instruction
 *  f1 - int1/icebp
 *  f4 - hlt
 *  fa, fb - cli, sti
 *  0f - lar, lsl, syscall, clts, sysret, sysenter, sysexit, invd, wbinvd, ud2
 *
 * invalid opcodes in 64-bit mode:
 *
 *  06, 0e, 16, 1e, 27, 2f, 37, 3f, 60-62, 82, c4-c5, d4-d5
 *  63 - we support this opcode in x86_64 but not in i386.
 *
 * opcodes we may need to refine support for:
 *
 *  0f - 2-byte instructions: For many of these instructions, the validity
 *  depends on the prefix and/or the reg field.  On such instructions, we
 *  just consider the opcode combination valid if it corresponds to any
 *  defined instruction.
 *
 *  8f - Group 1 - only reg = 0 is OK
 *  c6-c7 - Group 11 - only reg = 0 is OK
 *  d9-df - fpu insns with some illegal encodings
 *  f2, f3 - repnz, repz prefixes.  These are also the first byte for
 *  certain floating-point instructions, such as addsd.
 *
 *  fe - Group 4 - only reg = 0 or 1 is OK
 *  ff - Group 5 - only reg = 0-6 is OK
 *
 * others -- Do we need to support these?
 *
 *  0f - (floating-point?) prefetch instructions
 *  07, 17, 1f - pop es, pop ss, pop ds
 *  26, 2e, 36, 3e - es:, cs:, ss:, ds: segment prefixes --
 *	but 64 and 65 (fs: and gs:) seem to be used, so we support them
 *
 * TODO:
 * - Where necessary, examine the modrm byte and allow only valid instructions
 * in the different Groups and fpu instructions.
 */

static bool is_prefix_bad(struct insn *insn)
{
	int i;

	for (i = 0; i < insn->prefixes.nbytes; i++) {
		switch (insn->prefixes.bytes[i]) {
		case 0x26:	/* INAT_PFX_ES   */
		case 0x2E:	/* INAT_PFX_CS   */
		case 0x36:	/* INAT_PFX_DS   */
		case 0x3E:	/* INAT_PFX_SS   */
		case 0xF0:	/* INAT_PFX_LOCK */
			return true;
		}
	}
	return false;
}
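
/*
 * Annotation (editorial): e.g. "lock incl (%ecx)" encodes as f0 ff 01;
 * the f0 byte is recorded in insn->prefixes, so is_prefix_bad() returns
 * true and uprobe_init_insn() below rejects the probe.
 */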

static int uprobe_init_insn(struct arch_uprobe *auprobe, struct insn *insn, bool x86_64)
{
	u32 volatile *good_insns;

	insn_init(insn, auprobe->insn, x86_64);
	/* has the side-effect of processing the entire instruction */
	insn_get_length(insn);
	if (WARN_ON_ONCE(!insn_complete(insn)))
		return -ENOEXEC;

	if (is_prefix_bad(insn))
		return -ENOTSUPP;

	if (x86_64)
		good_insns = good_insns_64;
	else
		good_insns = good_insns_32;

	if (test_bit(OPCODE1(insn), (unsigned long *)good_insns))
		return 0;

	if (insn->opcode.nbytes == 2) {
		if (test_bit(OPCODE2(insn), (unsigned long *)good_2byte_insns))
			return 0;
	}
	return -ENOTSUPP;
}

#ifdef CONFIG_X86_64
static inline bool is_64bit_mm(struct mm_struct *mm)
{
	return	!config_enabled(CONFIG_IA32_EMULATION) ||
		!(mm->context.ia32_compat == TIF_IA32);
}
/*
 * If arch_uprobe->insn doesn't use rip-relative addressing, return
 * immediately.  Otherwise, rewrite the instruction so that it accesses
 * its memory operand indirectly through a scratch register.  Set
 * def->fixups and def->riprel_target accordingly.  (The contents of the
 * scratch register will be saved before we single-step the modified
 * instruction, and restored afterward.)
 *
 * We do this because a rip-relative instruction can access only a
 * relatively small area (+/- 2 GB from the instruction), and the XOL
 * area typically lies beyond that area.  At least for instructions
 * that store to memory, we can't execute the original instruction
 * and "fix things up" later, because the misdirected store could be
 * disastrous.
 *
 * Some useful facts about rip-relative instructions:
 *
 *  - There's always a modrm byte.
 *  - There's never a SIB byte.
 *  - The displacement is always 4 bytes.
 */
static void
handle_riprel_insn(struct arch_uprobe *auprobe, struct insn *insn)
{
	u8 *cursor;
	u8 reg;

	if (!insn_rip_relative(insn))
		return;

	/*
	 * insn_rip_relative() would have decoded rex_prefix, modrm.
	 * Clear REX.b bit (extension of MODRM.rm field):
	 * we want to encode rax/rcx, not r8/r9.
	 */
	if (insn->rex_prefix.nbytes) {
		cursor = auprobe->insn + insn_offset_rex_prefix(insn);
		*cursor &= 0xfe;	/* Clearing REX.B bit */
	}

	/*
	 * Point cursor at the modrm byte.  The next 4 bytes are the
	 * displacement.  Beyond the displacement, for some instructions,
	 * is the immediate operand.
	 */
	cursor = auprobe->insn + insn_offset_modrm(insn);
	/*
	 * Convert from rip-relative addressing to indirect addressing
	 * via a scratch register.  Change the r/m field from 0x5 (%rip)
	 * to 0x0 (%rax) or 0x1 (%rcx), and squeeze out the offset field.
	 */
	reg = MODRM_REG(insn);
	if (reg == 0) {
		/*
		 * The register operand (if any) is either the A register
		 * (%rax, %eax, etc.) or (if the 0x4 bit is set in the
		 * REX prefix) %r8.  In any case, we know the C register
		 * is NOT the register operand, so we use %rcx (register
		 * #1) for the scratch register.
		 */
		auprobe->def.fixups |= UPROBE_FIX_RIP_CX;
		/* Change modrm from 00 000 101 to 00 000 001. */
		*cursor = 0x1;
	} else {
		/* Use %rax (register #0) for the scratch register. */
		auprobe->def.fixups |= UPROBE_FIX_RIP_AX;
		/* Change modrm from 00 xxx 101 to 00 xxx 000 */
		*cursor = (reg << 3);
	}

	/* Target address = address of next instruction + (signed) offset */
	auprobe->def.riprel_target = (long)insn->length + insn->displacement.value;

	/* Displacement field is gone; slide immediate field (if any) over. */
	if (insn->immediate.nbytes) {
		cursor++;
		memmove(cursor, cursor + insn->displacement.nbytes, insn->immediate.nbytes);
	}
}
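
/*
 * Worked example (editorial annotation, not from the original source):
 * "mov %eax,0x1000(%rip)" encodes as 89 05 00 10 00 00.  MODRM_REG()
 * is 0 (the A register), so %rcx is the scratch: modrm 05 becomes 01
 * and the 4 displacement bytes are squeezed out, leaving 89 01, i.e.
 * "mov %eax,(%rcx)".  riprel_target = 6 + 0x1000 is later added to the
 * probed address by pre_xol_rip_insn() below.
 */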

/*
 * If we're emulating a rip-relative instruction, save the contents
 * of the scratch register and store the target address in that register.
 */
static void
pre_xol_rip_insn(struct arch_uprobe *auprobe, struct pt_regs *regs,
				struct arch_uprobe_task *autask)
{
	if (auprobe->def.fixups & UPROBE_FIX_RIP_AX) {
		autask->saved_scratch_register = regs->ax;
		regs->ax = current->utask->vaddr;
		regs->ax += auprobe->def.riprel_target;
	} else if (auprobe->def.fixups & UPROBE_FIX_RIP_CX) {
		autask->saved_scratch_register = regs->cx;
		regs->cx = current->utask->vaddr;
		regs->cx += auprobe->def.riprel_target;
	}
}

static void
handle_riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs, long *correction)
{
	if (auprobe->def.fixups & (UPROBE_FIX_RIP_AX | UPROBE_FIX_RIP_CX)) {
		struct arch_uprobe_task *autask;

		autask = &current->utask->autask;
		if (auprobe->def.fixups & UPROBE_FIX_RIP_AX)
			regs->ax = autask->saved_scratch_register;
		else
			regs->cx = autask->saved_scratch_register;

		/*
		 * The original instruction includes a displacement, and so
		 * is 4 bytes longer than what we've just single-stepped.
		 * Caller may need to apply other fixups to handle stuff
		 * like "jmpq *...(%rip)" and "callq *...(%rip)".
		 */
		if (correction)
			*correction += 4;
	}
}
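
/*
 * Annotation (editorial): a worked example of the "+ 4" above.  Suppose
 * a 6-byte rip-relative store probed at vaddr was shrunk to a 2-byte
 * copy at xol_vaddr.  After the step, ip = xol_vaddr + 2; with
 * correction = (vaddr - xol_vaddr) + 4, default_post_xol_op() computes
 * ip + correction = vaddr + 6, the end of the original insn.
 */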
#else /* 32-bit: */
static inline bool is_64bit_mm(struct mm_struct *mm)
{
	return false;
}
/*
 * No RIP-relative addressing on 32-bit
 */
static void handle_riprel_insn(struct arch_uprobe *auprobe, struct insn *insn)
{
}
static void pre_xol_rip_insn(struct arch_uprobe *auprobe, struct pt_regs *regs,
				struct arch_uprobe_task *autask)
{
}
static void handle_riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs,
					long *correction)
{
}
#endif /* CONFIG_X86_64 */

struct uprobe_xol_ops {
	bool	(*emulate)(struct arch_uprobe *, struct pt_regs *);
	int	(*pre_xol)(struct arch_uprobe *, struct pt_regs *);
	int	(*post_xol)(struct arch_uprobe *, struct pt_regs *);
	void	(*abort)(struct arch_uprobe *, struct pt_regs *);
};

static inline int sizeof_long(void)
{
	return is_ia32_task() ? 4 : 8;
}

static int default_pre_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	pre_xol_rip_insn(auprobe, regs, &current->utask->autask);
	return 0;
}

static int push_ret_address(struct pt_regs *regs, unsigned long ip)
{
	unsigned long new_sp = regs->sp - sizeof_long();

	if (copy_to_user((void __user *)new_sp, &ip, sizeof_long()))
		return -EFAULT;

	regs->sp = new_sp;
	return 0;
}
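
/*
 * Annotation (editorial): this mimics the stack side effect of a real
 * call: on 64-bit, sp drops by 8 and the 8-byte return ip is written at
 * the new sp.  The -EFAULT path covers a not-yet-expanded or unwritable
 * stack page; see the comment in branch_post_xol_op() below.
 */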

static int default_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;
	long correction = (long)(utask->vaddr - utask->xol_vaddr);

	handle_riprel_post_xol(auprobe, regs, &correction);
	if (auprobe->def.fixups & UPROBE_FIX_IP) {
		regs->ip += correction;
	} else if (auprobe->def.fixups & UPROBE_FIX_CALL) {
		regs->sp += sizeof_long();
		if (push_ret_address(regs, utask->vaddr + auprobe->def.ilen))
			return -ERESTART;
	}
	/* popf; tell the caller to not touch TF */
	if (auprobe->def.fixups & UPROBE_FIX_SETF)
		utask->autask.saved_tf = true;

	return 0;
}
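
/*
 * Annotation (editorial): in the UPROBE_FIX_CALL path above, the
 * stepped copy pushed a return address inside the XOL area; it is
 * discarded (sp += sizeof_long()) and replaced with vaddr + ilen, the
 * address of the insn following the original call.
 */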

static void default_abort_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	handle_riprel_post_xol(auprobe, regs, NULL);
}

static struct uprobe_xol_ops default_xol_ops = {
	.pre_xol  = default_pre_xol_op,
	.post_xol = default_post_xol_op,
	.abort	  = default_abort_op,
};

static bool branch_is_call(struct arch_uprobe *auprobe)
{
	return auprobe->branch.opc1 == 0xe8;
}

#define CASE_COND					\
	COND(70, 71, XF(OF))				\
	COND(72, 73, XF(CF))				\
	COND(74, 75, XF(ZF))				\
	COND(78, 79, XF(SF))				\
	COND(7a, 7b, XF(PF))				\
	COND(76, 77, XF(CF) || XF(ZF))			\
	COND(7c, 7d, XF(SF) != XF(OF))			\
	COND(7e, 7f, XF(ZF) || XF(SF) != XF(OF))

#define COND(op_y, op_n, expr)				\
	case 0x ## op_y: DO((expr) != 0)		\
	case 0x ## op_n: DO((expr) == 0)

#define XF(xf)	(!!(flags & X86_EFLAGS_ ## xf))
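
/*
 * Annotation (editorial): COND(74, 75, XF(ZF)) expands to
 *	case 0x74: DO((!!(flags & X86_EFLAGS_ZF)) != 0)
 *	case 0x75: DO((!!(flags & X86_EFLAGS_ZF)) == 0)
 * i.e. je/jne; one CASE_COND table serves both is_cond_jmp_opcode()
 * and check_jmp_cond() below, which supply different DO() bodies.
 */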

static bool is_cond_jmp_opcode(u8 opcode)
{
	switch (opcode) {
	#define DO(expr)	\
		return true;
	CASE_COND
	#undef	DO

	default:
		return false;
	}
}

static bool check_jmp_cond(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	unsigned long flags = regs->flags;

	switch (auprobe->branch.opc1) {
	#define DO(expr)	\
		return expr;
	CASE_COND
	#undef	DO

	default:	/* not a conditional jmp */
		return true;
	}
}

#undef	XF
#undef	COND
#undef	CASE_COND

static bool branch_emulate_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	unsigned long new_ip = regs->ip += auprobe->branch.ilen;
	unsigned long offs = (long)auprobe->branch.offs;

	if (branch_is_call(auprobe)) {
		/*
		 * If it fails we execute this (mangled, see the comment in
		 * branch_clear_offset) insn out-of-line.  In the likely case
		 * this should trigger the trap, and the probed application
		 * should die or restart the same insn after it handles the
		 * signal; arch_uprobe_post_xol() won't even be called.
		 *
		 * But there is a corner case, see the comment in ->post_xol().
		 */
		if (push_ret_address(regs, new_ip))
			return false;
	} else if (!check_jmp_cond(auprobe, regs)) {
		offs = 0;
	}

	regs->ip = new_ip + offs;
	return true;
}
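
/*
 * Annotation (editorial): e.g. for "jne .+7" (75 05) with ZF set,
 * check_jmp_cond() returns false, offs becomes 0 and ip just advances
 * past the 2-byte insn; with ZF clear, ip = new_ip + 5.  Either way no
 * out-of-line single-step is needed.
 */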

static int branch_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	BUG_ON(!branch_is_call(auprobe));
	/*
	 * We can only get here if branch_emulate_op() failed to push the ret
	 * address _and_ another thread expanded our stack before the (mangled)
	 * "call" insn was executed out-of-line.  Just restore ->sp and restart.
	 * We could also restore ->ip and try to call branch_emulate_op() again.
	 */
	regs->sp += sizeof_long();
	return -ERESTART;
}

static void branch_clear_offset(struct arch_uprobe *auprobe, struct insn *insn)
{
	/*
	 * Turn this insn into "call 1f; 1:", this is what we will execute
	 * out-of-line if ->emulate() fails.  We only need this to generate
	 * a trap, so that the probed task receives the correct signal with
	 * the properly filled siginfo.
	 *
	 * But see the comment in ->post_xol(), in the unlikely case it can
	 * succeed.  So we need to ensure that the new ->ip can not fall into
	 * the non-canonical area and trigger #GP.
	 *
	 * We could turn it into (say) "pushf", but then we would need to
	 * divorce ->insn[] and ->ixol[].  We need to preserve the 1st byte
	 * of ->insn[] for set_orig_insn().
	 */
	memset(auprobe->insn + insn_offset_immediate(insn),
		0, insn->immediate.nbytes);
}

static struct uprobe_xol_ops branch_xol_ops = {
	.emulate  = branch_emulate_op,
	.post_xol = branch_post_xol_op,
};

/* Returns -ENOSYS if branch_xol_ops doesn't handle this insn */
static int branch_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn)
{
	u8 opc1 = OPCODE1(insn);
	int i;

	switch (opc1) {
	case 0xeb:	/* jmp 8 */
	case 0xe9:	/* jmp 32 */
	case 0x90:	/* prefix* + nop; same as jmp with .offs = 0 */
		break;

	case 0xe8:	/* call relative */
		branch_clear_offset(auprobe, insn);
		break;

	case 0x0f:
		if (insn->opcode.nbytes != 2)
			return -ENOSYS;
		/*
		 * If it is a "near" conditional jmp, OPCODE2() - 0x10 matches
		 * OPCODE1() of the "short" jmp which checks the same condition.
		 */
		opc1 = OPCODE2(insn) - 0x10;
	default:
		if (!is_cond_jmp_opcode(opc1))
			return -ENOSYS;
	}

	/*
	 * 16-bit overrides such as CALLW (66 e8 nn nn) are not supported.
	 * Intel and AMD behavior differ in 64-bit mode: Intel ignores the 66
	 * prefix.  No one uses these insns, so reject any branch insn with
	 * such a prefix.
	 */
	for (i = 0; i < insn->prefixes.nbytes; i++) {
		if (insn->prefixes.bytes[i] == 0x66)
			return -ENOTSUPP;
	}

	auprobe->branch.opc1 = opc1;
	auprobe->branch.ilen = insn->length;
	auprobe->branch.offs = insn->immediate.value;

	auprobe->ops = &branch_xol_ops;
	return 0;
}
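
/*
 * Annotation (editorial): e.g. "jz rel32" is 0f 84 nn nn nn nn, and
 * 0x84 - 0x10 = 0x74, the opcode of the short "jz rel8"; storing 0x74
 * as branch.opc1 lets check_jmp_cond() treat both encodings alike.
 */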

/**
 * arch_uprobe_analyze_insn - instruction analysis including validity and fixups.
 * @auprobe: the probepoint information.
 * @mm: the probed address space.
 * @addr: virtual address at which to install the probepoint
 * Return 0 on success or a negative errno on error.
 */
int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long addr)
{
	struct insn insn;
	u8 fix_ip_or_call = UPROBE_FIX_IP;
	int ret;

	ret = uprobe_init_insn(auprobe, &insn, is_64bit_mm(mm));
	if (ret)
		return ret;

	ret = branch_setup_xol_ops(auprobe, &insn);
	if (ret != -ENOSYS)
		return ret;

	/*
	 * Figure out which fixups default_post_xol_op() will need to perform,
	 * and annotate def->fixups accordingly.  To start with, ->fixups is
	 * either zero or it reflects rip-related fixups.
	 */
	switch (OPCODE1(&insn)) {
	case 0x9d:		/* popf */
		auprobe->def.fixups |= UPROBE_FIX_SETF;
		break;
	case 0xc3: case 0xcb:	/* ret or lret -- ip is correct */
	case 0xc2: case 0xca:
	case 0xea:		/* jmp absolute -- ip is correct */
		fix_ip_or_call = 0;
		break;
	case 0x9a:		/* call absolute - Fix return addr, not ip */
		fix_ip_or_call = UPROBE_FIX_CALL;
		break;
	case 0xff:
		switch (MODRM_REG(&insn)) {
		case 2: case 3:			/* call or lcall, indirect */
			fix_ip_or_call = UPROBE_FIX_CALL;
			break;
		case 4: case 5:			/* jmp or ljmp, indirect */
			fix_ip_or_call = 0;
			break;
		}
		/* fall through */
	default:
		handle_riprel_insn(auprobe, &insn);
	}

	auprobe->def.ilen = insn.length;
	auprobe->def.fixups |= fix_ip_or_call;

	auprobe->ops = &default_xol_ops;
	return 0;
}

/*
 * arch_uprobe_pre_xol - prepare to execute out of line.
 * @auprobe: the probepoint information.
 * @regs: reflects the saved user state of current task.
 */
int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	if (auprobe->ops->pre_xol) {
		int err = auprobe->ops->pre_xol(auprobe, regs);
		if (err)
			return err;
	}

	regs->ip = utask->xol_vaddr;
	utask->autask.saved_trap_nr = current->thread.trap_nr;
	current->thread.trap_nr = UPROBE_TRAP_NR;

	utask->autask.saved_tf = !!(regs->flags & X86_EFLAGS_TF);
	regs->flags |= X86_EFLAGS_TF;
	if (test_tsk_thread_flag(current, TIF_BLOCKSTEP))
		set_task_blockstep(current, false);

	return 0;
}

/*
 * If the xol insn itself traps and generates a signal (say,
 * SIGILL/SIGSEGV/etc), then detect the case where a singlestepped
 * instruction jumps back to its own address.  It is assumed that anything
 * like do_page_fault/do_trap/etc sets thread.trap_nr != -1.
 *
 * arch_uprobe_pre_xol/arch_uprobe_post_xol save/restore thread.trap_nr;
 * arch_uprobe_xol_was_trapped() simply checks that ->trap_nr is not equal to
 * UPROBE_TRAP_NR == -1 set by arch_uprobe_pre_xol().
 */
bool arch_uprobe_xol_was_trapped(struct task_struct *t)
{
	if (t->thread.trap_nr != UPROBE_TRAP_NR)
		return true;

	return false;
}

/*
 * Called after single-stepping.  To avoid the SMP problems that can
 * occur when we temporarily put back the original opcode to
 * single-step, we single-stepped a copy of the instruction.
 *
 * This function prepares to resume execution after the single-step.
 * We have to fix things up as follows:
 *
 * Typically, the new ip is relative to the copied instruction.  We need
 * to make it relative to the original instruction (FIX_IP).  Exceptions
 * are return instructions and absolute or indirect jump or call instructions.
 *
 * If the single-stepped instruction was a call, the return address that
 * is atop the stack is the address following the copied instruction.  We
 * need to make it the address following the original instruction (FIX_CALL).
 *
 * If the original instruction was a rip-relative instruction such as
 * "movl %edx,0xnnnn(%rip)", we have instead executed an equivalent
 * instruction using a scratch register -- e.g., "movl %edx,(%rax)".
 * We need to restore the contents of the scratch register and adjust
 * the ip, keeping in mind that the instruction we executed is 4 bytes
 * shorter than the original instruction (since we squeezed out the offset
 * field).  (FIX_RIP_AX or FIX_RIP_CX)
 */
int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;
	bool send_sigtrap = utask->autask.saved_tf;
	int err = 0;

	WARN_ON_ONCE(current->thread.trap_nr != UPROBE_TRAP_NR);
	current->thread.trap_nr = utask->autask.saved_trap_nr;

	if (auprobe->ops->post_xol) {
		err = auprobe->ops->post_xol(auprobe, regs);
		if (err) {
			/*
			 * Restore ->ip for restart or post mortem analysis.
			 * ->post_xol() must not return -ERESTART unless this
			 * is really possible.
			 */
			regs->ip = utask->vaddr;
			if (err == -ERESTART)
				err = 0;
			send_sigtrap = false;
		}
	}
	/*
	 * arch_uprobe_pre_xol() doesn't save the state of TIF_BLOCKSTEP
	 * so we can get an extra SIGTRAP if we do not clear TF.  We need
	 * to examine the opcode to make it right.
	 */
	if (send_sigtrap)
		send_sig(SIGTRAP, current, 0);

	if (!utask->autask.saved_tf)
		regs->flags &= ~X86_EFLAGS_TF;

	return err;
}

/* callback routine for handling exceptions. */
int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val, void *data)
{
	struct die_args *args = data;
	struct pt_regs *regs = args->regs;
	int ret = NOTIFY_DONE;

	/* We are only interested in userspace traps */
	if (regs && !user_mode_vm(regs))
		return NOTIFY_DONE;

	switch (val) {
	case DIE_INT3:
		if (uprobe_pre_sstep_notifier(regs))
			ret = NOTIFY_STOP;
		break;

	case DIE_DEBUG:
		if (uprobe_post_sstep_notifier(regs))
			ret = NOTIFY_STOP;

	default:
		break;
	}

	return ret;
}

/*
 * This function gets called when the XOL instruction either gets trapped or
 * the thread has a fatal signal.  Reset the instruction pointer to its
 * probed address for the potential restart or for post mortem analysis.
 */
void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	if (auprobe->ops->abort)
		auprobe->ops->abort(auprobe, regs);

	current->thread.trap_nr = utask->autask.saved_trap_nr;
	regs->ip = utask->vaddr;
	/* clear TF if it was set by us in arch_uprobe_pre_xol() */
	if (!utask->autask.saved_tf)
		regs->flags &= ~X86_EFLAGS_TF;
}

static bool __skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	if (auprobe->ops->emulate)
		return auprobe->ops->emulate(auprobe, regs);
	return false;
}

bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	bool ret = __skip_sstep(auprobe, regs);

	if (ret && (regs->flags & X86_EFLAGS_TF))
		send_sig(SIGTRAP, current, 0);

	return ret;
}

unsigned long
arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs)
{
	int rasize = sizeof_long(), nleft;
	unsigned long orig_ret_vaddr = 0; /* clear high bits for 32-bit apps */

	if (copy_from_user(&orig_ret_vaddr, (void __user *)regs->sp, rasize))
		return -1;

	/* check whether address has been already hijacked */
	if (orig_ret_vaddr == trampoline_vaddr)
		return orig_ret_vaddr;

	nleft = copy_to_user((void __user *)regs->sp, &trampoline_vaddr, rasize);
	if (likely(!nleft))
		return orig_ret_vaddr;

	if (nleft != rasize) {
		pr_err("uprobe: return address clobbered: pid=%d, %%sp=%#lx, %%ip=%#lx\n",
			current->pid, regs->sp, regs->ip);
		force_sig_info(SIGSEGV, SEND_SIG_FORCED, current);
	}

	return -1;
}