/*
 * User-space Probes (UProbes) for x86
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2008-2011
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/uprobes.h>
#include <linux/uaccess.h>

#include <linux/kdebug.h>
#include <asm/processor.h>
#include <asm/insn.h>
/* Post-execution fixups. */

/* No fixup needed */
#define UPROBE_FIX_NONE		0x0

/* Adjust IP back to vicinity of actual insn */
#define UPROBE_FIX_IP		0x1

/* Adjust the return address of a call insn */
#define UPROBE_FIX_CALL		0x2

/* Instruction will modify TF, don't change it */
#define UPROBE_FIX_SETF		0x4
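
/*
 * Set when a rip-relative instruction has been rewritten to address its
 * memory operand through a scratch register; the flag records whether
 * %rax or %rcx was chosen.  See handle_riprel_insn().
 */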
#define UPROBE_FIX_RIP_AX	0x8000
#define UPROBE_FIX_RIP_CX	0x4000
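
/*
 * Sentinel stored in current->thread.trap_nr while the probed task
 * single-steps in the XOL area; see arch_uprobe_pre_xol() and
 * arch_uprobe_xol_was_trapped().
 */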
#define	UPROBE_TRAP_NR		UINT_MAX
/* Adaptations for mhiramat x86 decoder v14. */
#define OPCODE1(insn)		((insn)->opcode.bytes[0])
#define OPCODE2(insn)		((insn)->opcode.bytes[1])
#define OPCODE3(insn)		((insn)->opcode.bytes[2])
#define MODRM_REG(insn)		X86_MODRM_REG((insn)->modrm.value)
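
/*
 * W() packs the 16 good-instruction flags of one opcode row (columns
 * 0x0-0xf) into bits (row % 32) .. (row % 32) + 15 of an unsigned long,
 * so each u32 element of the tables below holds two rows OR'ed together.
 */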
#define W(row, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf)\
	(((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) |   \
	  (b4##UL << 0x4)|(b5##UL << 0x5)|(b6##UL << 0x6)|(b7##UL << 0x7) |   \
	  (b8##UL << 0x8)|(b9##UL << 0x9)|(ba##UL << 0xa)|(bb##UL << 0xb) |   \
	  (bc##UL << 0xc)|(bd##UL << 0xd)|(be##UL << 0xe)|(bf##UL << 0xf))    \
	 << (row % 32))
/*
 * Good-instruction tables for 32-bit apps.  This is non-const and volatile
 * to keep gcc from statically optimizing it out, as variable_test_bit makes
 * some versions of gcc think only *(unsigned long *) is used.
 */
static volatile u32 good_insns_32[256 / 32] = {
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
	/*      ----------------------------------------------         */
	W(0x00, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0) | /* 00 */
	W(0x10, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0) , /* 10 */
	W(0x20, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1) | /* 20 */
	W(0x30, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1) , /* 30 */
	W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
	W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 50 */
	W(0x60, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) | /* 60 */
	W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 70 */
	W(0x80, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */
	W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
	W(0xa0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* a0 */
	W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* b0 */
	W(0xc0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* c0 */
	W(0xd0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */
	W(0xe0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) | /* e0 */
	W(0xf0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1)   /* f0 */
	/*      ----------------------------------------------         */
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
};
/* Using this for both 64-bit and 32-bit apps */
static volatile u32 good_2byte_insns[256 / 32] = {
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
	/*      ----------------------------------------------         */
	W(0x00, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1) | /* 00 */
	W(0x10, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1) , /* 10 */
	W(0x20, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1) | /* 20 */
	W(0x30, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 30 */
	W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
	W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 50 */
	W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 60 */
	W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1) , /* 70 */
	W(0x80, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */
	W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
	W(0xa0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1) | /* a0 */
	W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1) , /* b0 */
	W(0xc0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* c0 */
	W(0xd0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */
	W(0xe0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* e0 */
	W(0xf0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0)   /* f0 */
	/*      ----------------------------------------------         */
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
};
#ifdef CONFIG_X86_64
/* Good-instruction tables for 64-bit apps */
static volatile u32 good_insns_64[256 / 32] = {
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
	/*      ----------------------------------------------         */
	W(0x00, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0) | /* 00 */
	W(0x10, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0) , /* 10 */
	W(0x20, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0) | /* 20 */
	W(0x30, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0) , /* 30 */
	W(0x40, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 40 */
	W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 50 */
	W(0x60, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) | /* 60 */
	W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 70 */
	W(0x80, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */
	W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
	W(0xa0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* a0 */
	W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* b0 */
	W(0xc0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* c0 */
	W(0xd0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */
	W(0xe0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) | /* e0 */
	W(0xf0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1)   /* f0 */
	/*      ----------------------------------------------         */
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
};
#endif
#undef W
/*
 * opcodes we'll probably never support:
 *
 *  6c-6d, e4-e5, ec-ed - in
 *  6e-6f, e6-e7, ee-ef - out
 *  d6 - illegal instruction
 *  0f - lar, lsl, syscall, clts, sysret, sysenter, sysexit, invd, wbinvd, ud2
 *
 * invalid opcodes in 64-bit mode:
 *
 *  06, 0e, 16, 1e, 27, 2f, 37, 3f, 60-62, 82, c4-c5, d4-d5
 *  63 - we support this opcode in x86_64 but not in i386.
 *
 * opcodes we may need to refine support for:
 *
 *  0f - 2-byte instructions: For many of these instructions, the validity
 *  depends on the prefix and/or the reg field.  On such instructions, we
 *  just consider the opcode combination valid if it corresponds to any
 *  valid instruction.
 *
 *  8f - Group 1 - only reg = 0 is OK
 *  c6-c7 - Group 11 - only reg = 0 is OK
 *  d9-df - fpu insns with some illegal encodings
 *  f2, f3 - repnz, repz prefixes.  These are also the first byte for
 *  certain floating-point instructions, such as addsd.
 *
 *  fe - Group 4 - only reg = 0 or 1 is OK
 *  ff - Group 5 - only reg = 0-6 is OK
 *
 * others -- Do we need to support these?
 *
 *  0f - (floating-point?) prefetch instructions
 *  07, 17, 1f - pop es, pop ss, pop ds
 *  26, 2e, 36, 3e - es:, cs:, ss:, ds: segment prefixes --
 *	but 64 and 65 (fs: and gs:) seem to be used, so we support them
 *
 * TODO:
 * - Where necessary, examine the modrm byte and allow only valid instructions
 * in the different Groups and fpu instructions.
 */
static bool is_prefix_bad(struct insn *insn)
{
	int i;

	for (i = 0; i < insn->prefixes.nbytes; i++) {
		switch (insn->prefixes.bytes[i]) {
		case 0x26:	/* INAT_PFX_ES   */
		case 0x2E:	/* INAT_PFX_CS   */
		case 0x36:	/* INAT_PFX_DS   */
		case 0x3E:	/* INAT_PFX_SS   */
		case 0xF0:	/* INAT_PFX_LOCK */
			return true;
		}
	}
	return false;
}
static int validate_insn_32bits(struct arch_uprobe *auprobe, struct insn *insn)
{
	insn_init(insn, auprobe->insn, false);

	/* Skip good instruction prefixes; reject "bad" ones. */
	insn_get_opcode(insn);
	if (is_prefix_bad(insn))
		return -ENOTSUPP;
	if (test_bit(OPCODE1(insn), (unsigned long *)good_insns_32))
		return 0;
	if (insn->opcode.nbytes == 2) {
		if (test_bit(OPCODE2(insn), (unsigned long *)good_2byte_insns))
			return 0;
	}
	return -ENOTSUPP;
}
#ifdef CONFIG_X86_64
/*
 * If arch_uprobe->insn doesn't use rip-relative addressing, return
 * immediately.  Otherwise, rewrite the instruction so that it accesses
 * its memory operand indirectly through a scratch register.  Set
 * arch_uprobe->fixups and arch_uprobe->rip_rela_target_address
 * accordingly.  (The contents of the scratch register will be saved
 * before we single-step the modified instruction, and restored
 * afterward.)
 *
 * We do this because a rip-relative instruction can access only a
 * relatively small area (+/- 2 GB from the instruction), and the XOL
 * area typically lies beyond that area.  At least for instructions
 * that store to memory, we can't execute the original instruction
 * and "fix things up" later, because the misdirected store could be
 * observed before the fixup is applied.
 *
 * Some useful facts about rip-relative instructions:
 *
 *  - There's always a modrm byte.
 *  - There's never a SIB byte.
 *  - The displacement is always 4 bytes.
 */
static void
handle_riprel_insn(struct arch_uprobe *auprobe, struct insn *insn)
{
	u8 *cursor;
	u8 reg;

	if (!insn_rip_relative(insn))
		return;
	/*
	 * insn_rip_relative() would have decoded rex_prefix, modrm.
	 * Clear REX.b bit (extension of MODRM.rm field):
	 * we want to encode rax/rcx, not r8/r9.
	 */
	if (insn->rex_prefix.nbytes) {
		cursor = auprobe->insn + insn_offset_rex_prefix(insn);
		*cursor &= 0xfe;	/* Clearing REX.B bit */
	}
	/*
	 * Point cursor at the modrm byte.  The next 4 bytes are the
	 * displacement.  Beyond the displacement, for some instructions,
	 * is the immediate operand.
	 */
	cursor = auprobe->insn + insn_offset_modrm(insn);
	insn_get_length(insn);
	/*
	 * Convert from rip-relative addressing to indirect addressing
	 * via a scratch register.  Change the r/m field from 0x5 (%rip)
	 * to 0x0 (%rax) or 0x1 (%rcx), and squeeze out the offset field.
	 */
	reg = MODRM_REG(insn);
	if (reg == 0) {
		/*
		 * The register operand (if any) is either the A register
		 * (%rax, %eax, etc.) or (if the 0x4 bit is set in the
		 * REX prefix) %r8.  In any case, we know the C register
		 * is NOT the register operand, so we use %rcx (register
		 * #1) for the scratch register.
		 */
		auprobe->fixups = UPROBE_FIX_RIP_CX;
		/* Change modrm from 00 000 101 to 00 000 001. */
		*cursor = 0x1;
	} else {
		/* Use %rax (register #0) for the scratch register. */
		auprobe->fixups = UPROBE_FIX_RIP_AX;
		/* Change modrm from 00 xxx 101 to 00 xxx 000 */
		*cursor = (reg << 3);
	}
	/* Target address = address of next instruction + (signed) offset */
	auprobe->rip_rela_target_address = (long)insn->length + insn->displacement.value;

	/* Displacement field is gone; slide immediate field (if any) over. */
	if (insn->immediate.nbytes) {
		cursor++;
		memmove(cursor, cursor + insn->displacement.nbytes, insn->immediate.nbytes);
	}
}
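
/*
 * A worked example of the rewrite above (illustrative, not from the
 * original source): probing
 *	mov 0x1234(%rip),%edx  =  8b 15 34 12 00 00
 * yields modrm.reg = 2, so %rax is the scratch register: the modrm byte
 * 0x15 (00 010 101) becomes 0x10 (00 010 000) and the four displacement
 * bytes are squeezed out, leaving "8b 10" = mov (%rax),%edx, with %rax
 * loaded with the original target address before the single-step.
 */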
/*
 * If we're emulating a rip-relative instruction, save the contents
 * of the scratch register and store the target address in that register.
 */
static void
pre_xol_rip_insn(struct arch_uprobe *auprobe, struct pt_regs *regs,
				struct arch_uprobe_task *autask)
{
	if (auprobe->fixups & UPROBE_FIX_RIP_AX) {
		autask->saved_scratch_register = regs->ax;
		regs->ax = current->utask->vaddr;
		regs->ax += auprobe->rip_rela_target_address;
	} else if (auprobe->fixups & UPROBE_FIX_RIP_CX) {
		autask->saved_scratch_register = regs->cx;
		regs->cx = current->utask->vaddr;
		regs->cx += auprobe->rip_rela_target_address;
	}
}
static void
handle_riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs, long *correction)
{
	if (auprobe->fixups & (UPROBE_FIX_RIP_AX | UPROBE_FIX_RIP_CX)) {
		struct arch_uprobe_task *autask;

		autask = &current->utask->autask;
		if (auprobe->fixups & UPROBE_FIX_RIP_AX)
			regs->ax = autask->saved_scratch_register;
		else
			regs->cx = autask->saved_scratch_register;

		/*
		 * The original instruction includes a displacement, and so
		 * is 4 bytes longer than what we've just single-stepped.
		 * Caller may need to apply other fixups to handle stuff
		 * like "jmpq *...(%rip)" and "callq *...(%rip)".
		 */
		if (correction)
			*correction += 4;
	}
}
static int validate_insn_64bits(struct arch_uprobe *auprobe, struct insn *insn)
{
	insn_init(insn, auprobe->insn, true);

	/* Skip good instruction prefixes; reject "bad" ones. */
	insn_get_opcode(insn);
	if (is_prefix_bad(insn))
		return -ENOTSUPP;
	if (test_bit(OPCODE1(insn), (unsigned long *)good_insns_64))
		return 0;
	if (insn->opcode.nbytes == 2) {
		if (test_bit(OPCODE2(insn), (unsigned long *)good_2byte_insns))
			return 0;
	}
	return -ENOTSUPP;
}
static int validate_insn_bits(struct arch_uprobe *auprobe, struct mm_struct *mm, struct insn *insn)
{
	if (mm->context.ia32_compat)
		return validate_insn_32bits(auprobe, insn);
	return validate_insn_64bits(auprobe, insn);
}
#else /* 32-bit: */
/*
 * No RIP-relative addressing on 32-bit
 */
static void handle_riprel_insn(struct arch_uprobe *auprobe, struct insn *insn)
{
}
static void pre_xol_rip_insn(struct arch_uprobe *auprobe, struct pt_regs *regs,
				struct arch_uprobe_task *autask)
{
}
static void handle_riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs,
					long *correction)
{
}

static int validate_insn_bits(struct arch_uprobe *auprobe, struct mm_struct *mm, struct insn *insn)
{
	return validate_insn_32bits(auprobe, insn);
}
#endif /* CONFIG_X86_64 */
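
/*
 * Per-instruction callbacks.  If ->emulate() handles the instruction
 * outright, no out-of-line step is needed; otherwise ->pre_xol() and
 * ->post_xol() bracket the single-step of the instruction copy in the
 * XOL area.  See __skip_sstep(), arch_uprobe_pre_xol() and
 * arch_uprobe_post_xol().
 */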
struct uprobe_xol_ops {
	bool	(*emulate)(struct arch_uprobe *, struct pt_regs *);
	int	(*pre_xol)(struct arch_uprobe *, struct pt_regs *);
	int	(*post_xol)(struct arch_uprobe *, struct pt_regs *);
};
static int default_pre_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	pre_xol_rip_insn(auprobe, regs, &current->utask->autask);
	return 0;
}
/*
 * Adjust the return address pushed by a call insn executed out of line.
 */
static int adjust_ret_addr(unsigned long sp, long correction)
{
	int rasize = is_ia32_task() ? 4 : 8;
	int ncopied;
	long ra = 0;

	ncopied = copy_from_user(&ra, (void __user *)sp, rasize);
	if (unlikely(ncopied))
		return -EFAULT;

	ra += correction;
	ncopied = copy_to_user((void __user *)sp, &ra, rasize);
	if (unlikely(ncopied))
		return -EFAULT;

	return 0;
}
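
/*
 * Illustrative (not from the original source): a 5-byte "call" probed at
 * vaddr but executed at xol_vaddr pushes xol_vaddr + 5 on the stack;
 * adding correction = vaddr - xol_vaddr turns that into vaddr + 5, the
 * return address the original call would have pushed.
 */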
static int default_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;
	long correction = (long)(utask->vaddr - utask->xol_vaddr);
	int ret = 0;

	handle_riprel_post_xol(auprobe, regs, &correction);
	if (auprobe->fixups & UPROBE_FIX_IP)
		regs->ip += correction;

	if (auprobe->fixups & UPROBE_FIX_CALL)
		ret = adjust_ret_addr(regs->sp, correction);

	return ret;
}
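
/*
 * Likewise for UPROBE_FIX_IP (illustrative): after single-stepping the
 * copy, regs->ip is xol_vaddr + insn_length; adding the same correction
 * rebases it to vaddr + insn_length, i.e. the instruction following the
 * probed one.
 */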
static struct uprobe_xol_ops default_xol_ops = {
	.pre_xol  = default_pre_xol_op,
	.post_xol = default_post_xol_op,
};
/*
 * arch_uprobe_analyze_insn - instruction analysis including validity and fixups.
 * @auprobe: the probepoint information.
 * @mm: the probed address space.
 * @addr: virtual address at which to install the probepoint
 * Return 0 on success or a -ve number on error.
 */
int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long addr)
{
	struct insn insn;
	bool fix_ip = true, fix_call = false;
	int ret;

	ret = validate_insn_bits(auprobe, mm, &insn);
	if (ret)
		return ret;

	/*
	 * Figure out which fixups arch_uprobe_post_xol() will need to perform,
	 * and annotate arch_uprobe->fixups accordingly. To start with, ->fixups
	 * is either zero or it reflects rip-related fixups.
	 */
	handle_riprel_insn(auprobe, &insn);

	switch (OPCODE1(&insn)) {
	case 0x9d:		/* popf */
		auprobe->fixups |= UPROBE_FIX_SETF;
		break;
	case 0xc3:		/* ret or lret -- ip is correct */
	case 0xcb:
	case 0xc2:
	case 0xca:
		fix_ip = false;
		break;
	case 0xe8:		/* call relative - Fix return addr */
		fix_call = true;
		break;
	case 0x9a:		/* call absolute - Fix return addr, not ip */
		fix_call = true;
		fix_ip = false;
		break;
	case 0xea:		/* jmp absolute -- ip is correct */
		fix_ip = false;
		break;
	case 0xff:
		insn_get_modrm(&insn);
		switch (MODRM_REG(&insn)) {
		case 2: case 3:			/* call or lcall, indirect */
			fix_call = true;
		case 4: case 5:			/* jmp or ljmp, indirect */
			fix_ip = false;
		}
		break;
	}

	if (fix_ip)
		auprobe->fixups |= UPROBE_FIX_IP;
	if (fix_call)
		auprobe->fixups |= UPROBE_FIX_CALL;

	auprobe->ops = &default_xol_ops;
	return 0;
}
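
/*
 * An illustrative walk-through (not from the original source): for an
 * indirect call such as "call *%rax" (ff d0), OPCODE1 is 0xff and
 * MODRM_REG is 2, so fix_call becomes true and, via the intentional
 * fall-through in the inner switch above, fix_ip becomes false: the new
 * ip produced by the call is already absolute, but the pushed return
 * address still needs adjusting.
 */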
/*
 * arch_uprobe_pre_xol - prepare to execute out of line.
 * @auprobe: the probepoint information.
 * @regs: reflects the saved user state of current task.
 */
int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	regs->ip = utask->xol_vaddr;
	utask->autask.saved_trap_nr = current->thread.trap_nr;
	current->thread.trap_nr = UPROBE_TRAP_NR;

	utask->autask.saved_tf = !!(regs->flags & X86_EFLAGS_TF);
	regs->flags |= X86_EFLAGS_TF;
	if (test_tsk_thread_flag(current, TIF_BLOCKSTEP))
		set_task_blockstep(current, false);

	if (auprobe->ops->pre_xol)
		return auprobe->ops->pre_xol(auprobe, regs);
	return 0;
}
/*
 * If the xol insn itself traps and generates a signal (say,
 * SIGILL/SIGSEGV/etc), then detect the case where a singlestepped
 * instruction jumps back to its own address. It is assumed that anything
 * like do_page_fault/do_trap/etc sets thread.trap_nr != -1.
 *
 * arch_uprobe_pre_xol/arch_uprobe_post_xol save/restore thread.trap_nr,
 * arch_uprobe_xol_was_trapped() simply checks that ->trap_nr is not equal to
 * UPROBE_TRAP_NR == -1 set by arch_uprobe_pre_xol().
 */
bool arch_uprobe_xol_was_trapped(struct task_struct *t)
{
	if (t->thread.trap_nr != UPROBE_TRAP_NR)
		return true;

	return false;
}
/*
 * Called after single-stepping.  To avoid the SMP problems that can
 * occur when we temporarily put back the original opcode to
 * single-step, we single-stepped a copy of the instruction.
 *
 * This function prepares to resume execution after the single-step.
 * We have to fix things up as follows:
 *
 * Typically, the new ip is relative to the copied instruction.  We need
 * to make it relative to the original instruction (FIX_IP).  Exceptions
 * are return instructions and absolute or indirect jump or call instructions.
 *
 * If the single-stepped instruction was a call, the return address that
 * is atop the stack is the address following the copied instruction.  We
 * need to make it the address following the original instruction (FIX_CALL).
 *
 * If the original instruction was a rip-relative instruction such as
 * "movl %edx,0xnnnn(%rip)", we have instead executed an equivalent
 * instruction using a scratch register -- e.g., "movl %edx,(%rax)".
 * We need to restore the contents of the scratch register and adjust
 * the ip, keeping in mind that the instruction we executed is 4 bytes
 * shorter than the original instruction (since we squeezed out the offset
 * field).  (FIX_RIP_AX or FIX_RIP_CX)
 */
int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	WARN_ON_ONCE(current->thread.trap_nr != UPROBE_TRAP_NR);
	current->thread.trap_nr = utask->autask.saved_trap_nr;

	/*
	 * arch_uprobe_pre_xol() doesn't save the state of TIF_BLOCKSTEP
	 * so we can get an extra SIGTRAP if we do not clear TF. We need
	 * to examine the opcode to make it right.
	 */
	if (utask->autask.saved_tf)
		send_sig(SIGTRAP, current, 0);
	else if (!(auprobe->fixups & UPROBE_FIX_SETF))
		regs->flags &= ~X86_EFLAGS_TF;

	if (auprobe->ops->post_xol)
		return auprobe->ops->post_xol(auprobe, regs);
	return 0;
}
/* callback routine for handling exceptions. */
int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val, void *data)
{
	struct die_args *args = data;
	struct pt_regs *regs = args->regs;
	int ret = NOTIFY_DONE;

	/* We are only interested in userspace traps */
	if (regs && !user_mode_vm(regs))
		return NOTIFY_DONE;

	switch (val) {
	case DIE_INT3:
		if (uprobe_pre_sstep_notifier(regs))
			ret = NOTIFY_STOP;
		break;

	case DIE_DEBUG:
		if (uprobe_post_sstep_notifier(regs))
			ret = NOTIFY_STOP;

	default:
		break;
	}

	return ret;
}
/*
 * This function gets called when the XOL instruction either gets trapped or
 * the thread has a fatal signal, so reset the instruction pointer to its
 * probed address.
 */
void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	current->thread.trap_nr = utask->autask.saved_trap_nr;
	handle_riprel_post_xol(auprobe, regs, NULL);
	instruction_pointer_set(regs, utask->vaddr);

	/* clear TF if it was set by us in arch_uprobe_pre_xol() */
	if (!utask->autask.saved_tf)
		regs->flags &= ~X86_EFLAGS_TF;
}
/*
 * Skip these instructions as per the currently known x86 ISA.
 * 0x66* 0x90 - i.e., a nop (0x90) preceded by any number of 0x66
 * (operand-size) prefixes.
 */
static bool __skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	int i;

	if (auprobe->ops->emulate)
		return auprobe->ops->emulate(auprobe, regs);

	/* TODO: move this code into ->emulate() hook */
	for (i = 0; i < MAX_UINSN_BYTES; i++) {
		if (auprobe->insn[i] == 0x66)
			continue;

		if (auprobe->insn[i] == 0x90) {
			regs->ip += i + 1;
			return true;
		}

		break;
	}
	return false;
}
bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	bool ret = __skip_sstep(auprobe, regs);

	if (ret && (regs->flags & X86_EFLAGS_TF))
		send_sig(SIGTRAP, current, 0);

	return ret;
}
unsigned long
arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs)
{
	int rasize, ncopied;
	unsigned long orig_ret_vaddr = 0; /* clear high bits for 32-bit apps */

	rasize = is_ia32_task() ? 4 : 8;
	ncopied = copy_from_user(&orig_ret_vaddr, (void __user *)regs->sp, rasize);
	if (unlikely(ncopied))
		return -1;

	/* check whether address has been already hijacked */
	if (orig_ret_vaddr == trampoline_vaddr)
		return orig_ret_vaddr;

	ncopied = copy_to_user((void __user *)regs->sp, &trampoline_vaddr, rasize);
	if (likely(!ncopied))
		return orig_ret_vaddr;

	if (ncopied != rasize) {
		pr_err("uprobe: return address clobbered: pid=%d, %%sp=%#lx, "
			"%%ip=%#lx\n", current->pid, regs->sp, regs->ip);
		force_sig_info(SIGSEGV, SEND_SIG_FORCED, current);
	}

	return -1;
}