/*
 * User-space Probes (UProbes) for x86
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2008-2011
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/uprobes.h>
#include <linux/uaccess.h>

#include <linux/kdebug.h>
#include <asm/processor.h>
#include <asm/insn.h>
/* Post-execution fixups. */

/* Adjust IP back to vicinity of actual insn */
#define UPROBE_FIX_IP		0x1

/* Adjust the return address of a call insn */
#define UPROBE_FIX_CALL		0x2

/* Instruction will modify TF, don't change it */
#define UPROBE_FIX_SETF		0x4

#define UPROBE_FIX_RIP_AX	0x8000
#define UPROBE_FIX_RIP_CX	0x4000

#define	UPROBE_TRAP_NR		UINT_MAX
/* Adaptations for mhiramat x86 decoder v14. */
#define OPCODE1(insn)		((insn)->opcode.bytes[0])
#define OPCODE2(insn)		((insn)->opcode.bytes[1])
#define OPCODE3(insn)		((insn)->opcode.bytes[2])
#define MODRM_REG(insn)		X86_MODRM_REG((insn)->modrm.value)
#define W(row, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf)\
	(((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) |   \
	  (b4##UL << 0x4)|(b5##UL << 0x5)|(b6##UL << 0x6)|(b7##UL << 0x7) |   \
	  (b8##UL << 0x8)|(b9##UL << 0x9)|(ba##UL << 0xa)|(bb##UL << 0xb) |   \
	  (bc##UL << 0xc)|(bd##UL << 0xd)|(be##UL << 0xe)|(bf##UL << 0xf))    \
	 << (row % 32))
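
/*
 * Illustration (editor's note, not from the original source): each u32
 * element of the tables below packs two W() rows of 16 opcodes, so the
 * bit for opcode "op" is bit (op % 32) of element (op / 32).  E.g. in
 * good_insns_32, row W(0x00, ...) has its b7 entry 0, so
 *
 *	test_bit(0x07, (unsigned long *)good_insns_32)
 *
 * is 0 and opcode 0x07 (pop %es) is rejected; row W(0x10, ...) is OR'ed
 * into the same element shifted left by 16 via the (row % 32) term.
 */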
/*
 * Good-instruction tables for 32-bit apps.  This is non-const and volatile
 * to keep gcc from statically optimizing it out, as variable_test_bit makes
 * some versions of gcc think only *(unsigned long*) is used.
 */
static volatile u32 good_insns_32[256 / 32] = {
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
	/*      ----------------------------------------------         */
	W(0x00, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0) | /* 00 */
	W(0x10, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0) , /* 10 */
	W(0x20, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1) | /* 20 */
	W(0x30, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1) , /* 30 */
	W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
	W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 50 */
	W(0x60, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) | /* 60 */
	W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 70 */
	W(0x80, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */
	W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
	W(0xa0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* a0 */
	W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* b0 */
	W(0xc0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* c0 */
	W(0xd0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */
	W(0xe0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) | /* e0 */
	W(0xf0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1)   /* f0 */
	/*      ----------------------------------------------         */
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
};
/* Using this for both 64-bit and 32-bit apps */
static volatile u32 good_2byte_insns[256 / 32] = {
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
	/*      ----------------------------------------------         */
	W(0x00, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1) | /* 00 */
	W(0x10, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1) , /* 10 */
	W(0x20, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1) | /* 20 */
	W(0x30, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 30 */
	W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
	W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 50 */
	W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 60 */
	W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1) , /* 70 */
	W(0x80, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */
	W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
	W(0xa0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1) | /* a0 */
	W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1) , /* b0 */
	W(0xc0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* c0 */
	W(0xd0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */
	W(0xe0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* e0 */
	W(0xf0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0)   /* f0 */
	/*      ----------------------------------------------         */
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
};
/* Good-instruction tables for 64-bit apps */
static volatile u32 good_insns_64[256 / 32] = {
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
	/*      ----------------------------------------------         */
	W(0x00, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0) | /* 00 */
	W(0x10, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0) , /* 10 */
	W(0x20, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0) | /* 20 */
	W(0x30, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0) , /* 30 */
	W(0x40, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 40 */
	W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 50 */
	W(0x60, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) | /* 60 */
	W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 70 */
	W(0x80, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */
	W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
	W(0xa0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* a0 */
	W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* b0 */
	W(0xc0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* c0 */
	W(0xd0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */
	W(0xe0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) | /* e0 */
	W(0xf0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1)   /* f0 */
	/*      ----------------------------------------------         */
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
};
/*
 * opcodes we'll probably never support:
 *
 *  6c-6d, e4-e5, ec-ed - in
 *  6e-6f, e6-e7, ee-ef - out
 *  d6 - illegal instruction
 *  0f - lar, lsl, syscall, clts, sysret, sysenter, sysexit, invd, wbinvd, ud2
 *
 * invalid opcodes in 64-bit mode:
 *
 *  06, 0e, 16, 1e, 27, 2f, 37, 3f, 60-62, 82, c4-c5, d4-d5
 *  63 - we support this opcode in x86_64 but not in i386.
 *
 * opcodes we may need to refine support for:
 *
 *  0f - 2-byte instructions: For many of these instructions, the validity
 *  depends on the prefix and/or the reg field.  On such instructions, we
 *  just consider the opcode combination valid if it corresponds to any
 *  valid instruction.
 *
 *  8f - Group 1 - only reg = 0 is OK
 *  c6-c7 - Group 11 - only reg = 0 is OK
 *  d9-df - fpu insns with some illegal encodings
 *  f2, f3 - repnz, repz prefixes.  These are also the first byte for
 *  certain floating-point instructions, such as addsd.
 *
 *  fe - Group 4 - only reg = 0 or 1 is OK
 *  ff - Group 5 - only reg = 0-6 is OK
 *
 * others -- Do we need to support these?
 *
 *  0f - (floating-point?) prefetch instructions
 *  07, 17, 1f - pop es, pop ss, pop ds
 *  26, 2e, 36, 3e - es:, cs:, ss:, ds: segment prefixes --
 *	but 64 and 65 (fs: and gs:) seem to be used, so we support them
 *
 * TODO:
 * - Where necessary, examine the modrm byte and allow only valid instructions
 *   in the different Groups and fpu instructions.
 */
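
/*
 * Editor's sketch of the TODO above (hypothetical helper, not part of
 * the original file): a Group 5 refinement could examine the modrm reg
 * field through the MODRM_REG() accessor defined earlier, e.g.
 *
 *	static bool group5_modrm_ok(struct insn *insn)
 *	{
 *		return MODRM_REG(insn) != 7;
 *	}
 *
 * since only reg = 0-6 encodes a defined instruction for opcode 0xff.
 */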
static bool is_prefix_bad(struct insn *insn)
{
	int i;

	for (i = 0; i < insn->prefixes.nbytes; i++) {
		switch (insn->prefixes.bytes[i]) {
		case 0x26:	/* INAT_PFX_ES   */
		case 0x2E:	/* INAT_PFX_CS   */
		case 0x36:	/* INAT_PFX_DS   */
		case 0x3E:	/* INAT_PFX_SS   */
		case 0xF0:	/* INAT_PFX_LOCK */
			return true;
		}
	}
	return false;
}
static int uprobe_init_insn(struct arch_uprobe *auprobe, struct insn *insn, bool x86_64)
{
	u32 volatile *good_insns;

	insn_init(insn, auprobe->insn, x86_64);
	/* has the side-effect of processing the entire instruction */
	insn_get_length(insn);
	if (WARN_ON_ONCE(!insn_complete(insn)))
		return -ENOEXEC;

	if (is_prefix_bad(insn))
		return -ENOTSUPP;

	if (x86_64)
		good_insns = good_insns_64;
	else
		good_insns = good_insns_32;

	if (test_bit(OPCODE1(insn), (unsigned long *)good_insns))
		return 0;

	if (insn->opcode.nbytes == 2) {
		if (test_bit(OPCODE2(insn), (unsigned long *)good_2byte_insns))
			return 0;
	}

	return -ENOTSUPP;
}
#ifdef CONFIG_X86_64
static inline bool is_64bit_mm(struct mm_struct *mm)
{
	return	!config_enabled(CONFIG_IA32_EMULATION) ||
		!mm->context.ia32_compat;
}
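
/*
 * Example (editor's note): with CONFIG_IA32_EMULATION enabled, a 32-bit
 * compat task has mm->context.ia32_compat set, so is_64bit_mm() returns
 * false and uprobe_init_insn() validates against good_insns_32; a native
 * 64-bit task leaves ia32_compat clear and is checked against
 * good_insns_64.
 */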
/*
 * If arch_uprobe->insn doesn't use rip-relative addressing, return
 * immediately.  Otherwise, rewrite the instruction so that it accesses
 * its memory operand indirectly through a scratch register.  Set
 * arch_uprobe->fixups and arch_uprobe->rip_rela_target_address
 * accordingly.  (The contents of the scratch register will be saved
 * before we single-step the modified instruction, and restored
 * afterward.)
 *
 * We do this because a rip-relative instruction can access only a
 * relatively small area (+/- 2 GB from the instruction), and the XOL
 * area typically lies beyond that area.  At least for instructions
 * that store to memory, we can't execute the original instruction
 * and "fix things up" later, because the misdirected store could be
 * observed by other tasks.
 *
 * Some useful facts about rip-relative instructions:
 *
 *  - There's always a modrm byte.
 *  - There's never a SIB byte.
 *  - The displacement is always 4 bytes.
 */
static void
handle_riprel_insn(struct arch_uprobe *auprobe, struct insn *insn)
{
	u8 *cursor;
	u8 reg;

	if (!insn_rip_relative(insn))
		return;
	/*
	 * insn_rip_relative() would have decoded rex_prefix, modrm.
	 * Clear REX.b bit (extension of MODRM.rm field):
	 * we want to encode rax/rcx, not r8/r9.
	 */
	if (insn->rex_prefix.nbytes) {
		cursor = auprobe->insn + insn_offset_rex_prefix(insn);
		*cursor &= 0xfe;	/* Clearing REX.B bit */
	}
	/*
	 * Point cursor at the modrm byte.  The next 4 bytes are the
	 * displacement.  Beyond the displacement, for some instructions,
	 * is the immediate operand.
	 */
	cursor = auprobe->insn + insn_offset_modrm(insn);
	/*
	 * Convert from rip-relative addressing to indirect addressing
	 * via a scratch register.  Change the r/m field from 0x5 (%rip)
	 * to 0x0 (%rax) or 0x1 (%rcx), and squeeze out the offset field.
	 */
	reg = MODRM_REG(insn);
	if (reg == 0) {
		/*
		 * The register operand (if any) is either the A register
		 * (%rax, %eax, etc.) or (if the 0x4 bit is set in the
		 * REX prefix) %r8.  In any case, we know the C register
		 * is NOT the register operand, so we use %rcx (register
		 * #1) for the scratch register.
		 */
		auprobe->fixups = UPROBE_FIX_RIP_CX;
		/* Change modrm from 00 000 101 to 00 000 001. */
		*cursor = 0x1;
	} else {
		/* Use %rax (register #0) for the scratch register. */
		auprobe->fixups = UPROBE_FIX_RIP_AX;
		/* Change modrm from 00 xxx 101 to 00 xxx 000 */
		*cursor = (reg << 3);
	}
	/* Target address = address of next instruction + (signed) offset */
	auprobe->rip_rela_target_address = (long)insn->length + insn->displacement.value;

	/* Displacement field is gone; slide immediate field (if any) over. */
	if (insn->immediate.nbytes) {
		cursor++;
		memmove(cursor, cursor + insn->displacement.nbytes, insn->immediate.nbytes);
	}
}
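
/*
 * Worked example (editor's illustration, not from the original source):
 * "movl %edx, 0x1000(%rip)" encodes as 89 15 00 10 00 00; modrm 0x15 is
 * mod=00, reg=010 (%edx), rm=101 (%rip).  Since reg != 0, %rax becomes
 * the scratch register: the modrm byte is rewritten to (2 << 3) = 0x10,
 * i.e. "movl %edx, (%rax)", and the four displacement bytes are squeezed
 * out.  rip_rela_target_address = 6 + 0x1000, the target's offset from
 * the probed address, which pre_xol_rip_insn() below adds to the vaddr
 * it loads into the scratch register.
 */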
/*
 * If we're emulating a rip-relative instruction, save the contents
 * of the scratch register and store the target address in that register.
 */
static void
pre_xol_rip_insn(struct arch_uprobe *auprobe, struct pt_regs *regs,
				struct arch_uprobe_task *autask)
{
	if (auprobe->fixups & UPROBE_FIX_RIP_AX) {
		autask->saved_scratch_register = regs->ax;
		regs->ax = current->utask->vaddr;
		regs->ax += auprobe->rip_rela_target_address;
	} else if (auprobe->fixups & UPROBE_FIX_RIP_CX) {
		autask->saved_scratch_register = regs->cx;
		regs->cx = current->utask->vaddr;
		regs->cx += auprobe->rip_rela_target_address;
	}
}
static void
handle_riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs, long *correction)
{
	if (auprobe->fixups & (UPROBE_FIX_RIP_AX | UPROBE_FIX_RIP_CX)) {
		struct arch_uprobe_task *autask;

		autask = &current->utask->autask;
		if (auprobe->fixups & UPROBE_FIX_RIP_AX)
			regs->ax = autask->saved_scratch_register;
		else
			regs->cx = autask->saved_scratch_register;

		/*
		 * The original instruction includes a displacement, and so
		 * is 4 bytes longer than what we've just single-stepped.
		 * Caller may need to apply other fixups to handle stuff
		 * like "jmpq *...(%rip)" and "callq *...(%rip)".
		 */
		if (correction)
			*correction += 4;
	}
}
#else /* 32-bit: */
static inline bool is_64bit_mm(struct mm_struct *mm)
{
	return false;
}
/*
 * No RIP-relative addressing on 32-bit
 */
static void handle_riprel_insn(struct arch_uprobe *auprobe, struct insn *insn)
{
}
static void pre_xol_rip_insn(struct arch_uprobe *auprobe, struct pt_regs *regs,
				struct arch_uprobe_task *autask)
{
}
static void handle_riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs,
					long *correction)
{
}
#endif /* CONFIG_X86_64 */
struct uprobe_xol_ops {
	bool	(*emulate)(struct arch_uprobe *, struct pt_regs *);
	int	(*pre_xol)(struct arch_uprobe *, struct pt_regs *);
	int	(*post_xol)(struct arch_uprobe *, struct pt_regs *);
};
static inline int sizeof_long(void)
{
	return is_ia32_task() ? 4 : 8;
}
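
/*
 * Editor's note: sizeof_long() sizes stack slots such as return
 * addresses; e.g. adjust_ret_addr() below copies 4 bytes for a 32-bit
 * (ia32) task and 8 bytes for a 64-bit task.
 */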
static int default_pre_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	pre_xol_rip_insn(auprobe, regs, &current->utask->autask);
	return 0;
}
/*
 * Adjust the return address pushed by a call insn executed out of line.
 */
static int adjust_ret_addr(unsigned long sp, long correction)
{
	int rasize = sizeof_long();
	long ra;

	if (copy_from_user(&ra, (void __user *)sp, rasize))
		return -EFAULT;

	ra += correction;
	if (copy_to_user((void __user *)sp, &ra, rasize))
		return -EFAULT;

	return 0;
}
static int default_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;
	long correction = (long)(utask->vaddr - utask->xol_vaddr);

	handle_riprel_post_xol(auprobe, regs, &correction);
	if (auprobe->fixups & UPROBE_FIX_IP)
		regs->ip += correction;

	if (auprobe->fixups & UPROBE_FIX_CALL) {
		if (adjust_ret_addr(regs->sp, correction)) {
			regs->sp += sizeof_long();
			return -ERESTART;
		}
	}

	return 0;
}
static struct uprobe_xol_ops default_xol_ops = {
	.pre_xol  = default_pre_xol_op,
	.post_xol = default_post_xol_op,
};
static bool branch_is_call(struct arch_uprobe *auprobe)
{
	return auprobe->branch.opc1 == 0xe8;
}
#define CASE_COND					\
	COND(70, 71, XF(OF))				\
	COND(72, 73, XF(CF))				\
	COND(74, 75, XF(ZF))				\
	COND(78, 79, XF(SF))				\
	COND(7a, 7b, XF(PF))				\
	COND(76, 77, XF(CF) || XF(ZF))			\
	COND(7c, 7d, XF(SF) != XF(OF))			\
	COND(7e, 7f, XF(ZF) || XF(SF) != XF(OF))
#define COND(op_y, op_n, expr)				\
	case 0x ## op_y: DO((expr) != 0)		\
	case 0x ## op_n: DO((expr) == 0)

#define XF(xf)	(!!(flags & X86_EFLAGS_ ## xf))
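
/*
 * Expansion example (editor's note): inside check_jmp_cond() below, with
 * DO(expr) defined as "return expr;", COND(72, 73, XF(CF)) expands to
 *
 *	case 0x72: return ((!!(flags & X86_EFLAGS_CF)) != 0);
 *	case 0x73: return ((!!(flags & X86_EFLAGS_CF)) == 0);
 *
 * i.e. jb (0x72) is taken iff CF is set, jae (0x73) iff CF is clear.
 */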
static bool is_cond_jmp_opcode(u8 opcode)
{
	switch (opcode) {
	#define DO(expr)	\
		return true;
	CASE_COND
	#undef	DO

	default:
		return false;
	}
}
static bool check_jmp_cond(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	unsigned long flags = regs->flags;

	switch (auprobe->branch.opc1) {
	#define DO(expr)	\
		return expr;
	CASE_COND
	#undef	DO

	default: /* not a conditional jmp */
		return true;
	}
}

#undef	XF
#undef	COND
#undef	CASE_COND
static bool branch_emulate_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	unsigned long new_ip = regs->ip += auprobe->branch.ilen;
	unsigned long offs = (long)auprobe->branch.offs;

	if (branch_is_call(auprobe)) {
		unsigned long new_sp = regs->sp - sizeof_long();
		/*
		 * If it fails we execute this (mangled, see the comment in
		 * branch_clear_offset) insn out-of-line.  In the likely case
		 * this should trigger the trap, and the probed application
		 * should die or restart the same insn after it handles the
		 * signal, arch_uprobe_post_xol() won't be even called.
		 *
		 * But there is a corner case, see the comment in ->post_xol().
		 */
		if (copy_to_user((void __user *)new_sp, &new_ip, sizeof_long()))
			return false;
		regs->sp = new_sp;
	} else if (!check_jmp_cond(auprobe, regs)) {
		offs = 0;
	}

	regs->ip = new_ip + offs;
	return true;
}
static int branch_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	BUG_ON(!branch_is_call(auprobe));
	/*
	 * We can only get here if branch_emulate_op() failed to push the ret
	 * address _and_ another thread expanded our stack before the (mangled)
	 * "call" insn was executed out-of-line.  Just restore ->sp and restart.
	 * We could also restore ->ip and try to call branch_emulate_op() again.
	 */
	regs->sp += sizeof_long();
	return -ERESTART;
}
static void branch_clear_offset(struct arch_uprobe *auprobe, struct insn *insn)
{
	/*
	 * Turn this insn into "call 1f; 1:", this is what we will execute
	 * out-of-line if ->emulate() fails.  We only need this to generate
	 * a trap, so that the probed task receives the correct signal with
	 * the properly filled siginfo.
	 *
	 * But see the comment in ->post_xol(), in the unlikely case it can
	 * succeed.  So we need to ensure that the new ->ip can not fall into
	 * the non-canonical area and trigger #GP.
	 *
	 * We could turn it into (say) "pushf", but then we would need to
	 * divorce ->insn[] and ->ixol[].  We need to preserve the 1st byte
	 * of ->insn[] for set_orig_insn().
	 */
	memset(auprobe->insn + insn_offset_immediate(insn),
		0, insn->immediate.nbytes);
}
static struct uprobe_xol_ops branch_xol_ops = {
	.emulate  = branch_emulate_op,
	.post_xol = branch_post_xol_op,
};
/* Returns -ENOSYS if branch_xol_ops doesn't handle this insn */
static int branch_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn)
{
	u8 opc1 = OPCODE1(insn);
	int i;

	switch (opc1) {
	case 0xeb:	/* jmp 8 */
	case 0xe9:	/* jmp 32 */
	case 0x90:	/* prefix* + nop; same as jmp with .offs = 0 */
		break;

	case 0xe8:	/* call relative */
		branch_clear_offset(auprobe, insn);
		break;

	case 0x0f:
		if (insn->opcode.nbytes != 2)
			return -ENOSYS;
		/*
		 * If it is a "near" conditional jmp, OPCODE2() - 0x10 matches
		 * OPCODE1() of the "short" jmp which checks the same condition.
		 */
		opc1 = OPCODE2(insn) - 0x10;
	default:
		if (!is_cond_jmp_opcode(opc1))
			return -ENOSYS;
	}

	/*
	 * 16-bit overrides such as CALLW (66 e8 nn nn) are not supported.
	 * Intel and AMD behavior differ in 64-bit mode: Intel ignores 66 prefix.
	 * No one uses these insns, reject any branch insns with such prefix.
	 */
	for (i = 0; i < insn->prefixes.nbytes; i++) {
		if (insn->prefixes.bytes[i] == 0x66)
			return -ENOTSUPP;
	}

	auprobe->branch.opc1 = opc1;
	auprobe->branch.ilen = insn->length;
	auprobe->branch.offs = insn->immediate.value;

	auprobe->ops = &branch_xol_ops;
	return 0;
}
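
/*
 * Example (editor's note): "je rel32" is encoded 0f 84 nn nn nn nn, and
 * OPCODE2() - 0x10 = 0x84 - 0x10 = 0x74, the opcode of the short
 * "je rel8", so both encodings share one entry in check_jmp_cond().
 */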
/**
 * arch_uprobe_analyze_insn - instruction analysis including validity and fixups.
 * @mm: the probed address space.
 * @auprobe: the probepoint information.
 * @addr: virtual address at which to install the probepoint
 * Return 0 on success or a -ve number on error.
 */
int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long addr)
{
	struct insn insn;
	bool fix_ip = true, fix_call = false;
	int ret;

	ret = uprobe_init_insn(auprobe, &insn, is_64bit_mm(mm));
	if (ret)
		return ret;

	ret = branch_setup_xol_ops(auprobe, &insn);
	if (ret != -ENOSYS)
		return ret;

	/*
	 * Figure out which fixups arch_uprobe_post_xol() will need to perform,
	 * and annotate arch_uprobe->fixups accordingly.  To start with, ->fixups
	 * is either zero or it reflects rip-related fixups.
	 */
	switch (OPCODE1(&insn)) {
	case 0x9d:		/* popf */
		auprobe->fixups |= UPROBE_FIX_SETF;
		break;
	case 0xc3:		/* ret or lret -- ip is correct */
	case 0xcb:
	case 0xc2:
	case 0xca:
		fix_ip = false;
		break;
	case 0x9a:		/* call absolute - Fix return addr, not ip */
		fix_call = true;
		fix_ip = false;
		break;
	case 0xea:		/* jmp absolute -- ip is correct */
		fix_ip = false;
		break;
	case 0xff:
		switch (MODRM_REG(&insn)) {
		case 2: case 3:			/* call or lcall, indirect */
			fix_call = true;
		case 4: case 5:			/* jmp or ljmp, indirect */
			fix_ip = false;
		}
		/* fall through */
	default:
		handle_riprel_insn(auprobe, &insn);
	}

	if (fix_ip)
		auprobe->fixups |= UPROBE_FIX_IP;
	if (fix_call)
		auprobe->fixups |= UPROBE_FIX_CALL;

	auprobe->ops = &default_xol_ops;
	return 0;
}
/*
 * arch_uprobe_pre_xol - prepare to execute out of line.
 * @auprobe: the probepoint information.
 * @regs: reflects the saved user state of current task.
 */
int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	regs->ip = utask->xol_vaddr;
	utask->autask.saved_trap_nr = current->thread.trap_nr;
	current->thread.trap_nr = UPROBE_TRAP_NR;

	utask->autask.saved_tf = !!(regs->flags & X86_EFLAGS_TF);
	regs->flags |= X86_EFLAGS_TF;
	if (test_tsk_thread_flag(current, TIF_BLOCKSTEP))
		set_task_blockstep(current, false);

	if (auprobe->ops->pre_xol)
		return auprobe->ops->pre_xol(auprobe, regs);
	return 0;
}
/*
 * If the xol insn itself traps and generates a signal (say,
 * SIGILL/SIGSEGV/etc), then detect the case where a singlestepped
 * instruction jumps back to its own address.  It is assumed that anything
 * like do_page_fault/do_trap/etc sets thread.trap_nr != -1.
 *
 * arch_uprobe_pre_xol/arch_uprobe_post_xol save/restore thread.trap_nr,
 * arch_uprobe_xol_was_trapped() simply checks that ->trap_nr is not equal to
 * UPROBE_TRAP_NR == -1 set by arch_uprobe_pre_xol().
 */
bool arch_uprobe_xol_was_trapped(struct task_struct *t)
{
	if (t->thread.trap_nr != UPROBE_TRAP_NR)
		return true;

	return false;
}
/*
 * Called after single-stepping.  To avoid the SMP problems that can
 * occur when we temporarily put back the original opcode to
 * single-step, we single-stepped a copy of the instruction.
 *
 * This function prepares to resume execution after the single-step.
 * We have to fix things up as follows:
 *
 * Typically, the new ip is relative to the copied instruction.  We need
 * to make it relative to the original instruction (FIX_IP).  Exceptions
 * are return instructions and absolute or indirect jump or call instructions.
 *
 * If the single-stepped instruction was a call, the return address that
 * is atop the stack is the address following the copied instruction.  We
 * need to make it the address following the original instruction (FIX_CALL).
 *
 * If the original instruction was a rip-relative instruction such as
 * "movl %edx,0xnnnn(%rip)", we have instead executed an equivalent
 * instruction using a scratch register -- e.g., "movl %edx,(%rax)".
 * We need to restore the contents of the scratch register and adjust
 * the ip, keeping in mind that the instruction we executed is 4 bytes
 * shorter than the original instruction (since we squeezed out the offset
 * field).  (FIX_RIP_AX or FIX_RIP_CX)
 */
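
/*
 * Numeric example (editor's illustration): if the probed insn at
 * utask->vaddr = 0x400500 was copied to utask->xol_vaddr = 0x7f7000000000
 * and is 5 bytes long, the CPU leaves ip = 0x7f7000000005 after the
 * single-step; adding correction = vaddr - xol_vaddr gives 0x400505, the
 * next original instruction.  A return address pushed by a copied call
 * insn is off by the same amount (FIX_CALL).
 */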
int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	WARN_ON_ONCE(current->thread.trap_nr != UPROBE_TRAP_NR);

	if (auprobe->ops->post_xol) {
		int err = auprobe->ops->post_xol(auprobe, regs);
		if (err) {
			arch_uprobe_abort_xol(auprobe, regs);
			/*
			 * Restart the probed insn.  ->post_xol() must ensure
			 * this is really possible if it returns -ERESTART.
			 */
			if (err == -ERESTART)
				return 0;
			return err;
		}
	}

	current->thread.trap_nr = utask->autask.saved_trap_nr;
	/*
	 * arch_uprobe_pre_xol() doesn't save the state of TIF_BLOCKSTEP
	 * so we can get an extra SIGTRAP if we do not clear TF.  We need
	 * to examine the opcode to make it right.
	 */
	if (utask->autask.saved_tf)
		send_sig(SIGTRAP, current, 0);
	else if (!(auprobe->fixups & UPROBE_FIX_SETF))
		regs->flags &= ~X86_EFLAGS_TF;

	return 0;
}
/* callback routine for handling exceptions. */
int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val, void *data)
{
	struct die_args *args = data;
	struct pt_regs *regs = args->regs;
	int ret = NOTIFY_DONE;

	/* We are only interested in userspace traps */
	if (regs && !user_mode_vm(regs))
		return NOTIFY_DONE;

	switch (val) {
	case DIE_INT3:
		if (uprobe_pre_sstep_notifier(regs))
			ret = NOTIFY_STOP;

		break;

	case DIE_DEBUG:
		if (uprobe_post_sstep_notifier(regs))
			ret = NOTIFY_STOP;

	default:
		break;
	}

	return ret;
}
/*
 * This function gets called when the XOL instruction either gets trapped or
 * the thread has a fatal signal, or if arch_uprobe_post_xol() failed.
 * Reset the instruction pointer to its probed address for the potential
 * restart or for post mortem analysis.
 */
void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	current->thread.trap_nr = utask->autask.saved_trap_nr;
	handle_riprel_post_xol(auprobe, regs, NULL);
	instruction_pointer_set(regs, utask->vaddr);

	/* clear TF if it was set by us in arch_uprobe_pre_xol() */
	if (!utask->autask.saved_tf)
		regs->flags &= ~X86_EFLAGS_TF;
}
static bool __skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	if (auprobe->ops->emulate)
		return auprobe->ops->emulate(auprobe, regs);

	return false;
}
bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	bool ret = __skip_sstep(auprobe, regs);
	if (ret && (regs->flags & X86_EFLAGS_TF))
		send_sig(SIGTRAP, current, 0);

	return ret;
}
unsigned long
arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs)
{
	int rasize = sizeof_long(), nleft;
	unsigned long orig_ret_vaddr = 0; /* clear high bits for 32-bit apps */

	if (copy_from_user(&orig_ret_vaddr, (void __user *)regs->sp, rasize))
		return -1;

	/* check whether address has been already hijacked */
	if (orig_ret_vaddr == trampoline_vaddr)
		return orig_ret_vaddr;

	nleft = copy_to_user((void __user *)regs->sp, &trampoline_vaddr, rasize);
	if (likely(!nleft))
		return orig_ret_vaddr;

	if (nleft != rasize) {
		pr_err("uprobe: return address clobbered: pid=%d, %%sp=%#lx, "
			"%%ip=%#lx\n", current->pid, regs->sp, regs->ip);
		force_sig_info(SIGSEGV, SEND_SIG_FORCED, current);
	}

	return -1;
}