2 * Based on arch/arm/kernel/ptrace.c
5 * edited by Linus Torvalds
6 * ARM modifications Copyright (C) 2000 Russell King
7 * Copyright (C) 2012 ARM Ltd.
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program. If not, see <http://www.gnu.org/licenses/>.
22 #include <linux/audit.h>
23 #include <linux/compat.h>
24 #include <linux/kernel.h>
25 #include <linux/sched.h>
27 #include <linux/smp.h>
28 #include <linux/ptrace.h>
29 #include <linux/user.h>
30 #include <linux/security.h>
31 #include <linux/init.h>
32 #include <linux/signal.h>
33 #include <linux/uaccess.h>
34 #include <linux/perf_event.h>
35 #include <linux/hw_breakpoint.h>
36 #include <linux/regset.h>
37 #include <linux/tracehook.h>
38 #include <linux/elf.h>
40 #include <asm/compat.h>
41 #include <asm/debug-monitors.h>
42 #include <asm/pgtable.h>
43 #include <asm/syscall.h>
44 #include <asm/traps.h>
45 #include <asm/system_misc.h>
48 * TODO: does not yet catch signals sent when the child dies.
49 * in exit.c or in signal.c.
53 * Called by kernel/ptrace.c when detaching..
/*
 * Called by kernel/ptrace.c when detaching: clear per-task debug state
 * so the child runs free of any tracer-installed breakpoints.
 */
55 void ptrace_disable(struct task_struct *child)
59 #ifdef CONFIG_HAVE_HW_BREAKPOINT
61 * Handle hitting a HW-breakpoint.
/*
 * perf overflow handler for ptrace-installed breakpoints: raise SIGTRAP
 * (TRAP_HWBKPT) against the current task, encoding which slot fired in
 * si_errno for compat (AArch32) tracees.
 */
63 static void ptrace_hbptriggered(struct perf_event *bp,
64 struct perf_sample_data *data,
67 struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
/* si_addr reports the address that triggered the debug exception */
71 .si_code = TRAP_HWBKPT,
72 .si_addr = (void __user *)(bkpt->trigger),
/* Native tasks: deliver the signal directly, no slot encoding needed */
78 if (!is_compat_task())
/* Compat ABI: breakpoints are identified by positive odd numbers ... */
81 for (i = 0; i < ARM_MAX_BRP; ++i) {
82 if (current->thread.debug.hbp_break[i] == bp) {
83 info.si_errno = (i << 1) + 1;
/*
 * ... and watchpoints by negative ones.
 * NOTE(review): this loop indexes hbp_watch[] with i starting at
 * ARM_MAX_BRP — presumably the intended index is (i - ARM_MAX_BRP);
 * also the "&& !bp" clause looks always-false here. TODO confirm
 * against the full source.
 */
87 for (i = ARM_MAX_BRP; i < ARM_MAX_HBP_SLOTS && !bp; ++i) {
88 if (current->thread.debug.hbp_watch[i] == bp) {
89 info.si_errno = -((i << 1) + 1);
96 force_sig_info(SIGTRAP, &info, current);
100 * Unregister breakpoints from this task and reset the pointers in
/*
 * Unregister every ptrace breakpoint/watchpoint owned by @tsk and NULL
 * the per-thread slot pointers so they cannot be reused after exec/exit.
 */
103 void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
106 struct thread_struct *t = &tsk->thread;
/* Instruction breakpoints */
108 for (i = 0; i < ARM_MAX_BRP; i++) {
109 if (t->debug.hbp_break[i]) {
110 unregister_hw_breakpoint(t->debug.hbp_break[i]);
111 t->debug.hbp_break[i] = NULL;
/* Data watchpoints */
115 for (i = 0; i < ARM_MAX_WRP; i++) {
116 if (t->debug.hbp_watch[i]) {
117 unregister_hw_breakpoint(t->debug.hbp_watch[i]);
118 t->debug.hbp_watch[i] = NULL;
/* A newly-forked thread starts with no debug state: zero the whole struct. */
123 void ptrace_hw_copy_thread(struct task_struct *tsk)
125 memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
/*
 * Look up the perf_event backing slot @idx of @tsk for the given regset
 * note type. Returns ERR_PTR(-EINVAL) for an unknown type or out-of-range
 * index; may return NULL if the slot has not been populated yet.
 */
128 static struct perf_event *ptrace_hbp_get_event(unsigned int note_type,
129 struct task_struct *tsk,
132 struct perf_event *bp = ERR_PTR(-EINVAL);
135 case NT_ARM_HW_BREAK:
136 if (idx < ARM_MAX_BRP)
137 bp = tsk->thread.debug.hbp_break[idx];
139 case NT_ARM_HW_WATCH:
140 if (idx < ARM_MAX_WRP)
141 bp = tsk->thread.debug.hbp_watch[idx];
/*
 * Store @bp into slot @idx of @tsk's break/watch array selected by
 * @note_type. Bounds-checked; out-of-range or unknown types fail.
 */
148 static int ptrace_hbp_set_event(unsigned int note_type,
149 struct task_struct *tsk,
151 struct perf_event *bp)
156 case NT_ARM_HW_BREAK:
157 if (idx < ARM_MAX_BRP) {
158 tsk->thread.debug.hbp_break[idx] = bp;
162 case NT_ARM_HW_WATCH:
163 if (idx < ARM_MAX_WRP) {
164 tsk->thread.debug.hbp_watch[idx] = bp;
/*
 * Lazily create the perf_event for slot @idx: pick execute vs read/write
 * type from @note_type, initialise a default attr that will pass
 * validation, register it with the triggered-callback, and record it in
 * the thread's slot array.
 */
173 static struct perf_event *ptrace_hbp_create(unsigned int note_type,
174 struct task_struct *tsk,
177 struct perf_event *bp;
178 struct perf_event_attr attr;
182 case NT_ARM_HW_BREAK:
183 type = HW_BREAKPOINT_X;
185 case NT_ARM_HW_WATCH:
186 type = HW_BREAKPOINT_RW;
/* Unknown note type: refuse */
189 return ERR_PTR(-EINVAL);
192 ptrace_breakpoint_init(&attr);
195 * Initialise fields to sane defaults
196 * (i.e. values that will pass validation).
/* 4-byte length is a safe default for both breakpoints and watchpoints */
199 attr.bp_len = HW_BREAKPOINT_LEN_4;
203 bp = register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL, tsk);
207 err = ptrace_hbp_set_event(note_type, tsk, idx, bp);
/*
 * Translate a user-supplied arch control word into perf_event_attr
 * fields (bp_len/bp_type/disabled), rejecting type bits that do not
 * match the regset (execute-only for HW_BREAK, read/write for HW_WATCH).
 */
214 static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
215 struct arch_hw_breakpoint_ctrl ctrl,
216 struct perf_event_attr *attr)
218 int err, len, type, disabled = !ctrl.enabled;
222 type = HW_BREAKPOINT_EMPTY;
224 err = arch_bp_generic_fields(ctrl, &len, &type);
/* Breakpoints may only carry the execute bit */
229 case NT_ARM_HW_BREAK:
230 if ((type & HW_BREAKPOINT_X) != type)
/* Watchpoints may only carry read/write bits */
233 case NT_ARM_HW_WATCH:
234 if ((type & HW_BREAKPOINT_RW) != type)
243 attr->bp_type = type;
244 attr->disabled = disabled;
/*
 * Build the "register 0" resource word exposed to the debugger: number
 * of available slots for the given type combined with the debug
 * architecture version.
 */
249 static int ptrace_hbp_get_resource_info(unsigned int note_type, u32 *info)
255 case NT_ARM_HW_BREAK:
256 num = hw_breakpoint_slots(TYPE_INST);
258 case NT_ARM_HW_WATCH:
259 num = hw_breakpoint_slots(TYPE_DATA);
265 reg |= debug_monitors_arch();
/* Read back the encoded control register for slot @idx (0 if unset). */
273 static int ptrace_hbp_get_ctrl(unsigned int note_type,
274 struct task_struct *tsk,
278 struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);
283 *ctrl = bp ? encode_ctrl_reg(counter_arch_bp(bp)->ctrl) : 0;
/* Read back the address programmed into slot @idx (0 if unset). */
287 static int ptrace_hbp_get_addr(unsigned int note_type,
288 struct task_struct *tsk,
292 struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);
297 *addr = bp ? bp->attr.bp_addr : 0;
/*
 * Fetch the event for slot @idx, creating it on first use so callers can
 * always modify an existing perf_event.
 */
301 static struct perf_event *ptrace_hbp_get_initialised_bp(unsigned int note_type,
302 struct task_struct *tsk,
305 struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);
/* Slot empty: allocate and register it now */
308 bp = ptrace_hbp_create(note_type, tsk, idx);
/*
 * Decode a user control word for slot @idx and apply it to the backing
 * perf_event via modify_user_hw_breakpoint().
 */
313 static int ptrace_hbp_set_ctrl(unsigned int note_type,
314 struct task_struct *tsk,
319 struct perf_event *bp;
320 struct perf_event_attr attr;
321 struct arch_hw_breakpoint_ctrl ctrl;
323 bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
330 decode_ctrl_reg(uctrl, &ctrl);
331 err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr);
335 return modify_user_hw_breakpoint(bp, &attr);
/* Program a new address into slot @idx, creating the event if needed. */
338 static int ptrace_hbp_set_addr(unsigned int note_type,
339 struct task_struct *tsk,
344 struct perf_event *bp;
345 struct perf_event_attr attr;
347 bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
355 err = modify_user_hw_breakpoint(bp, &attr);
/* Wire sizes of one (address, ctrl, pad) tuple in user_hwdebug_state. */
359 #define PTRACE_HBP_ADDR_SZ sizeof(u64)
360 #define PTRACE_HBP_CTRL_SZ sizeof(u32)
361 #define PTRACE_HBP_PAD_SZ sizeof(u32)
/*
 * Regset .get handler for NT_ARM_HW_BREAK/NT_ARM_HW_WATCH: emit the
 * resource-info word, a pad word, then (addr, ctrl, pad) tuples for each
 * slot until @count is exhausted.
 */
363 static int hw_break_get(struct task_struct *target,
364 const struct user_regset *regset,
365 unsigned int pos, unsigned int count,
366 void *kbuf, void __user *ubuf)
368 unsigned int note_type = regset->core_note_type;
369 int ret, idx = 0, offset, limit;
/* Register 0: resource information */
374 ret = ptrace_hbp_get_resource_info(note_type, &info);
378 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &info, 0,
/* Pad following the resource word */
384 offset = offsetof(struct user_hwdebug_state, pad);
385 ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, offset,
386 offset + PTRACE_HBP_PAD_SZ);
390 /* (address, ctrl) registers */
391 offset = offsetof(struct user_hwdebug_state, dbg_regs);
392 limit = regset->n * regset->size;
393 while (count && offset < limit) {
394 ret = ptrace_hbp_get_addr(note_type, target, idx, &addr);
397 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &addr,
398 offset, offset + PTRACE_HBP_ADDR_SZ);
401 offset += PTRACE_HBP_ADDR_SZ;
403 ret = ptrace_hbp_get_ctrl(note_type, target, idx, &ctrl);
406 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &ctrl,
407 offset, offset + PTRACE_HBP_CTRL_SZ);
410 offset += PTRACE_HBP_CTRL_SZ;
/* Trailing pad keeps each tuple 64-bit aligned */
412 ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
414 offset + PTRACE_HBP_PAD_SZ);
417 offset += PTRACE_HBP_PAD_SZ;
/*
 * Regset .set handler: mirror of hw_break_get. Skips the read-only
 * resource-info/pad header, then consumes (addr, ctrl, pad) tuples and
 * applies each to the corresponding slot.
 */
424 static int hw_break_set(struct task_struct *target,
425 const struct user_regset *regset,
426 unsigned int pos, unsigned int count,
427 const void *kbuf, const void __user *ubuf)
429 unsigned int note_type = regset->core_note_type;
430 int ret, idx = 0, offset, limit;
434 /* Resource info and pad */
435 offset = offsetof(struct user_hwdebug_state, dbg_regs);
436 ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, offset);
440 /* (address, ctrl) registers */
441 limit = regset->n * regset->size;
442 while (count && offset < limit) {
443 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr,
444 offset, offset + PTRACE_HBP_ADDR_SZ);
447 ret = ptrace_hbp_set_addr(note_type, target, idx, addr);
450 offset += PTRACE_HBP_ADDR_SZ;
452 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl,
453 offset, offset + PTRACE_HBP_CTRL_SZ);
456 ret = ptrace_hbp_set_ctrl(note_type, target, idx, ctrl);
459 offset += PTRACE_HBP_CTRL_SZ;
/* Pad is ignored on write */
461 ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
463 offset + PTRACE_HBP_PAD_SZ);
466 offset += PTRACE_HBP_PAD_SZ;
472 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
/* NT_PRSTATUS .get: copy the task's saved user register frame out. */
474 static int gpr_get(struct task_struct *target,
475 const struct user_regset *regset,
476 unsigned int pos, unsigned int count,
477 void *kbuf, void __user *ubuf)
479 struct user_pt_regs *uregs = &task_pt_regs(target)->user_regs;
480 return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0, -1);
/*
 * NT_PRSTATUS .set: copy a full register frame in, validate it (pstate
 * sanity) before committing it to the task.
 */
483 static int gpr_set(struct task_struct *target, const struct user_regset *regset,
484 unsigned int pos, unsigned int count,
485 const void *kbuf, const void __user *ubuf)
488 struct user_pt_regs newregs;
490 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1);
/* Reject frames a user task must not run with */
494 if (!valid_user_regs(&newregs))
497 task_pt_regs(target)->user_regs = newregs;
502 * TODO: update fp accessors for lazy context switching (sync/flush hwstate)
/* NT_PRFPREG .get: copy the task's FP/SIMD user state out. */
504 static int fpr_get(struct task_struct *target, const struct user_regset *regset,
505 unsigned int pos, unsigned int count,
506 void *kbuf, void __user *ubuf)
508 struct user_fpsimd_state *uregs;
509 uregs = &target->thread.fpsimd_state.user_fpsimd;
510 return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0, -1);
/*
 * NT_PRFPREG .set: copy new FP/SIMD state in, then flush any cached
 * hardware state so the new values are loaded on next return to user.
 */
513 static int fpr_set(struct task_struct *target, const struct user_regset *regset,
514 unsigned int pos, unsigned int count,
515 const void *kbuf, const void __user *ubuf)
518 struct user_fpsimd_state newstate;
520 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate, 0, -1);
524 target->thread.fpsimd_state.user_fpsimd = newstate;
525 fpsimd_flush_task_state(target);
/* NT_ARM_TLS .get: expose the thread-pointer value. */
529 static int tls_get(struct task_struct *target, const struct user_regset *regset,
530 unsigned int pos, unsigned int count,
531 void *kbuf, void __user *ubuf)
533 unsigned long *tls = &target->thread.tp_value;
534 return user_regset_copyout(&pos, &count, &kbuf, &ubuf, tls, 0, -1);
/* NT_ARM_TLS .set: overwrite the thread-pointer value. */
537 static int tls_set(struct task_struct *target, const struct user_regset *regset,
538 unsigned int pos, unsigned int count,
539 const void *kbuf, const void __user *ubuf)
544 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
548 target->thread.tp_value = tls;
/* Indices into aarch64_regsets[] below. */
552 enum aarch64_regset {
556 #ifdef CONFIG_HAVE_HW_BREAKPOINT
/* Native (AArch64) regset table wired into the user_regset core. */
562 static const struct user_regset aarch64_regsets[] = {
564 .core_note_type = NT_PRSTATUS,
565 .n = sizeof(struct user_pt_regs) / sizeof(u64),
567 .align = sizeof(u64),
572 .core_note_type = NT_PRFPREG,
573 .n = sizeof(struct user_fpsimd_state) / sizeof(u32),
575 * We pretend we have 32-bit registers because the fpsr and
576 * fpcr are 32-bits wide.
579 .align = sizeof(u32),
584 .core_note_type = NT_ARM_TLS,
586 .size = sizeof(void *),
587 .align = sizeof(void *),
591 #ifdef CONFIG_HAVE_HW_BREAKPOINT
592 [REGSET_HW_BREAK] = {
593 .core_note_type = NT_ARM_HW_BREAK,
594 .n = sizeof(struct user_hwdebug_state) / sizeof(u32),
596 .align = sizeof(u32),
600 [REGSET_HW_WATCH] = {
601 .core_note_type = NT_ARM_HW_WATCH,
602 .n = sizeof(struct user_hwdebug_state) / sizeof(u32),
604 .align = sizeof(u32),
/* The regset view returned for native AArch64 tasks. */
611 static const struct user_regset_view user_aarch64_view = {
612 .name = "aarch64", .e_machine = EM_AARCH64,
613 .regsets = aarch64_regsets, .n = ARRAY_SIZE(aarch64_regsets)
617 #include <linux/compat.h>
/*
 * Compat (AArch32) NT_PRSTATUS .get: translate each requested AArch32
 * register number onto the AArch64 pt_regs layout (pc, pstate->cpsr,
 * orig_x0->ORIG_r0, r0-r14 aliasing x0-x14) and copy it to userspace.
 *
 * NOTE(review): @kbuf is accepted but never used — the loop copies via
 * copy_to_user(ubuf) only, so in-kernel regset reads (e.g. coredump)
 * would not work through this path. TODO confirm against callers.
 */
624 static int compat_gpr_get(struct task_struct *target,
625 const struct user_regset *regset,
626 unsigned int pos, unsigned int count,
627 void *kbuf, void __user *ubuf)
630 unsigned int i, start, num_regs;
632 /* Calculate the number of AArch32 registers contained in count */
633 num_regs = count / regset->size;
635 /* Convert pos into an register number */
636 start = pos / regset->size;
/* Reject requests that run past the end of the compat register file */
638 if (start + num_regs > regset->n)
641 for (i = 0; i < num_regs; ++i) {
642 unsigned int idx = start + i;
647 reg = (void *)&task_pt_regs(target)->pc;
650 reg = (void *)&task_pt_regs(target)->pstate;
653 reg = (void *)&task_pt_regs(target)->orig_x0;
656 reg = (void *)&task_pt_regs(target)->regs[idx];
659 ret = copy_to_user(ubuf, reg, sizeof(compat_ulong_t));
664 ubuf += sizeof(compat_ulong_t);
/*
 * Compat NT_PRSTATUS .set: read AArch32 register values from userspace
 * into a scratch copy of pt_regs, then commit only if the resulting
 * frame passes valid_user_regs().
 *
 * NOTE(review): as with compat_gpr_get, @kbuf is ignored and only
 * copy_from_user(ubuf) is used — TODO confirm in-kernel callers.
 */
670 static int compat_gpr_set(struct task_struct *target,
671 const struct user_regset *regset,
672 unsigned int pos, unsigned int count,
673 const void *kbuf, const void __user *ubuf)
675 struct pt_regs newregs;
677 unsigned int i, start, num_regs;
679 /* Calculate the number of AArch32 registers contained in count */
680 num_regs = count / regset->size;
682 /* Convert pos into an register number */
683 start = pos / regset->size;
685 if (start + num_regs > regset->n)
/* Work on a copy so a partial/invalid write never lands in the task */
688 newregs = *task_pt_regs(target);
690 for (i = 0; i < num_regs; ++i) {
691 unsigned int idx = start + i;
696 reg = (void *)&newregs.pc;
699 reg = (void *)&newregs.pstate;
702 reg = (void *)&newregs.orig_x0;
705 reg = (void *)&newregs.regs[idx];
708 ret = copy_from_user(reg, ubuf, sizeof(compat_ulong_t));
713 ubuf += sizeof(compat_ulong_t);
/* Commit only a frame the task is allowed to run with */
716 if (valid_user_regs(&newregs.user_regs))
717 *task_pt_regs(target) = newregs;
/*
 * Compat NT_ARM_VFP .get: the d0-d31 registers live contiguously in
 * fpsimd_state, so they are copied in one go; the AArch32 fpscr is then
 * synthesised from the separate fpsr/fpcr fields.
 *
 * NOTE(review): the fpscr word is written with put_user() straight to
 * @ubuf rather than through user_regset_copyout — assumes ubuf points
 * at the fpscr slot after the bulk copy; kbuf readers get no fpscr.
 */
725 static int compat_vfp_get(struct task_struct *target,
726 const struct user_regset *regset,
727 unsigned int pos, unsigned int count,
728 void *kbuf, void __user *ubuf)
730 struct user_fpsimd_state *uregs;
731 compat_ulong_t fpscr;
734 uregs = &target->thread.fpsimd_state.user_fpsimd;
737 * The VFP registers are packed into the fpsimd_state, so they all sit
738 * nicely together for us. We just need to create the fpscr separately.
740 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0,
741 VFP_STATE_SIZE - sizeof(compat_ulong_t));
/* fpscr = status bits from fpsr | control bits from fpcr */
744 fpscr = (uregs->fpsr & VFP_FPSCR_STAT_MASK) |
745 (uregs->fpcr & VFP_FPSCR_CTRL_MASK);
746 ret = put_user(fpscr, (compat_ulong_t *)ubuf);
/*
 * Compat NT_ARM_VFP .set: bulk-copy d0-d31 in, split the trailing
 * fpscr word back into fpsr/fpcr, and flush cached FP state.
 */
752 static int compat_vfp_set(struct task_struct *target,
753 const struct user_regset *regset,
754 unsigned int pos, unsigned int count,
755 const void *kbuf, const void __user *ubuf)
757 struct user_fpsimd_state *uregs;
758 compat_ulong_t fpscr;
/* Refuse writes beyond the fixed VFP state size */
761 if (pos + count > VFP_STATE_SIZE)
764 uregs = &target->thread.fpsimd_state.user_fpsimd;
766 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
767 VFP_STATE_SIZE - sizeof(compat_ulong_t));
/* Split the single AArch32 fpscr into the AArch64 fpsr/fpcr pair */
770 ret = get_user(fpscr, (compat_ulong_t *)ubuf);
771 uregs->fpsr = fpscr & VFP_FPSCR_STAT_MASK;
772 uregs->fpcr = fpscr & VFP_FPSCR_CTRL_MASK;
775 fpsimd_flush_task_state(target);
/* Compat (AArch32) regset table: GPRs and VFP state. */
779 static const struct user_regset aarch32_regsets[] = {
780 [REGSET_COMPAT_GPR] = {
781 .core_note_type = NT_PRSTATUS,
782 .n = COMPAT_ELF_NGREG,
783 .size = sizeof(compat_elf_greg_t),
784 .align = sizeof(compat_elf_greg_t),
785 .get = compat_gpr_get,
786 .set = compat_gpr_set
788 [REGSET_COMPAT_VFP] = {
789 .core_note_type = NT_ARM_VFP,
790 .n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
791 .size = sizeof(compat_ulong_t),
792 .align = sizeof(compat_ulong_t),
793 .get = compat_vfp_get,
794 .set = compat_vfp_set
/* The regset view returned for compat (AArch32) tasks. */
798 static const struct user_regset_view user_aarch32_view = {
799 .name = "aarch32", .e_machine = EM_ARM,
800 .regsets = aarch32_regsets, .n = ARRAY_SIZE(aarch32_regsets)
/*
 * PTRACE_PEEKUSR for compat tasks: magic offsets report text/data
 * boundaries; offsets within the compat GPR area go through the
 * regset; anything past COMPAT_USER_SZ is rejected.
 */
803 static int compat_ptrace_read_user(struct task_struct *tsk, compat_ulong_t off,
804 compat_ulong_t __user *ret)
811 if (off == COMPAT_PT_TEXT_ADDR)
812 tmp = tsk->mm->start_code;
813 else if (off == COMPAT_PT_DATA_ADDR)
814 tmp = tsk->mm->start_data;
815 else if (off == COMPAT_PT_TEXT_END_ADDR)
816 tmp = tsk->mm->end_code;
817 else if (off < sizeof(compat_elf_gregset_t))
818 return copy_regset_to_user(tsk, &user_aarch32_view,
819 REGSET_COMPAT_GPR, off,
820 sizeof(compat_ulong_t), ret);
821 else if (off >= COMPAT_USER_SZ)
826 return put_user(tmp, ret);
/*
 * PTRACE_POKEUSR for compat tasks: only word-aligned offsets inside the
 * GPR area are writable; everything else is rejected or ignored.
 */
829 static int compat_ptrace_write_user(struct task_struct *tsk, compat_ulong_t off,
/* Must be 4-byte aligned and within the compat user area */
834 if (off & 3 || off >= COMPAT_USER_SZ)
837 if (off >= sizeof(compat_elf_gregset_t))
840 ret = copy_regset_from_user(tsk, &user_aarch32_view,
841 REGSET_COMPAT_GPR, off,
842 sizeof(compat_ulong_t),
847 #ifdef CONFIG_HAVE_HW_BREAKPOINT
850 * Convert a virtual register number into an index for a thread_info
851 * breakpoint array. Breakpoints are identified using positive numbers
852 * whilst watchpoints are negative. The registers are laid out as pairs
853 * of (address, control), each pair mapping to a unique hw_breakpoint struct.
854 * Register 0 is reserved for describing resource information.
/*
 * Map a compat virtual register number (positive = breakpoint,
 * negative = watchpoint, pairs of addr/ctrl) to a slot array index.
 */
856 static int compat_ptrace_hbp_num_to_idx(compat_long_t num)
858 return (abs(num) - 1) >> 1;
/*
 * Build the AArch32-format resource word (slot counts, watchpoint
 * length, debug architecture) for virtual register 0.
 */
861 static int compat_ptrace_hbp_get_resource_info(u32 *kdata)
863 u8 num_brps, num_wrps, debug_arch, wp_len;
866 num_brps = hw_breakpoint_slots(TYPE_INST);
867 num_wrps = hw_breakpoint_slots(TYPE_DATA);
869 debug_arch = debug_monitors_arch();
/*
 * Read one compat virtual debug register: even-numbered pairs map to
 * the slot's address, odd to its control word, via the native helpers.
 */
883 static int compat_ptrace_hbp_get(unsigned int note_type,
884 struct task_struct *tsk,
/* Fix: drop stray second semicolon left after the initialiser */
891 	int err, idx = compat_ptrace_hbp_num_to_idx(num);
894 	err = ptrace_hbp_get_addr(note_type, tsk, idx, &addr);
897 	err = ptrace_hbp_get_ctrl(note_type, tsk, idx, &ctrl);
/*
 * Write one compat virtual debug register: address or control word,
 * chosen by register parity, applied through the native helpers.
 */
904 static int compat_ptrace_hbp_set(unsigned int note_type,
905 struct task_struct *tsk,
912 int err, idx = compat_ptrace_hbp_num_to_idx(num);
916 err = ptrace_hbp_set_addr(note_type, tsk, idx, addr);
919 err = ptrace_hbp_set_ctrl(note_type, tsk, idx, ctrl);
/*
 * COMPAT_PTRACE_GETHBPREGS: dispatch on the sign of @num — negative for
 * watchpoints, zero for resource info, positive for breakpoints — and
 * copy the 32-bit result to the tracer.
 */
925 static int compat_ptrace_gethbpregs(struct task_struct *tsk, compat_long_t num,
926 compat_ulong_t __user *data)
/* set_fs dance: the native helpers may take __user pointers */
930 mm_segment_t old_fs = get_fs();
935 ret = compat_ptrace_hbp_get(NT_ARM_HW_WATCH, tsk, num, &kdata);
937 } else if (num == 0) {
938 ret = compat_ptrace_hbp_get_resource_info(&kdata);
941 ret = compat_ptrace_hbp_get(NT_ARM_HW_BREAK, tsk, num, &kdata);
946 ret = put_user(kdata, data);
/*
 * COMPAT_PTRACE_SETHBPREGS: fetch the 32-bit value from the tracer and
 * route it to the watchpoint (num < 0) or breakpoint (num > 0) setter.
 */
951 static int compat_ptrace_sethbpregs(struct task_struct *tsk, compat_long_t num,
952 compat_ulong_t __user *data)
956 mm_segment_t old_fs = get_fs();
961 ret = get_user(kdata, data);
967 ret = compat_ptrace_hbp_set(NT_ARM_HW_WATCH, tsk, num, &kdata);
969 ret = compat_ptrace_hbp_set(NT_ARM_HW_BREAK, tsk, num, &kdata);
974 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
/*
 * Arch entry point for ptrace requests from a 32-bit tracer: handles
 * the AArch32-specific requests here and falls back to the generic
 * compat_ptrace_request() for everything else.
 */
976 long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
977 compat_ulong_t caddr, compat_ulong_t cdata)
979 unsigned long addr = caddr;
980 unsigned long data = cdata;
981 void __user *datap = compat_ptr(data);
986 ret = compat_ptrace_read_user(child, addr, datap);
990 ret = compat_ptrace_write_user(child, addr, data);
993 case COMPAT_PTRACE_GETREGS:
994 ret = copy_regset_to_user(child,
997 0, sizeof(compat_elf_gregset_t),
1001 case COMPAT_PTRACE_SETREGS:
1002 ret = copy_regset_from_user(child,
1005 0, sizeof(compat_elf_gregset_t),
1009 case COMPAT_PTRACE_GET_THREAD_AREA:
1010 ret = put_user((compat_ulong_t)child->thread.tp_value,
1011 (compat_ulong_t __user *)datap);
/* Let the tracer rewrite the syscall number about to be executed */
1014 case COMPAT_PTRACE_SET_SYSCALL:
1015 task_pt_regs(child)->syscallno = data;
1019 case COMPAT_PTRACE_GETVFPREGS:
1020 ret = copy_regset_to_user(child,
1027 case COMPAT_PTRACE_SETVFPREGS:
1028 ret = copy_regset_from_user(child,
1035 #ifdef CONFIG_HAVE_HW_BREAKPOINT
1036 case COMPAT_PTRACE_GETHBPREGS:
1037 ret = compat_ptrace_gethbpregs(child, addr, datap);
1040 case COMPAT_PTRACE_SETHBPREGS:
1041 ret = compat_ptrace_sethbpregs(child, addr, datap);
/* Everything else: generic compat ptrace handling */
1046 ret = compat_ptrace_request(child, request, addr,
1053 #endif /* CONFIG_COMPAT */
/* Pick the regset view matching the tracee's execution state. */
1055 const struct user_regset_view *task_user_regset_view(struct task_struct *task)
1057 #ifdef CONFIG_COMPAT
1058 if (is_compat_thread(task_thread_info(task)))
1059 return &user_aarch32_view;
1061 return &user_aarch64_view;
/*
 * Native ptrace entry point: only PTRACE_SET_SYSCALL is arch-specific;
 * all other requests use the generic ptrace_request().
 */
1064 long arch_ptrace(struct task_struct *child, long request,
1065 unsigned long addr, unsigned long data)
1070 case PTRACE_SET_SYSCALL:
1071 task_pt_regs(child)->syscallno = data;
1075 ret = ptrace_request(child, request, addr, data);
/* Direction flag reported to the tracer via the scratch register. */
1082 enum ptrace_syscall_dir {
1083 PTRACE_SYSCALL_ENTER = 0,
1084 PTRACE_SYSCALL_EXIT,
/*
 * Notify the tracer of syscall entry/exit. The direction is smuggled to
 * the tracer through a scratch register, which is saved and restored
 * around the tracehook call.
 */
1087 static void tracehook_report_syscall(struct pt_regs *regs,
1088 enum ptrace_syscall_dir dir)
1091 unsigned long saved_reg;
1094 * A scratch register (ip(r12) on AArch32, x7 on AArch64) is
1095 * used to denote syscall entry/exit:
1097 regno = (is_compat_task() ? 12 : 7);
1098 saved_reg = regs->regs[regno];
1099 regs->regs[regno] = dir;
1101 if (dir == PTRACE_SYSCALL_EXIT)
1102 tracehook_report_syscall_exit(regs, 0);
/* On entry, a non-zero return means "skip this syscall" */
1103 else if (tracehook_report_syscall_entry(regs))
1104 regs->syscallno = ~0UL;
/* Restore the clobbered scratch register for the tracee */
1106 regs->regs[regno] = saved_reg;
/*
 * Syscall-entry tracing hook: report to the tracer, handle skipped
 * syscalls (-ENOSYS in x0 unless the user issued syscall(-1) itself),
 * feed audit, and return the possibly-rewritten syscall number.
 */
1109 asmlinkage int syscall_trace_enter(struct pt_regs *regs)
/* Remember the number before the tracer gets a chance to change it */
1111 unsigned int saved_syscallno = regs->syscallno;
1113 if (test_thread_flag(TIF_SYSCALL_TRACE))
1114 tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);
1116 if (IS_SKIP_SYSCALL(regs->syscallno)) {
1118 * RESTRICTION: we can't modify a return value of user
1119 * issued syscall(-1) here. In order to ease this flavor,
1120 * we need to treat whatever value in x0 as a return value,
1121 * but this might result in a bogus value being returned.
1124 * NOTE: syscallno may also be set to -1 if fatal signal is
1125 * detected in tracehook_report_syscall_entry(), but since
1126 * a value set to x0 here is not used in this case, we may
1129 if (!test_thread_flag(TIF_SYSCALL_TRACE) ||
1130 (IS_SKIP_SYSCALL(saved_syscallno)))
1131 regs->regs[0] = -ENOSYS;
1134 audit_syscall_entry(syscall_get_arch(), regs->syscallno,
1135 regs->orig_x0, regs->regs[1], regs->regs[2], regs->regs[3]);
1137 return regs->syscallno;
/* Syscall-exit hook: close out the audit record, then notify the tracer. */
1140 asmlinkage void syscall_trace_exit(struct pt_regs *regs)
1142 audit_syscall_exit(regs);
1144 if (test_thread_flag(TIF_SYSCALL_TRACE))
1145 tracehook_report_syscall(regs, PTRACE_SYSCALL_EXIT);