// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/kernel/ptrace.c
 * edited by Linus Torvalds
 * ARM modifications Copyright (C) 2000 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */
#include <linux/audit.h>
#include <linux/compat.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/nospec.h>
#include <linux/smp.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/seccomp.h>
#include <linux/security.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/regset.h>
#include <linux/elf.h>

#include <asm/compat.h>
#include <asm/cpufeature.h>
#include <asm/debug-monitors.h>
#include <asm/fpsimd.h>
#include <asm/pointer_auth.h>
#include <asm/stacktrace.h>
#include <asm/syscall.h>
#include <asm/traps.h>
#include <asm/system_misc.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>
struct pt_regs_offset {
	const char *name;
	int offset;
};

#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
#define REG_OFFSET_END {.name = NULL, .offset = 0}
#define GPR_OFFSET_NAME(r) \
	{.name = "x" #r, .offset = offsetof(struct pt_regs, regs[r])}
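/*
 * For illustration: GPR_OFFSET_NAME(0) expands to
 * {.name = "x0", .offset = offsetof(struct pt_regs, regs[0])},
 * so each general purpose register can be looked up by its "xN" name.
 */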
static const struct pt_regs_offset regoffset_table[] = {
	GPR_OFFSET_NAME(0),
	GPR_OFFSET_NAME(1),
	GPR_OFFSET_NAME(2),
	GPR_OFFSET_NAME(3),
	GPR_OFFSET_NAME(4),
	GPR_OFFSET_NAME(5),
	GPR_OFFSET_NAME(6),
	GPR_OFFSET_NAME(7),
	GPR_OFFSET_NAME(8),
	GPR_OFFSET_NAME(9),
	GPR_OFFSET_NAME(10),
	GPR_OFFSET_NAME(11),
	GPR_OFFSET_NAME(12),
	GPR_OFFSET_NAME(13),
	GPR_OFFSET_NAME(14),
	GPR_OFFSET_NAME(15),
	GPR_OFFSET_NAME(16),
	GPR_OFFSET_NAME(17),
	GPR_OFFSET_NAME(18),
	GPR_OFFSET_NAME(19),
	GPR_OFFSET_NAME(20),
	GPR_OFFSET_NAME(21),
	GPR_OFFSET_NAME(22),
	GPR_OFFSET_NAME(23),
	GPR_OFFSET_NAME(24),
	GPR_OFFSET_NAME(25),
	GPR_OFFSET_NAME(26),
	GPR_OFFSET_NAME(27),
	GPR_OFFSET_NAME(28),
	GPR_OFFSET_NAME(29),
	GPR_OFFSET_NAME(30),
	{.name = "lr", .offset = offsetof(struct pt_regs, regs[30])},
	REG_OFFSET_NAME(sp),
	REG_OFFSET_NAME(pc),
	REG_OFFSET_NAME(pstate),
	REG_OFFSET_END,
};
/**
 * regs_query_register_offset() - query register offset from its name
 * @name:	the name of a register
 *
 * regs_query_register_offset() returns the offset of a register in struct
 * pt_regs from its name. If the name is invalid, this returns -EINVAL.
 */
int regs_query_register_offset(const char *name)
{
	const struct pt_regs_offset *roff;

	for (roff = regoffset_table; roff->name != NULL; roff++)
		if (!strcmp(roff->name, name))
			return roff->offset;
	return -EINVAL;
}
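/*
 * Usage sketch (illustrative only, not part of this file): a caller such
 * as a kprobes fetch method can resolve a name once and then read the
 * register through the generic accessor:
 *
 *	int off = regs_query_register_offset("x0");
 *	if (off >= 0)
 *		val = regs_get_register(regs, off);
 */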
/**
 * regs_within_kernel_stack() - check the address in the stack
 * @regs:	pt_regs which contains kernel stack pointer.
 * @addr:	address which is checked.
 *
 * regs_within_kernel_stack() checks @addr is within the kernel stack page(s).
 * If @addr is within the kernel stack, it returns true. If not, returns false.
 */
static bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
{
	return ((addr & ~(THREAD_SIZE - 1)) ==
		(kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1))) ||
		on_irq_stack(addr, sizeof(unsigned long));
}
/**
 * regs_get_kernel_stack_nth() - get Nth entry of the stack
 * @regs:	pt_regs which contains kernel stack pointer.
 * @n:		stack entry number.
 *
 * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
 * is specified by @regs. If the @n th entry is NOT in the kernel stack,
 * this returns 0.
 */
unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);

	addr += n;
	if (regs_within_kernel_stack(regs, (unsigned long)addr))
		return READ_ONCE_NOCHECK(*addr);
	else
		return 0;
}
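/*
 * Worked example: regs_get_kernel_stack_nth(regs, 0) reads the word at the
 * saved kernel stack pointer itself, n == 1 the next word up, and so on;
 * an out-of-range @n yields 0 rather than a stray read.
 */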
/*
 * TODO: does not yet catch signals sent when the child dies,
 * in exit.c or in signal.c.
 */

/*
 * Called by kernel/ptrace.c when detaching.
 */
void ptrace_disable(struct task_struct *child)
{
	/*
	 * This would be better off in core code, but PTRACE_DETACH has
	 * grown its fair share of arch-specific warts and changing it
	 * is likely to cause regressions on obscure architectures.
	 */
	user_disable_single_step(child);
}
#ifdef CONFIG_HAVE_HW_BREAKPOINT
/*
 * Handle hitting a HW-breakpoint.
 */
static void ptrace_hbptriggered(struct perf_event *bp,
				struct perf_sample_data *data,
				struct pt_regs *regs)
{
	struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
	const char *desc = "Hardware breakpoint trap (ptrace)";

	if (is_compat_task()) {
		int si_errno = 0;
		int i;

		for (i = 0; i < ARM_MAX_BRP; ++i) {
			if (current->thread.debug.hbp_break[i] == bp) {
				si_errno = (i << 1) + 1;
				break;
			}
		}
		for (i = 0; i < ARM_MAX_WRP; ++i) {
			if (current->thread.debug.hbp_watch[i] == bp) {
				si_errno = -((i << 1) + 1);
				break;
			}
		}
		arm64_force_sig_ptrace_errno_trap(si_errno, bkpt->trigger,
						  desc);
		return;
	}

	arm64_force_sig_fault(SIGTRAP, TRAP_HWBKPT, bkpt->trigger, desc);
}
/*
 * Unregister breakpoints from this task and reset the pointers in the
 * thread_struct.
 */
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	int i;
	struct thread_struct *t = &tsk->thread;

	for (i = 0; i < ARM_MAX_BRP; i++) {
		if (t->debug.hbp_break[i]) {
			unregister_hw_breakpoint(t->debug.hbp_break[i]);
			t->debug.hbp_break[i] = NULL;
		}
	}

	for (i = 0; i < ARM_MAX_WRP; i++) {
		if (t->debug.hbp_watch[i]) {
			unregister_hw_breakpoint(t->debug.hbp_watch[i]);
			t->debug.hbp_watch[i] = NULL;
		}
	}
}

void ptrace_hw_copy_thread(struct task_struct *tsk)
{
	memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
}
static struct perf_event *ptrace_hbp_get_event(unsigned int note_type,
					       struct task_struct *tsk,
					       unsigned long idx)
{
	struct perf_event *bp = ERR_PTR(-EINVAL);

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if (idx >= ARM_MAX_BRP)
			break;
		idx = array_index_nospec(idx, ARM_MAX_BRP);
		bp = tsk->thread.debug.hbp_break[idx];
		break;
	case NT_ARM_HW_WATCH:
		if (idx >= ARM_MAX_WRP)
			break;
		idx = array_index_nospec(idx, ARM_MAX_WRP);
		bp = tsk->thread.debug.hbp_watch[idx];
		break;
	}

	return bp;
}
static int ptrace_hbp_set_event(unsigned int note_type,
				struct task_struct *tsk,
				unsigned long idx,
				struct perf_event *bp)
{
	int err = -EINVAL;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if (idx >= ARM_MAX_BRP)
			break;
		idx = array_index_nospec(idx, ARM_MAX_BRP);
		tsk->thread.debug.hbp_break[idx] = bp;
		err = 0;
		break;
	case NT_ARM_HW_WATCH:
		if (idx >= ARM_MAX_WRP)
			break;
		idx = array_index_nospec(idx, ARM_MAX_WRP);
		tsk->thread.debug.hbp_watch[idx] = bp;
		err = 0;
		break;
	}

	return err;
}
static struct perf_event *ptrace_hbp_create(unsigned int note_type,
					    struct task_struct *tsk,
					    unsigned long idx)
{
	struct perf_event *bp;
	struct perf_event_attr attr;
	int err, type;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		type = HW_BREAKPOINT_X;
		break;
	case NT_ARM_HW_WATCH:
		type = HW_BREAKPOINT_RW;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	ptrace_breakpoint_init(&attr);

	/*
	 * Initialise fields to sane defaults
	 * (i.e. values that will pass validation).
	 */
	attr.bp_addr	= 0;
	attr.bp_len	= HW_BREAKPOINT_LEN_4;
	attr.bp_type	= type;
	attr.disabled	= 1;

	bp = register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL, tsk);
	if (IS_ERR(bp))
		return bp;

	err = ptrace_hbp_set_event(note_type, tsk, idx, bp);
	if (err)
		return ERR_PTR(err);

	return bp;
}
static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
				     struct arch_hw_breakpoint_ctrl ctrl,
				     struct perf_event_attr *attr)
{
	int err, len, type, offset, disabled = !ctrl.enabled;

	attr->disabled = disabled;
	if (disabled)
		return 0;

	err = arch_bp_generic_fields(ctrl, &len, &type, &offset);
	if (err)
		return err;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if ((type & HW_BREAKPOINT_X) != type)
			return -EINVAL;
		break;
	case NT_ARM_HW_WATCH:
		if ((type & HW_BREAKPOINT_RW) != type)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	attr->bp_len	= len;
	attr->bp_type	= type;
	attr->bp_addr	+= offset;

	return 0;
}
static int ptrace_hbp_get_resource_info(unsigned int note_type, u32 *info)
{
	u8 num;
	u32 reg = 0;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		num = hw_breakpoint_slots(TYPE_INST);
		break;
	case NT_ARM_HW_WATCH:
		num = hw_breakpoint_slots(TYPE_DATA);
		break;
	default:
		return -EINVAL;
	}

	reg |= debug_monitors_arch();
	reg <<= 8;
	reg |= num;

	*info = reg;
	return 0;
}
static int ptrace_hbp_get_ctrl(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u32 *ctrl)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (IS_ERR(bp))
		return PTR_ERR(bp);

	*ctrl = bp ? encode_ctrl_reg(counter_arch_bp(bp)->ctrl) : 0;
	return 0;
}

static int ptrace_hbp_get_addr(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u64 *addr)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (IS_ERR(bp))
		return PTR_ERR(bp);

	*addr = bp ? counter_arch_bp(bp)->address : 0;
	return 0;
}
static struct perf_event *ptrace_hbp_get_initialised_bp(unsigned int note_type,
							struct task_struct *tsk,
							unsigned long idx)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (!bp)
		bp = ptrace_hbp_create(note_type, tsk, idx);

	return bp;
}
static int ptrace_hbp_set_ctrl(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u32 uctrl)
{
	int err;
	struct perf_event *bp;
	struct perf_event_attr attr;
	struct arch_hw_breakpoint_ctrl ctrl;

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
	if (IS_ERR(bp))
		return PTR_ERR(bp);

	attr = bp->attr;
	decode_ctrl_reg(uctrl, &ctrl);
	err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr);
	if (err)
		return err;

	return modify_user_hw_breakpoint(bp, &attr);
}
static int ptrace_hbp_set_addr(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u64 addr)
{
	int err;
	struct perf_event *bp;
	struct perf_event_attr attr;

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
	if (IS_ERR(bp))
		return PTR_ERR(bp);

	attr = bp->attr;
	attr.bp_addr = addr;
	err = modify_user_hw_breakpoint(bp, &attr);
	return err;
}
#define PTRACE_HBP_ADDR_SZ	sizeof(u64)
#define PTRACE_HBP_CTRL_SZ	sizeof(u32)
#define PTRACE_HBP_PAD_SZ	sizeof(u32)
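/*
 * These sizes describe the layout of struct user_hwdebug_state as seen by
 * userspace: a u32 of resource info plus a u32 of padding, followed by one
 * (u64 address, u32 ctrl, u32 pad) triple per breakpoint/watchpoint slot.
 * hw_break_get() and hw_break_set() below walk exactly this layout.
 */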
static int hw_break_get(struct task_struct *target,
			const struct user_regset *regset,
			struct membuf to)
{
	unsigned int note_type = regset->core_note_type;
	int ret, idx = 0;
	u32 info, ctrl;
	u64 addr;

	/* Resource info */
	ret = ptrace_hbp_get_resource_info(note_type, &info);
	if (ret)
		return ret;

	membuf_write(&to, &info, sizeof(info));
	membuf_zero(&to, sizeof(u32));
	/* (address, ctrl) registers */
	while (to.left) {
		ret = ptrace_hbp_get_addr(note_type, target, idx, &addr);
		if (ret)
			return ret;
		ret = ptrace_hbp_get_ctrl(note_type, target, idx, &ctrl);
		if (ret)
			return ret;
		membuf_store(&to, addr);
		membuf_store(&to, ctrl);
		membuf_zero(&to, sizeof(u32));
		idx++;
	}

	return 0;
}
static int hw_break_set(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			const void *kbuf, const void __user *ubuf)
{
	unsigned int note_type = regset->core_note_type;
	int ret, idx = 0, offset, limit;
	u32 ctrl;
	u64 addr;

	/* Resource info and pad */
	offset = offsetof(struct user_hwdebug_state, dbg_regs);
	user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, offset);

	/* (address, ctrl) registers */
	limit = regset->n * regset->size;
	while (count && offset < limit) {
		if (count < PTRACE_HBP_ADDR_SZ)
			return -EINVAL;
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr,
					 offset, offset + PTRACE_HBP_ADDR_SZ);
		if (ret)
			return ret;
		ret = ptrace_hbp_set_addr(note_type, target, idx, addr);
		if (ret)
			return ret;
		offset += PTRACE_HBP_ADDR_SZ;

		if (!count)
			break;
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl,
					 offset, offset + PTRACE_HBP_CTRL_SZ);
		if (ret)
			return ret;
		ret = ptrace_hbp_set_ctrl(note_type, target, idx, ctrl);
		if (ret)
			return ret;
		offset += PTRACE_HBP_CTRL_SZ;

		user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
					  offset, offset + PTRACE_HBP_PAD_SZ);
		offset += PTRACE_HBP_PAD_SZ;
		idx++;
	}

	return 0;
}
#endif	/* CONFIG_HAVE_HW_BREAKPOINT */
static int gpr_get(struct task_struct *target,
		   const struct user_regset *regset,
		   struct membuf to)
{
	struct user_pt_regs *uregs = &task_pt_regs(target)->user_regs;

	return membuf_write(&to, uregs, sizeof(*uregs));
}
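/*
 * Userspace view (illustrative sketch, standard ptrace(2) regset access):
 * a tracer reads these registers with PTRACE_GETREGSET and NT_PRSTATUS:
 *
 *	struct user_pt_regs regs;
 *	struct iovec iov = { .iov_base = &regs, .iov_len = sizeof(regs) };
 *	ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov);
 */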
static int gpr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct user_pt_regs newregs = task_pt_regs(target)->user_regs;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1);
	if (ret)
		return ret;

	if (!valid_user_regs(&newregs, target))
		return -EINVAL;

	task_pt_regs(target)->user_regs = newregs;
	return 0;
}
static int fpr_active(struct task_struct *target, const struct user_regset *regset)
{
	if (!system_supports_fpsimd())
		return -ENODEV;
	return regset->n;
}
/*
 * TODO: update fp accessors for lazy context switching (sync/flush hwstate)
 */
static int __fpr_get(struct task_struct *target,
		     const struct user_regset *regset,
		     struct membuf to)
{
	struct user_fpsimd_state *uregs;

	sve_sync_to_fpsimd(target);

	uregs = &target->thread.uw.fpsimd_state;

	return membuf_write(&to, uregs, sizeof(*uregs));
}
static int fpr_get(struct task_struct *target, const struct user_regset *regset,
		   struct membuf to)
{
	if (!system_supports_fpsimd())
		return -EINVAL;

	if (target == current)
		fpsimd_preserve_current_state();

	return __fpr_get(target, regset, to);
}
static int __fpr_set(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     const void *kbuf, const void __user *ubuf,
		     unsigned int start_pos)
{
	int ret;
	struct user_fpsimd_state newstate;

	/*
	 * Ensure target->thread.uw.fpsimd_state is up to date, so that a
	 * short copyin can't resurrect stale data.
	 */
	sve_sync_to_fpsimd(target);

	newstate = target->thread.uw.fpsimd_state;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate,
				 start_pos, start_pos + sizeof(newstate));
	if (ret)
		return ret;

	target->thread.uw.fpsimd_state = newstate;

	return ret;
}
static int fpr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;

	if (!system_supports_fpsimd())
		return -EINVAL;

	ret = __fpr_set(target, regset, pos, count, kbuf, ubuf, 0);
	if (ret)
		return ret;

	sve_sync_from_fpsimd_zeropad(target);
	fpsimd_flush_task_state(target);

	return ret;
}
static int tls_get(struct task_struct *target, const struct user_regset *regset,
		   struct membuf to)
{
	int ret;

	if (target == current)
		tls_preserve_current_state();

	ret = membuf_store(&to, target->thread.uw.tp_value);
	if (system_supports_tpidr2())
		ret = membuf_store(&to, target->thread.tpidr2_el0);
	else
		ret = membuf_zero(&to, sizeof(u64));

	return ret;
}

static int tls_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	unsigned long tls[2];

	tls[0] = target->thread.uw.tp_value;
	if (system_supports_sme())
		tls[1] = target->thread.tpidr2_el0;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, tls, 0, count);
	if (ret)
		return ret;

	target->thread.uw.tp_value = tls[0];
	if (system_supports_sme())
		target->thread.tpidr2_el0 = tls[1];

	return ret;
}
static int system_call_get(struct task_struct *target,
			   const struct user_regset *regset,
			   struct membuf to)
{
	return membuf_store(&to, task_pt_regs(target)->syscallno);
}
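/*
 * Tracer's view (illustrative note): reading/writing NT_ARM_SYSTEM_CALL
 * exposes the current syscall number; writing -1 here is the conventional
 * way for a tracer to skip the pending system call.
 */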
static int system_call_set(struct task_struct *target,
			   const struct user_regset *regset,
			   unsigned int pos, unsigned int count,
			   const void *kbuf, const void __user *ubuf)
{
	int syscallno = task_pt_regs(target)->syscallno;
	int ret;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &syscallno, 0, -1);
	if (ret)
		return ret;

	task_pt_regs(target)->syscallno = syscallno;
	return ret;
}
#ifdef CONFIG_ARM64_SVE

static void sve_init_header_from_task(struct user_sve_header *header,
				      struct task_struct *target,
				      enum vec_type type)
{
	unsigned int vq;
	bool active;
	bool fpsimd_only;
	enum vec_type task_type;

	memset(header, 0, sizeof(*header));

	/* Check if the requested registers are active for the task */
	if (thread_sm_enabled(&target->thread))
		task_type = ARM64_VEC_SME;
	else
		task_type = ARM64_VEC_SVE;
	active = (task_type == type);

	switch (type) {
	case ARM64_VEC_SVE:
		if (test_tsk_thread_flag(target, TIF_SVE_VL_INHERIT))
			header->flags |= SVE_PT_VL_INHERIT;
		fpsimd_only = !test_tsk_thread_flag(target, TIF_SVE);
		break;
	case ARM64_VEC_SME:
		if (test_tsk_thread_flag(target, TIF_SME_VL_INHERIT))
			header->flags |= SVE_PT_VL_INHERIT;
		fpsimd_only = false;
		break;
	default:
		WARN_ON_ONCE(1);
		return;
	}

	if (active) {
		if (fpsimd_only)
			header->flags |= SVE_PT_REGS_FPSIMD;
		else
			header->flags |= SVE_PT_REGS_SVE;
	}

	header->vl = task_get_vl(target, type);
	vq = sve_vq_from_vl(header->vl);

	header->max_vl = vec_max_vl(type);
	header->size = SVE_PT_SIZE(vq, header->flags);
	header->max_size = SVE_PT_SIZE(sve_vq_from_vl(header->max_vl),
				       SVE_PT_REGS_SVE);
}
static unsigned int sve_size_from_header(struct user_sve_header const *header)
{
	return ALIGN(header->size, SVE_VQ_BYTES);
}
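/*
 * Example: with vl = 32 bytes (vq = 2), SVE_PT_SIZE() yields a payload size
 * that is then rounded up to a multiple of SVE_VQ_BYTES (16), so the regset
 * size reported to userspace is always 16-byte aligned.
 */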
static int sve_get_common(struct task_struct *target,
			  const struct user_regset *regset,
			  struct membuf to,
			  enum vec_type type)
{
	unsigned int vq;
	struct user_sve_header header;
	unsigned long start, end;

	/* Header */
	sve_init_header_from_task(&header, target, type);
	vq = sve_vq_from_vl(header.vl);

	membuf_write(&to, &header, sizeof(header));

	if (target == current)
		fpsimd_preserve_current_state();

	BUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET != sizeof(header));
	BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header));

	switch ((header.flags & SVE_PT_REGS_MASK)) {
	case SVE_PT_REGS_FPSIMD:
		return __fpr_get(target, regset, to);

	case SVE_PT_REGS_SVE:
		start = SVE_PT_SVE_OFFSET;
		end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq);
		membuf_write(&to, target->thread.sve_state, end - start);

		start = end;
		end = SVE_PT_SVE_FPSR_OFFSET(vq);
		membuf_zero(&to, end - start);

		/*
		 * Copy fpsr, and fpcr which must follow contiguously in
		 * struct fpsimd_state:
		 */
		start = end;
		end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE;
		membuf_write(&to, &target->thread.uw.fpsimd_state.fpsr,
			     end - start);

		start = end;
		end = sve_size_from_header(&header);
		return membuf_zero(&to, end - start);

	default:
		return 0;
	}
}
static int sve_get(struct task_struct *target,
		   const struct user_regset *regset,
		   struct membuf to)
{
	if (!system_supports_sve())
		return -EINVAL;

	return sve_get_common(target, regset, to, ARM64_VEC_SVE);
}
static int sve_set_common(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf,
			  enum vec_type type)
{
	int ret;
	struct user_sve_header header;
	unsigned int vq;
	unsigned long start, end;

	/* Header */
	if (count < sizeof(header))
		return -EINVAL;
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &header,
				 0, sizeof(header));
	if (ret)
		goto out;

	/*
	 * Apart from SVE_PT_REGS_MASK, all SVE_PT_* flags are consumed by
	 * vec_set_vector_length(), which will also validate them for us:
	 */
	ret = vec_set_vector_length(target, type, header.vl,
		((unsigned long)header.flags & ~SVE_PT_REGS_MASK) << 16);
	if (ret)
		goto out;

	/* Actual VL set may be less than the user asked for: */
	vq = sve_vq_from_vl(task_get_vl(target, type));

	/* Enter/exit streaming mode */
	if (system_supports_sme()) {
		u64 old_svcr = target->thread.svcr;

		switch (type) {
		case ARM64_VEC_SVE:
			target->thread.svcr &= ~SVCR_SM_MASK;
			break;
		case ARM64_VEC_SME:
			target->thread.svcr |= SVCR_SM_MASK;
			break;
		default:
			WARN_ON_ONCE(1);
			return -EINVAL;
		}

		/*
		 * If we switched then invalidate any existing SVE
		 * state and ensure there's storage.
		 */
		if (target->thread.svcr != old_svcr)
			sve_alloc(target, true);
	}

	/* Registers: FPSIMD-only case */

	BUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET != sizeof(header));
	if ((header.flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD) {
		ret = __fpr_set(target, regset, pos, count, kbuf, ubuf,
				SVE_PT_FPSIMD_OFFSET);
		clear_tsk_thread_flag(target, TIF_SVE);
		target->thread.fp_type = FP_STATE_FPSIMD;
		goto out;
	}

	/*
	 * Otherwise: no registers or full SVE case. For backwards
	 * compatibility reasons we treat empty flags as SVE registers.
	 */

	/*
	 * If setting a different VL from the requested VL and there is
	 * register data, the data layout will be wrong: don't even
	 * try to set the registers in this case.
	 */
	if (count && vq != sve_vq_from_vl(header.vl)) {
		ret = -EIO;
		goto out;
	}

	sve_alloc(target, true);
	if (!target->thread.sve_state) {
		ret = -ENOMEM;
		clear_tsk_thread_flag(target, TIF_SVE);
		target->thread.fp_type = FP_STATE_FPSIMD;
		goto out;
	}

	/*
	 * Ensure target->thread.sve_state is up to date with target's
	 * FPSIMD regs, so that a short copyin leaves trailing
	 * registers unmodified. Always enable SVE even if going into
	 * streaming mode.
	 */
	fpsimd_sync_to_sve(target);
	set_tsk_thread_flag(target, TIF_SVE);
	target->thread.fp_type = FP_STATE_SVE;

	BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header));
	start = SVE_PT_SVE_OFFSET;
	end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 target->thread.sve_state,
				 start, end);
	if (ret)
		goto out;

	start = end;
	end = SVE_PT_SVE_FPSR_OFFSET(vq);
	user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, start, end);

	/*
	 * Copy fpsr, and fpcr which must follow contiguously in
	 * struct fpsimd_state:
	 */
	start = end;
	end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE;
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &target->thread.uw.fpsimd_state.fpsr,
				 start, end);

out:
	fpsimd_flush_task_state(target);
	return ret;
}
static int sve_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	if (!system_supports_sve())
		return -EINVAL;

	return sve_set_common(target, regset, pos, count, kbuf, ubuf,
			      ARM64_VEC_SVE);
}

#endif /* CONFIG_ARM64_SVE */

#ifdef CONFIG_ARM64_SME
static int ssve_get(struct task_struct *target,
		    const struct user_regset *regset,
		    struct membuf to)
{
	if (!system_supports_sme())
		return -EINVAL;

	return sve_get_common(target, regset, to, ARM64_VEC_SME);
}

static int ssve_set(struct task_struct *target,
		    const struct user_regset *regset,
		    unsigned int pos, unsigned int count,
		    const void *kbuf, const void __user *ubuf)
{
	if (!system_supports_sme())
		return -EINVAL;

	return sve_set_common(target, regset, pos, count, kbuf, ubuf,
			      ARM64_VEC_SME);
}
static int za_get(struct task_struct *target,
		  const struct user_regset *regset,
		  struct membuf to)
{
	struct user_za_header header;
	unsigned int vq;
	unsigned long start, end;

	if (!system_supports_sme())
		return -EINVAL;

	/* Header */
	memset(&header, 0, sizeof(header));

	if (test_tsk_thread_flag(target, TIF_SME_VL_INHERIT))
		header.flags |= ZA_PT_VL_INHERIT;

	header.vl = task_get_sme_vl(target);
	vq = sve_vq_from_vl(header.vl);
	header.max_vl = sme_max_vl();
	header.max_size = ZA_PT_SIZE(vq);

	/* If ZA is not active there is only the header */
	if (thread_za_enabled(&target->thread))
		header.size = ZA_PT_SIZE(vq);
	else
		header.size = ZA_PT_ZA_OFFSET;

	membuf_write(&to, &header, sizeof(header));

	BUILD_BUG_ON(ZA_PT_ZA_OFFSET != sizeof(header));
	end = ZA_PT_ZA_OFFSET;

	if (target == current)
		fpsimd_preserve_current_state();

	/* Any register data to include? */
	if (thread_za_enabled(&target->thread)) {
		start = end;
		end = ZA_PT_SIZE(vq);
		membuf_write(&to, target->thread.za_state, end - start);
	}

	/* Zero any trailing padding */
	start = end;
	end = ALIGN(header.size, SVE_VQ_BYTES);
	return membuf_zero(&to, end - start);
}
static int za_set(struct task_struct *target,
		  const struct user_regset *regset,
		  unsigned int pos, unsigned int count,
		  const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct user_za_header header;
	unsigned int vq;
	unsigned long start, end;

	if (!system_supports_sme())
		return -EINVAL;

	/* Header */
	if (count < sizeof(header))
		return -EINVAL;
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &header,
				 0, sizeof(header));
	if (ret)
		goto out;

	/*
	 * All current ZA_PT_* flags are consumed by
	 * vec_set_vector_length(), which will also validate them for
	 * us:
	 */
	ret = vec_set_vector_length(target, ARM64_VEC_SME, header.vl,
		((unsigned long)header.flags) << 16);
	if (ret)
		goto out;

	/* Actual VL set may be less than the user asked for: */
	vq = sve_vq_from_vl(task_get_sme_vl(target));

	/* Ensure there is some SVE storage for streaming mode */
	if (!target->thread.sve_state) {
		sve_alloc(target, false);
		if (!target->thread.sve_state) {
			ret = -ENOMEM;
			goto out;
		}
	}

	/* Allocate/reinit ZA storage */
	sme_alloc(target);
	if (!target->thread.za_state) {
		ret = -ENOMEM;
		goto out;
	}

	/* If there is no data then disable ZA */
	if (!count) {
		target->thread.svcr &= ~SVCR_ZA_MASK;
		goto out;
	}

	/*
	 * If setting a different VL from the requested VL and there is
	 * register data, the data layout will be wrong: don't even
	 * try to set the registers in this case.
	 */
	if (vq != sve_vq_from_vl(header.vl)) {
		ret = -EIO;
		goto out;
	}

	BUILD_BUG_ON(ZA_PT_ZA_OFFSET != sizeof(header));
	start = ZA_PT_ZA_OFFSET;
	end = ZA_PT_SIZE(vq);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 target->thread.za_state,
				 start, end);
	if (ret)
		goto out;

	/* Mark ZA as active and let userspace use it */
	set_tsk_thread_flag(target, TIF_SME);
	target->thread.svcr |= SVCR_ZA_MASK;

out:
	fpsimd_flush_task_state(target);
	return ret;
}

#endif /* CONFIG_ARM64_SME */
#ifdef CONFIG_ARM64_PTR_AUTH
static int pac_mask_get(struct task_struct *target,
			const struct user_regset *regset,
			struct membuf to)
{
	/*
	 * The PAC bits can differ across data and instruction pointers
	 * depending on TCR_EL1.TBID*, which we may make use of in future, so
	 * we expose separate masks.
	 */
	unsigned long mask = ptrauth_user_pac_mask();
	struct user_pac_mask uregs = {
		.data_mask = mask,
		.insn_mask = mask,
	};

	if (!system_supports_address_auth())
		return -EINVAL;

	return membuf_write(&to, &uregs, sizeof(uregs));
}
static int pac_enabled_keys_get(struct task_struct *target,
				const struct user_regset *regset,
				struct membuf to)
{
	long enabled_keys = ptrauth_get_enabled_keys(target);

	if (IS_ERR_VALUE(enabled_keys))
		return enabled_keys;

	return membuf_write(&to, &enabled_keys, sizeof(enabled_keys));
}
static int pac_enabled_keys_set(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				const void *kbuf, const void __user *ubuf)
{
	int ret;
	long enabled_keys = ptrauth_get_enabled_keys(target);

	if (IS_ERR_VALUE(enabled_keys))
		return enabled_keys;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &enabled_keys, 0,
				 sizeof(long));
	if (ret)
		return ret;

	return ptrauth_set_enabled_keys(target, PR_PAC_ENABLED_KEYS_MASK,
					enabled_keys);
}
#ifdef CONFIG_CHECKPOINT_RESTORE
static __uint128_t pac_key_to_user(const struct ptrauth_key *key)
{
	return (__uint128_t)key->hi << 64 | key->lo;
}

static struct ptrauth_key pac_key_from_user(__uint128_t ukey)
{
	struct ptrauth_key key = {
		.lo = (unsigned long)ukey,
		.hi = (unsigned long)(ukey >> 64),
	};

	return key;
}
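/*
 * Round-trip property: pac_key_to_user(pac_key_from_user(x)) == x, since the
 * low/high 64-bit halves of the __uint128_t map to .lo/.hi and back, so a
 * GETREGSET after a SETREGSET returns exactly what was written.
 */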
static void pac_address_keys_to_user(struct user_pac_address_keys *ukeys,
				     const struct ptrauth_keys_user *keys)
{
	ukeys->apiakey = pac_key_to_user(&keys->apia);
	ukeys->apibkey = pac_key_to_user(&keys->apib);
	ukeys->apdakey = pac_key_to_user(&keys->apda);
	ukeys->apdbkey = pac_key_to_user(&keys->apdb);
}

static void pac_address_keys_from_user(struct ptrauth_keys_user *keys,
				       const struct user_pac_address_keys *ukeys)
{
	keys->apia = pac_key_from_user(ukeys->apiakey);
	keys->apib = pac_key_from_user(ukeys->apibkey);
	keys->apda = pac_key_from_user(ukeys->apdakey);
	keys->apdb = pac_key_from_user(ukeys->apdbkey);
}
static int pac_address_keys_get(struct task_struct *target,
				const struct user_regset *regset,
				struct membuf to)
{
	struct ptrauth_keys_user *keys = &target->thread.keys_user;
	struct user_pac_address_keys user_keys;

	if (!system_supports_address_auth())
		return -EINVAL;

	pac_address_keys_to_user(&user_keys, keys);

	return membuf_write(&to, &user_keys, sizeof(user_keys));
}
static int pac_address_keys_set(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				const void *kbuf, const void __user *ubuf)
{
	struct ptrauth_keys_user *keys = &target->thread.keys_user;
	struct user_pac_address_keys user_keys;
	int ret;

	if (!system_supports_address_auth())
		return -EINVAL;

	pac_address_keys_to_user(&user_keys, keys);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &user_keys, 0, -1);
	if (ret)
		return ret;
	pac_address_keys_from_user(keys, &user_keys);

	return 0;
}
static void pac_generic_keys_to_user(struct user_pac_generic_keys *ukeys,
				     const struct ptrauth_keys_user *keys)
{
	ukeys->apgakey = pac_key_to_user(&keys->apga);
}

static void pac_generic_keys_from_user(struct ptrauth_keys_user *keys,
				       const struct user_pac_generic_keys *ukeys)
{
	keys->apga = pac_key_from_user(ukeys->apgakey);
}
static int pac_generic_keys_get(struct task_struct *target,
				const struct user_regset *regset,
				struct membuf to)
{
	struct ptrauth_keys_user *keys = &target->thread.keys_user;
	struct user_pac_generic_keys user_keys;

	if (!system_supports_generic_auth())
		return -EINVAL;

	pac_generic_keys_to_user(&user_keys, keys);

	return membuf_write(&to, &user_keys, sizeof(user_keys));
}
static int pac_generic_keys_set(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				const void *kbuf, const void __user *ubuf)
{
	struct ptrauth_keys_user *keys = &target->thread.keys_user;
	struct user_pac_generic_keys user_keys;
	int ret;

	if (!system_supports_generic_auth())
		return -EINVAL;

	pac_generic_keys_to_user(&user_keys, keys);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &user_keys, 0, -1);
	if (ret)
		return ret;
	pac_generic_keys_from_user(keys, &user_keys);

	return 0;
}
#endif /* CONFIG_CHECKPOINT_RESTORE */
#endif /* CONFIG_ARM64_PTR_AUTH */
#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
static int tagged_addr_ctrl_get(struct task_struct *target,
				const struct user_regset *regset,
				struct membuf to)
{
	long ctrl = get_tagged_addr_ctrl(target);

	if (IS_ERR_VALUE(ctrl))
		return ctrl;

	return membuf_write(&to, &ctrl, sizeof(ctrl));
}

static int tagged_addr_ctrl_set(struct task_struct *target, const struct
				user_regset *regset, unsigned int pos,
				unsigned int count, const void *kbuf, const
				void __user *ubuf)
{
	int ret;
	long ctrl;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl, 0, -1);
	if (ret)
		return ret;

	return set_tagged_addr_ctrl(target, ctrl);
}
#endif /* CONFIG_ARM64_TAGGED_ADDR_ABI */
enum aarch64_regset {
	REGSET_GPR,
	REGSET_FPR,
	REGSET_TLS,
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	REGSET_HW_BREAK,
	REGSET_HW_WATCH,
#endif
	REGSET_SYSTEM_CALL,
#ifdef CONFIG_ARM64_SVE
	REGSET_SVE,
#endif
#ifdef CONFIG_ARM64_SME
	REGSET_SSVE,
	REGSET_ZA,
#endif
#ifdef CONFIG_ARM64_PTR_AUTH
	REGSET_PAC_MASK,
	REGSET_PAC_ENABLED_KEYS,
#ifdef CONFIG_CHECKPOINT_RESTORE
	REGSET_PACA_KEYS,
	REGSET_PACG_KEYS,
#endif
#endif
#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
	REGSET_TAGGED_ADDR_CTRL,
#endif
};
static const struct user_regset aarch64_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(struct user_pt_regs) / sizeof(u64),
		.size = sizeof(u64),
		.align = sizeof(u64),
		.regset_get = gpr_get,
		.set = gpr_set
	},
	[REGSET_FPR] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_fpsimd_state) / sizeof(u32),
		/*
		 * We pretend we have 32-bit registers because the fpsr and
		 * fpcr are 32-bits wide.
		 */
		.size = sizeof(u32),
		.align = sizeof(u32),
		.active = fpr_active,
		.regset_get = fpr_get,
		.set = fpr_set
	},
	[REGSET_TLS] = {
		.core_note_type = NT_ARM_TLS,
		.n = 2,
		.size = sizeof(void *),
		.align = sizeof(void *),
		.regset_get = tls_get,
		.set = tls_set,
	},
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	[REGSET_HW_BREAK] = {
		.core_note_type = NT_ARM_HW_BREAK,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.regset_get = hw_break_get,
		.set = hw_break_set,
	},
	[REGSET_HW_WATCH] = {
		.core_note_type = NT_ARM_HW_WATCH,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.regset_get = hw_break_get,
		.set = hw_break_set,
	},
#endif
	[REGSET_SYSTEM_CALL] = {
		.core_note_type = NT_ARM_SYSTEM_CALL,
		.n = 1,
		.size = sizeof(int),
		.align = sizeof(int),
		.regset_get = system_call_get,
		.set = system_call_set,
	},
#ifdef CONFIG_ARM64_SVE
	[REGSET_SVE] = { /* Scalable Vector Extension */
		.core_note_type = NT_ARM_SVE,
		.n = DIV_ROUND_UP(SVE_PT_SIZE(SVE_VQ_MAX, SVE_PT_REGS_SVE),
				  SVE_VQ_BYTES),
		.size = SVE_VQ_BYTES,
		.align = SVE_VQ_BYTES,
		.regset_get = sve_get,
		.set = sve_set,
	},
#endif
#ifdef CONFIG_ARM64_SME
	[REGSET_SSVE] = { /* Streaming mode SVE */
		.core_note_type = NT_ARM_SSVE,
		.n = DIV_ROUND_UP(SVE_PT_SIZE(SME_VQ_MAX, SVE_PT_REGS_SVE),
				  SVE_VQ_BYTES),
		.size = SVE_VQ_BYTES,
		.align = SVE_VQ_BYTES,
		.regset_get = ssve_get,
		.set = ssve_set,
	},
	[REGSET_ZA] = { /* SME ZA */
		.core_note_type = NT_ARM_ZA,
		/*
		 * ZA is a single register but it's variably sized and
		 * the ptrace core requires that the size of any data
		 * be an exact multiple of the configured register
		 * size so report as though we had SVE_VQ_BYTES
		 * registers. These values aren't exposed to
		 * userspace.
		 */
		.n = DIV_ROUND_UP(ZA_PT_SIZE(SME_VQ_MAX), SVE_VQ_BYTES),
		.size = SVE_VQ_BYTES,
		.align = SVE_VQ_BYTES,
		.regset_get = za_get,
		.set = za_set,
	},
#endif
#ifdef CONFIG_ARM64_PTR_AUTH
	[REGSET_PAC_MASK] = {
		.core_note_type = NT_ARM_PAC_MASK,
		.n = sizeof(struct user_pac_mask) / sizeof(u64),
		.size = sizeof(u64),
		.align = sizeof(u64),
		.regset_get = pac_mask_get,
		/* this cannot be set dynamically */
	},
	[REGSET_PAC_ENABLED_KEYS] = {
		.core_note_type = NT_ARM_PAC_ENABLED_KEYS,
		.n = 1,
		.size = sizeof(long),
		.align = sizeof(long),
		.regset_get = pac_enabled_keys_get,
		.set = pac_enabled_keys_set,
	},
#ifdef CONFIG_CHECKPOINT_RESTORE
	[REGSET_PACA_KEYS] = {
		.core_note_type = NT_ARM_PACA_KEYS,
		.n = sizeof(struct user_pac_address_keys) / sizeof(__uint128_t),
		.size = sizeof(__uint128_t),
		.align = sizeof(__uint128_t),
		.regset_get = pac_address_keys_get,
		.set = pac_address_keys_set,
	},
	[REGSET_PACG_KEYS] = {
		.core_note_type = NT_ARM_PACG_KEYS,
		.n = sizeof(struct user_pac_generic_keys) / sizeof(__uint128_t),
		.size = sizeof(__uint128_t),
		.align = sizeof(__uint128_t),
		.regset_get = pac_generic_keys_get,
		.set = pac_generic_keys_set,
	},
#endif
#endif
#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
	[REGSET_TAGGED_ADDR_CTRL] = {
		.core_note_type = NT_ARM_TAGGED_ADDR_CTRL,
		.n = 1,
		.size = sizeof(long),
		.align = sizeof(long),
		.regset_get = tagged_addr_ctrl_get,
		.set = tagged_addr_ctrl_set,
	},
#endif
};

static const struct user_regset_view user_aarch64_view = {
	.name = "aarch64", .e_machine = EM_AARCH64,
	.regsets = aarch64_regsets, .n = ARRAY_SIZE(aarch64_regsets)
};
#ifdef CONFIG_COMPAT
enum compat_regset {
	REGSET_COMPAT_GPR,
	REGSET_COMPAT_VFP,
};

static inline compat_ulong_t compat_get_user_reg(struct task_struct *task, int idx)
{
	struct pt_regs *regs = task_pt_regs(task);

	switch (idx) {
	case 15:
		return regs->pc;
	case 16:
		return pstate_to_compat_psr(regs->pstate);
	case 17:
		return regs->orig_x0;
	default:
		return regs->regs[idx];
	}
}
static int compat_gpr_get(struct task_struct *target,
			  const struct user_regset *regset,
			  struct membuf to)
{
	int i = 0;

	while (to.left)
		membuf_store(&to, compat_get_user_reg(target, i++));
	return 0;
}
static int compat_gpr_set(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf)
{
	struct pt_regs newregs;
	int ret = 0;
	unsigned int i, start, num_regs;

	/* Calculate the number of AArch32 registers contained in count */
	num_regs = count / regset->size;

	/* Convert pos into a register number */
	start = pos / regset->size;

	if (start + num_regs > regset->n)
		return -EIO;

	newregs = *task_pt_regs(target);

	for (i = 0; i < num_regs; ++i) {
		unsigned int idx = start + i;
		compat_ulong_t reg;

		if (kbuf) {
			memcpy(&reg, kbuf, sizeof(reg));
			kbuf += sizeof(reg);
		} else {
			ret = copy_from_user(&reg, ubuf, sizeof(reg));
			if (ret) {
				ret = -EFAULT;
				break;
			}

			ubuf += sizeof(reg);
		}

		switch (idx) {
		case 15:
			newregs.pc = reg;
			break;
		case 16:
			reg = compat_psr_to_pstate(reg);
			newregs.pstate = reg;
			break;
		case 17:
			newregs.orig_x0 = reg;
			break;
		default:
			newregs.regs[idx] = reg;
		}
	}

	if (valid_user_regs(&newregs.user_regs, target))
		*task_pt_regs(target) = newregs;
	else
		ret = -EINVAL;

	return ret;
}
static int compat_vfp_get(struct task_struct *target,
			  const struct user_regset *regset,
			  struct membuf to)
{
	struct user_fpsimd_state *uregs;
	compat_ulong_t fpscr;

	if (!system_supports_fpsimd())
		return -EINVAL;

	uregs = &target->thread.uw.fpsimd_state;

	if (target == current)
		fpsimd_preserve_current_state();

	/*
	 * The VFP registers are packed into the fpsimd_state, so they all sit
	 * nicely together for us. We just need to create the fpscr separately.
	 */
	membuf_write(&to, uregs, VFP_STATE_SIZE - sizeof(compat_ulong_t));
	fpscr = (uregs->fpsr & VFP_FPSCR_STAT_MASK) |
		(uregs->fpcr & VFP_FPSCR_CTRL_MASK);
	return membuf_store(&to, fpscr);
}
static int compat_vfp_set(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf)
{
	struct user_fpsimd_state *uregs;
	compat_ulong_t fpscr;
	int ret, vregs_end_pos;

	if (!system_supports_fpsimd())
		return -EINVAL;

	uregs = &target->thread.uw.fpsimd_state;

	vregs_end_pos = VFP_STATE_SIZE - sizeof(compat_ulong_t);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
				 vregs_end_pos);

	if (count && !ret) {
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fpscr,
					 vregs_end_pos, VFP_STATE_SIZE);
		if (!ret) {
			uregs->fpsr = fpscr & VFP_FPSCR_STAT_MASK;
			uregs->fpcr = fpscr & VFP_FPSCR_CTRL_MASK;
		}
	}

	fpsimd_flush_task_state(target);
	return ret;
}
static int compat_tls_get(struct task_struct *target,
			  const struct user_regset *regset,
			  struct membuf to)
{
	return membuf_store(&to, (compat_ulong_t)target->thread.uw.tp_value);
}

static int compat_tls_set(struct task_struct *target,
			  const struct user_regset *regset, unsigned int pos,
			  unsigned int count, const void *kbuf,
			  const void __user *ubuf)
{
	int ret;
	compat_ulong_t tls = target->thread.uw.tp_value;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
	if (ret)
		return ret;

	target->thread.uw.tp_value = tls;
	return ret;
}
static const struct user_regset aarch32_regsets[] = {
	[REGSET_COMPAT_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = COMPAT_ELF_NGREG,
		.size = sizeof(compat_elf_greg_t),
		.align = sizeof(compat_elf_greg_t),
		.regset_get = compat_gpr_get,
		.set = compat_gpr_set
	},
	[REGSET_COMPAT_VFP] = {
		.core_note_type = NT_ARM_VFP,
		.n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
		.size = sizeof(compat_ulong_t),
		.align = sizeof(compat_ulong_t),
		.active = fpr_active,
		.regset_get = compat_vfp_get,
		.set = compat_vfp_set
	},
};

static const struct user_regset_view user_aarch32_view = {
	.name = "aarch32", .e_machine = EM_ARM,
	.regsets = aarch32_regsets, .n = ARRAY_SIZE(aarch32_regsets)
};
static const struct user_regset aarch32_ptrace_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = COMPAT_ELF_NGREG,
		.size = sizeof(compat_elf_greg_t),
		.align = sizeof(compat_elf_greg_t),
		.regset_get = compat_gpr_get,
		.set = compat_gpr_set
	},
	[REGSET_FPR] = {
		.core_note_type = NT_ARM_VFP,
		.n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
		.size = sizeof(compat_ulong_t),
		.align = sizeof(compat_ulong_t),
		.regset_get = compat_vfp_get,
		.set = compat_vfp_set
	},
	[REGSET_TLS] = {
		.core_note_type = NT_ARM_TLS,
		.n = 1,
		.size = sizeof(compat_ulong_t),
		.align = sizeof(compat_ulong_t),
		.regset_get = compat_tls_get,
		.set = compat_tls_set,
	},
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	[REGSET_HW_BREAK] = {
		.core_note_type = NT_ARM_HW_BREAK,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.regset_get = hw_break_get,
		.set = hw_break_set,
	},
	[REGSET_HW_WATCH] = {
		.core_note_type = NT_ARM_HW_WATCH,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.regset_get = hw_break_get,
		.set = hw_break_set,
	},
#endif
	[REGSET_SYSTEM_CALL] = {
		.core_note_type = NT_ARM_SYSTEM_CALL,
		.n = 1,
		.size = sizeof(int),
		.align = sizeof(int),
		.regset_get = system_call_get,
		.set = system_call_set,
	},
};

static const struct user_regset_view user_aarch32_ptrace_view = {
	.name = "aarch32", .e_machine = EM_ARM,
	.regsets = aarch32_ptrace_regsets, .n = ARRAY_SIZE(aarch32_ptrace_regsets)
};
static int compat_ptrace_read_user(struct task_struct *tsk, compat_ulong_t off,
				   compat_ulong_t __user *ret)
{
	compat_ulong_t tmp;

	if (off & 3)
		return -EIO;

	if (off == COMPAT_PT_TEXT_ADDR)
		tmp = tsk->mm->start_code;
	else if (off == COMPAT_PT_DATA_ADDR)
		tmp = tsk->mm->start_data;
	else if (off == COMPAT_PT_TEXT_END_ADDR)
		tmp = tsk->mm->end_code;
	else if (off < sizeof(compat_elf_gregset_t))
		tmp = compat_get_user_reg(tsk, off >> 2);
	else if (off >= COMPAT_USER_SZ)
		return -EIO;
	else
		tmp = 0;

	return put_user(tmp, ret);
}
static int compat_ptrace_write_user(struct task_struct *tsk, compat_ulong_t off,
				    compat_ulong_t val)
{
	struct pt_regs newregs = *task_pt_regs(tsk);
	unsigned int idx = off / 4;

	if (off & 3 || off >= COMPAT_USER_SZ)
		return -EIO;

	if (off >= sizeof(compat_elf_gregset_t))
		return 0;

	switch (idx) {
	case 15:
		newregs.pc = val;
		break;
	case 16:
		newregs.pstate = compat_psr_to_pstate(val);
		break;
	case 17:
		newregs.orig_x0 = val;
		break;
	default:
		newregs.regs[idx] = val;
	}

	if (!valid_user_regs(&newregs.user_regs, tsk))
		return -EINVAL;

	*task_pt_regs(tsk) = newregs;
	return 0;
}
#ifdef CONFIG_HAVE_HW_BREAKPOINT

/*
 * Convert a virtual register number into an index for a thread_info
 * breakpoint array. Breakpoints are identified using positive numbers
 * whilst watchpoints are negative. The registers are laid out as pairs
 * of (address, control), each pair mapping to a unique hw_breakpoint struct.
 * Register 0 is reserved for describing resource information.
 */
static int compat_ptrace_hbp_num_to_idx(compat_long_t num)
{
	return (abs(num) - 1) >> 1;
}
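/*
 * Example mapping: register numbers 1 and 2 (the first breakpoint pair) and
 * -1 and -2 (the first watchpoint pair) all map to idx 0; numbers 3/4 and
 * -3/-4 map to idx 1, and so on.
 */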
static int compat_ptrace_hbp_get_resource_info(u32 *kdata)
{
	u8 num_brps, num_wrps, debug_arch, wp_len;
	u32 reg = 0;

	num_brps	= hw_breakpoint_slots(TYPE_INST);
	num_wrps	= hw_breakpoint_slots(TYPE_DATA);

	debug_arch	= debug_monitors_arch();
	wp_len		= 8;
	reg		|= debug_arch;
	reg		<<= 8;
	reg		|= wp_len;
	reg		<<= 8;
	reg		|= num_wrps;
	reg		<<= 8;
	reg		|= num_brps;

	*kdata = reg;
	return 0;
}
static int compat_ptrace_hbp_get(unsigned int note_type,
				 struct task_struct *tsk,
				 compat_long_t num,
				 u32 *kdata)
{
	u64 addr = 0;
	u32 ctrl = 0;

	int err, idx = compat_ptrace_hbp_num_to_idx(num);

	if (num & 1) {
		err = ptrace_hbp_get_addr(note_type, tsk, idx, &addr);
		*kdata = (u32)addr;
	} else {
		err = ptrace_hbp_get_ctrl(note_type, tsk, idx, &ctrl);
		*kdata = ctrl;
	}

	return err;
}
static int compat_ptrace_hbp_set(unsigned int note_type,
				 struct task_struct *tsk,
				 compat_long_t num,
				 u32 *kdata)
{
	u64 addr;
	u32 ctrl;

	int err, idx = compat_ptrace_hbp_num_to_idx(num);

	if (num & 1) {
		addr = *kdata;
		err = ptrace_hbp_set_addr(note_type, tsk, idx, addr);
	} else {
		ctrl = *kdata;
		err = ptrace_hbp_set_ctrl(note_type, tsk, idx, ctrl);
	}

	return err;
}
static int compat_ptrace_gethbpregs(struct task_struct *tsk, compat_long_t num,
				    compat_ulong_t __user *data)
{
	int ret;
	u32 kdata;

	/* Watchpoint */
	if (num < 0) {
		ret = compat_ptrace_hbp_get(NT_ARM_HW_WATCH, tsk, num, &kdata);
	/* Resource info */
	} else if (num == 0) {
		ret = compat_ptrace_hbp_get_resource_info(&kdata);
	/* Breakpoint */
	} else {
		ret = compat_ptrace_hbp_get(NT_ARM_HW_BREAK, tsk, num, &kdata);
	}

	if (!ret)
		ret = put_user(kdata, data);

	return ret;
}
static int compat_ptrace_sethbpregs(struct task_struct *tsk, compat_long_t num,
				    compat_ulong_t __user *data)
{
	int ret;
	u32 kdata = 0;

	if (num == 0)
		return 0;

	ret = get_user(kdata, data);
	if (ret)
		return ret;

	if (num < 0)
		ret = compat_ptrace_hbp_set(NT_ARM_HW_WATCH, tsk, num, &kdata);
	else
		ret = compat_ptrace_hbp_set(NT_ARM_HW_BREAK, tsk, num, &kdata);

	return ret;
}
#endif	/* CONFIG_HAVE_HW_BREAKPOINT */
long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
			compat_ulong_t caddr, compat_ulong_t cdata)
{
	unsigned long addr = caddr;
	unsigned long data = cdata;
	void __user *datap = compat_ptr(data);
	int ret;

	switch (request) {
		case PTRACE_PEEKUSR:
			ret = compat_ptrace_read_user(child, addr, datap);
			break;

		case PTRACE_POKEUSR:
			ret = compat_ptrace_write_user(child, addr, data);
			break;

		case COMPAT_PTRACE_GETREGS:
			ret = copy_regset_to_user(child,
						  &user_aarch32_view,
						  REGSET_COMPAT_GPR,
						  0, sizeof(compat_elf_gregset_t),
						  datap);
			break;

		case COMPAT_PTRACE_SETREGS:
			ret = copy_regset_from_user(child,
						    &user_aarch32_view,
						    REGSET_COMPAT_GPR,
						    0, sizeof(compat_elf_gregset_t),
						    datap);
			break;

		case COMPAT_PTRACE_GET_THREAD_AREA:
			ret = put_user((compat_ulong_t)child->thread.uw.tp_value,
				       (compat_ulong_t __user *)datap);
			break;

		case COMPAT_PTRACE_SET_SYSCALL:
			task_pt_regs(child)->syscallno = data;
			ret = 0;
			break;

		case COMPAT_PTRACE_GETVFPREGS:
			ret = copy_regset_to_user(child,
						  &user_aarch32_view,
						  REGSET_COMPAT_VFP,
						  0, VFP_STATE_SIZE,
						  datap);
			break;

		case COMPAT_PTRACE_SETVFPREGS:
			ret = copy_regset_from_user(child,
						    &user_aarch32_view,
						    REGSET_COMPAT_VFP,
						    0, VFP_STATE_SIZE,
						    datap);
			break;

#ifdef CONFIG_HAVE_HW_BREAKPOINT
		case COMPAT_PTRACE_GETHBPREGS:
			ret = compat_ptrace_gethbpregs(child, addr, datap);
			break;

		case COMPAT_PTRACE_SETHBPREGS:
			ret = compat_ptrace_sethbpregs(child, addr, datap);
			break;
#endif

		default:
			ret = compat_ptrace_request(child, request, addr,
						    data);
			break;
	}

	return ret;
}
#endif /* CONFIG_COMPAT */
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_COMPAT
	/*
	 * Core dumping of 32-bit tasks or compat ptrace requests must use the
	 * user_aarch32_view compatible with arm32. Native ptrace requests on
	 * 32-bit children use an extended user_aarch32_ptrace_view to allow
	 * access to the TLS register.
	 */
	if (is_compat_task())
		return &user_aarch32_view;
	else if (is_compat_thread(task_thread_info(task)))
		return &user_aarch32_ptrace_view;
#endif
	return &user_aarch64_view;
}
long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	switch (request) {
	case PTRACE_PEEKMTETAGS:
	case PTRACE_POKEMTETAGS:
		return mte_ptrace_copy_tags(child, request, addr, data);
	}

	return ptrace_request(child, request, addr, data);
}
enum ptrace_syscall_dir {
	PTRACE_SYSCALL_ENTER = 0,
	PTRACE_SYSCALL_EXIT,
};
static void report_syscall(struct pt_regs *regs, enum ptrace_syscall_dir dir)
{
	int regno;
	unsigned long saved_reg;

	/*
	 * We have some ABI weirdness here in the way that we handle syscall
	 * exit stops because we indicate whether or not the stop has been
	 * signalled from syscall entry or syscall exit by clobbering a general
	 * purpose register (ip/r12 for AArch32, x7 for AArch64) in the tracee
	 * and restoring its old value after the stop. This means that:
	 *
	 * - Any writes by the tracer to this register during the stop are
	 *   ignored/discarded.
	 *
	 * - The actual value of the register is not available during the stop,
	 *   so the tracer cannot save it and restore it later.
	 *
	 * - Syscall stops behave differently to seccomp and pseudo-step traps
	 *   (the latter do not nobble any registers).
	 */
	regno = (is_compat_task() ? 12 : 7);
	saved_reg = regs->regs[regno];
	regs->regs[regno] = dir;

	if (dir == PTRACE_SYSCALL_ENTER) {
		if (ptrace_report_syscall_entry(regs))
			forget_syscall(regs);
		regs->regs[regno] = saved_reg;
	} else if (!test_thread_flag(TIF_SINGLESTEP)) {
		ptrace_report_syscall_exit(regs, 0);
		regs->regs[regno] = saved_reg;
	} else {
		regs->regs[regno] = saved_reg;

		/*
		 * Signal a pseudo-step exception since we are stepping but
		 * tracer modifications to the registers may have rewound the
		 * state machine.
		 */
		ptrace_report_syscall_exit(regs, 1);
	}
}
int syscall_trace_enter(struct pt_regs *regs)
{
	unsigned long flags = read_thread_flags();

	if (flags & (_TIF_SYSCALL_EMU | _TIF_SYSCALL_TRACE)) {
		report_syscall(regs, PTRACE_SYSCALL_ENTER);
		if (flags & _TIF_SYSCALL_EMU)
			return NO_SYSCALL;
	}

	/* Do the secure computing after ptrace; failures should be fast. */
	if (secure_computing() == -1)
		return NO_SYSCALL;

	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
		trace_sys_enter(regs, regs->syscallno);

	audit_syscall_entry(regs->syscallno, regs->orig_x0, regs->regs[1],
			    regs->regs[2], regs->regs[3]);

	return regs->syscallno;
}
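/*
 * Tracer's view (illustrative sketch, not part of this file): because of
 * the x7/r12 convention described in report_syscall(), a tracer stopped at
 * a syscall event can tell entry from exit without extra bookkeeping:
 *
 *	ptrace(PTRACE_SYSCALL, pid, 0, 0);
 *	waitpid(pid, &status, 0);
 *	struct iovec iov = { .iov_base = &regs, .iov_len = sizeof(regs) };
 *	ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov);
 *
 * Here regs.regs[7] reads as 0 (PTRACE_SYSCALL_ENTER) at syscall-entry
 * stops and as 1 (PTRACE_SYSCALL_EXIT) at syscall-exit stops.
 */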
void syscall_trace_exit(struct pt_regs *regs)
{
	unsigned long flags = read_thread_flags();

	audit_syscall_exit(regs);

	if (flags & _TIF_SYSCALL_TRACEPOINT)
		trace_sys_exit(regs, syscall_get_return_value(current, regs));

	if (flags & (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP))
		report_syscall(regs, PTRACE_SYSCALL_EXIT);

	rseq_syscall(regs);
}
/*
 * SPSR_ELx bits which are always architecturally RES0 per ARM DDI 0487D.a.
 * We permit userspace to set SSBS (AArch64 bit 12, AArch32 bit 23) which is
 * not described in ARM DDI 0487D.a.
 * We treat PAN and UAO as RES0 bits, as they are meaningless at EL0, and may
 * be allocated an EL0 meaning in future.
 * Userspace cannot use these until they have an architectural meaning.
 * Note that this follows the SPSR_ELx format, not the AArch32 PSR format.
 * We also reserve IL for the kernel; SS is handled dynamically.
 */
#define SPSR_EL1_AARCH64_RES0_BITS \
	(GENMASK_ULL(63, 32) | GENMASK_ULL(27, 26) | GENMASK_ULL(23, 22) | \
	 GENMASK_ULL(20, 13) | GENMASK_ULL(5, 5))
#define SPSR_EL1_AARCH32_RES0_BITS \
	(GENMASK_ULL(63, 32) | GENMASK_ULL(22, 22) | GENMASK_ULL(20, 20))
static int valid_compat_regs(struct user_pt_regs *regs)
{
	regs->pstate &= ~SPSR_EL1_AARCH32_RES0_BITS;

	if (!system_supports_mixed_endian_el0()) {
		if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
			regs->pstate |= PSR_AA32_E_BIT;
		else
			regs->pstate &= ~PSR_AA32_E_BIT;
	}

	if (user_mode(regs) && (regs->pstate & PSR_MODE32_BIT) &&
	    (regs->pstate & PSR_AA32_A_BIT) == 0 &&
	    (regs->pstate & PSR_AA32_I_BIT) == 0 &&
	    (regs->pstate & PSR_AA32_F_BIT) == 0) {
		return 1;
	}

	/*
	 * Force PSR to a valid 32-bit EL0t, preserving the same bits as
	 * arch/arm.
	 */
	regs->pstate &= PSR_AA32_N_BIT | PSR_AA32_Z_BIT |
			PSR_AA32_C_BIT | PSR_AA32_V_BIT |
			PSR_AA32_Q_BIT | PSR_AA32_IT_MASK |
			PSR_AA32_GE_MASK | PSR_AA32_E_BIT |
			PSR_AA32_DIT_BIT;
	regs->pstate |= PSR_MODE32_BIT;

	return 0;
}
static int valid_native_regs(struct user_pt_regs *regs)
{
	regs->pstate &= ~SPSR_EL1_AARCH64_RES0_BITS;

	if (user_mode(regs) && !(regs->pstate & PSR_MODE32_BIT) &&
	    (regs->pstate & PSR_D_BIT) == 0 &&
	    (regs->pstate & PSR_A_BIT) == 0 &&
	    (regs->pstate & PSR_I_BIT) == 0 &&
	    (regs->pstate & PSR_F_BIT) == 0) {
		return 1;
	}

	/* Force PSR to a valid 64-bit EL0t */
	regs->pstate &= PSR_N_BIT | PSR_Z_BIT | PSR_C_BIT | PSR_V_BIT;

	return 0;
}
/*
 * Are the current registers suitable for user mode? (used to maintain
 * security in signal handlers)
 */
int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task)
{
	/* https://lore.kernel.org/lkml/20191118131525.GA4180@willie-the-truck */
	user_regs_reset_single_step(regs, task);

	if (is_compat_thread(task_thread_info(task)))
		return valid_compat_regs(regs);

	return valid_native_regs(regs);
}