// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/kernel/signal.c
 *
 * Copyright (C) 1995-2009 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/cache.h>
#include <linux/compat.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/freezer.h>
#include <linux/stddef.h>
#include <linux/uaccess.h>
#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/ratelimit.h>
#include <linux/rseq.h>
#include <linux/syscalls.h>

#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/elf.h>
#include <asm/exception.h>
#include <asm/cacheflush.h>
#include <asm/ucontext.h>
#include <asm/unistd.h>
#include <asm/fpsimd.h>
#include <asm/ptrace.h>
#include <asm/syscall.h>
#include <asm/signal32.h>
#include <asm/traps.h>
#include <asm/vdso.h>
/*
 * Do a signal return; undo the signal stack. These are aligned to 128-bit.
 */
struct rt_sigframe {
        struct siginfo info;
        struct ucontext uc;
};

struct frame_record {
        u64 fp;
        u64 lr;
};
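/*
 * The frame record is placed just above the signal frame: setup_return()
 * points x29 at next_frame->fp, so unwinders see the interrupted context
 * as the caller of the signal handler.
 */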
struct rt_sigframe_user_layout {
        struct rt_sigframe __user *sigframe;
        struct frame_record __user *next_frame;

        unsigned long size;     /* size of allocated sigframe data */
        unsigned long limit;    /* largest allowed size */

        unsigned long fpsimd_offset;
        unsigned long esr_offset;
        unsigned long sve_offset;
        unsigned long tpidr2_offset;
        unsigned long za_offset;
        unsigned long zt_offset;
        unsigned long fpmr_offset;
        unsigned long extra_offset;
        unsigned long end_offset;
};
#define BASE_SIGFRAME_SIZE round_up(sizeof(struct rt_sigframe), 16)
#define TERMINATOR_SIZE round_up(sizeof(struct _aarch64_ctx), 16)
#define EXTRA_CONTEXT_SIZE round_up(sizeof(struct extra_context), 16)
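/*
 * Each optional record in uc_mcontext.__reserved[] begins with a
 * struct _aarch64_ctx header ({magic, size}) and is padded to a 16-byte
 * multiple; the chain ends with a {0, 0} terminator, or with an
 * extra_context record pointing at additional space (see below).
 */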
static void init_user_layout(struct rt_sigframe_user_layout *user)
{
        const size_t reserved_size =
                sizeof(user->sigframe->uc.uc_mcontext.__reserved);

        memset(user, 0, sizeof(*user));
        user->size = offsetof(struct rt_sigframe, uc.uc_mcontext.__reserved);

        user->limit = user->size + reserved_size;

        user->limit -= TERMINATOR_SIZE;
        user->limit -= EXTRA_CONTEXT_SIZE;
        /* Reserve space for extension and terminator ^ */
}
static size_t sigframe_size(struct rt_sigframe_user_layout const *user)
{
        return round_up(max(user->size, sizeof(struct rt_sigframe)), 16);
}
/*
 * Sanity limit on the approximate maximum size of signal frame we'll
 * try to generate.  Stack alignment padding and the frame record are
 * not taken into account.  This limit is not a guarantee and is
 * NOT ABI.
 */
#define SIGFRAME_MAXSZ SZ_256K
static int __sigframe_alloc(struct rt_sigframe_user_layout *user,
                            unsigned long *offset, size_t size, bool extend)
{
        size_t padded_size = round_up(size, 16);

        if (padded_size > user->limit - user->size &&
            !user->extra_offset &&
            extend) {
                int ret;

                user->limit += EXTRA_CONTEXT_SIZE;
                ret = __sigframe_alloc(user, &user->extra_offset,
                                       sizeof(struct extra_context), false);
                if (ret) {
                        user->limit -= EXTRA_CONTEXT_SIZE;
                        return ret;
                }

                /* Reserve space for the __reserved[] terminator */
                user->size += TERMINATOR_SIZE;

                /*
                 * Allow expansion up to SIGFRAME_MAXSZ, ensuring space for
                 * the terminator:
                 */
                user->limit = SIGFRAME_MAXSZ - TERMINATOR_SIZE;
        }

        /* Still not enough space?  Bad luck! */
        if (padded_size > user->limit - user->size)
                return -ENOMEM;

        *offset = user->size;
        user->size += padded_size;

        return 0;
}
/*
 * Allocate space for an optional record of <size> bytes in the user
 * signal frame.  The offset from the signal frame base address to the
 * allocated block is assigned to *offset.
 */
static int sigframe_alloc(struct rt_sigframe_user_layout *user,
                          unsigned long *offset, size_t size)
{
        return __sigframe_alloc(user, offset, size, true);
}
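/*
 * Typical use, as in setup_sigframe_layout() below:
 *
 *      err = sigframe_alloc(user, &user->esr_offset,
 *                           sizeof(struct esr_context));
 *
 * If the standard __reserved[] space would overflow, __sigframe_alloc()
 * transparently switches the layout over to extra_context expansion.
 */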
/* Allocate the null terminator record and prevent further allocations */
static int sigframe_alloc_end(struct rt_sigframe_user_layout *user)
{
        int ret;

        /* Un-reserve the space reserved for the terminator: */
        user->limit += TERMINATOR_SIZE;

        ret = sigframe_alloc(user, &user->end_offset,
                             sizeof(struct _aarch64_ctx));
        if (ret)
                return ret;

        /* Prevent further allocation: */
        user->limit = user->size;

        return 0;
}
static void __user *apply_user_offset(
        struct rt_sigframe_user_layout const *user, unsigned long offset)
{
        char __user *base = (char __user *)user->sigframe;

        return base + offset;
}
struct user_ctxs {
        struct fpsimd_context __user *fpsimd;
        u32 fpsimd_size;
        struct sve_context __user *sve;
        u32 sve_size;
        struct tpidr2_context __user *tpidr2;
        u32 tpidr2_size;
        struct za_context __user *za;
        u32 za_size;
        struct zt_context __user *zt;
        u32 zt_size;
        struct fpmr_context __user *fpmr;
        u32 fpmr_size;
};
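/*
 * Convention used below: __put_user_error()/__get_user_error() accumulate
 * failures into 'err', so a run of user accesses needs only a single
 * final check, usually collapsed to -EFAULT.
 */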
static int preserve_fpsimd_context(struct fpsimd_context __user *ctx)
{
        struct user_fpsimd_state const *fpsimd =
                &current->thread.uw.fpsimd_state;
        int err;

        /* copy the FP and status/control registers */
        err = __copy_to_user(ctx->vregs, fpsimd->vregs, sizeof(fpsimd->vregs));
        __put_user_error(fpsimd->fpsr, &ctx->fpsr, err);
        __put_user_error(fpsimd->fpcr, &ctx->fpcr, err);

        /* copy the magic/size information */
        __put_user_error(FPSIMD_MAGIC, &ctx->head.magic, err);
        __put_user_error(sizeof(struct fpsimd_context), &ctx->head.size, err);

        return err ? -EFAULT : 0;
}
static int restore_fpsimd_context(struct user_ctxs *user)
{
        struct user_fpsimd_state fpsimd;
        int err = 0;

        /* check the size information */
        if (user->fpsimd_size != sizeof(struct fpsimd_context))
                return -EINVAL;

        /* copy the FP and status/control registers */
        err = __copy_from_user(fpsimd.vregs, &(user->fpsimd->vregs),
                               sizeof(fpsimd.vregs));
        __get_user_error(fpsimd.fpsr, &(user->fpsimd->fpsr), err);
        __get_user_error(fpsimd.fpcr, &(user->fpsimd->fpcr), err);

        clear_thread_flag(TIF_SVE);
        current->thread.fp_type = FP_STATE_FPSIMD;

        /* load the hardware registers from the fpsimd_state structure */
        if (!err)
                fpsimd_update_current_state(&fpsimd);

        return err ? -EFAULT : 0;
}
static int preserve_fpmr_context(struct fpmr_context __user *ctx)
{
        int err = 0;

        current->thread.uw.fpmr = read_sysreg_s(SYS_FPMR);

        __put_user_error(FPMR_MAGIC, &ctx->head.magic, err);
        __put_user_error(sizeof(*ctx), &ctx->head.size, err);
        __put_user_error(current->thread.uw.fpmr, &ctx->fpmr, err);

        return err;
}
static int restore_fpmr_context(struct user_ctxs *user)
{
        u64 fpmr;
        int err = 0;

        if (user->fpmr_size != sizeof(*user->fpmr))
                return -EINVAL;

        __get_user_error(fpmr, &user->fpmr->fpmr, err);
        if (!err)
                write_sysreg_s(fpmr, SYS_FPMR);

        return err;
}
#ifdef CONFIG_ARM64_SVE

static int preserve_sve_context(struct sve_context __user *ctx)
{
        int err = 0;
        u16 reserved[ARRAY_SIZE(ctx->__reserved)];
        u16 flags = 0;
        unsigned int vl = task_get_sve_vl(current);
        unsigned int vq = 0;

        if (thread_sm_enabled(&current->thread)) {
                vl = task_get_sme_vl(current);
                vq = sve_vq_from_vl(vl);
                flags |= SVE_SIG_FLAG_SM;
        } else if (current->thread.fp_type == FP_STATE_SVE) {
                vq = sve_vq_from_vl(vl);
        }

        memset(reserved, 0, sizeof(reserved));

        __put_user_error(SVE_MAGIC, &ctx->head.magic, err);
        __put_user_error(round_up(SVE_SIG_CONTEXT_SIZE(vq), 16),
                         &ctx->head.size, err);
        __put_user_error(vl, &ctx->vl, err);
        __put_user_error(flags, &ctx->flags, err);
        BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved));
        err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved));

        if (vq) {
                /*
                 * This assumes that the SVE state has already been saved to
                 * the task struct by calling the function
                 * fpsimd_signal_preserve_current_state().
                 */
                err |= __copy_to_user((char __user *)ctx + SVE_SIG_REGS_OFFSET,
                                      current->thread.sve_state,
                                      SVE_SIG_REGS_SIZE(vq));
        }

        return err ? -EFAULT : 0;
}
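/*
 * Note: vq == 0 produces a header-only record with no register payload;
 * restore_sve_fpsimd_context() detects this (sve_size equal to the bare
 * struct sve_context) and falls back to the fpsimd_context data instead.
 */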
static int restore_sve_fpsimd_context(struct user_ctxs *user)
{
        int err = 0;
        unsigned int vl, vq;
        struct user_fpsimd_state fpsimd;
        u16 user_vl, flags;

        if (user->sve_size < sizeof(*user->sve))
                return -EINVAL;

        __get_user_error(user_vl, &(user->sve->vl), err);
        __get_user_error(flags, &(user->sve->flags), err);
        if (err)
                return err;

        if (flags & SVE_SIG_FLAG_SM) {
                if (!system_supports_sme())
                        return -EINVAL;

                vl = task_get_sme_vl(current);
        } else {
                /*
                 * An SME-only system uses SVE for streaming mode, so it can
                 * have an SVE-formatted context with a zero VL and no
                 * payload data.
                 */
                if (!system_supports_sve() && !system_supports_sme())
                        return -EINVAL;

                vl = task_get_sve_vl(current);
        }

        if (user_vl != vl)
                return -EINVAL;

        if (user->sve_size == sizeof(*user->sve)) {
                clear_thread_flag(TIF_SVE);
                current->thread.svcr &= ~SVCR_SM_MASK;
                current->thread.fp_type = FP_STATE_FPSIMD;
                goto fpsimd_only;
        }

        vq = sve_vq_from_vl(vl);

        if (user->sve_size < SVE_SIG_CONTEXT_SIZE(vq))
                return -EINVAL;

        /*
         * Careful: we are about to __copy_from_user() directly into
         * thread.sve_state with preemption enabled, so protection is
         * needed to prevent a racing context switch from writing stale
         * registers back over the new data.
         */

        fpsimd_flush_task_state(current);
        /* From now, fpsimd_thread_switch() won't touch thread.sve_state */

        sve_alloc(current, true);
        if (!current->thread.sve_state) {
                clear_thread_flag(TIF_SVE);
                return -ENOMEM;
        }

        err = __copy_from_user(current->thread.sve_state,
                               (char __user const *)user->sve +
                                        SVE_SIG_REGS_OFFSET,
                               SVE_SIG_REGS_SIZE(vq));
        if (err)
                return -EFAULT;

        if (flags & SVE_SIG_FLAG_SM)
                current->thread.svcr |= SVCR_SM_MASK;
        else
                set_thread_flag(TIF_SVE);
        current->thread.fp_type = FP_STATE_SVE;

fpsimd_only:
        /* copy the FP and status/control registers */
        /* restore_sigframe() already checked that user->fpsimd != NULL. */
        err = __copy_from_user(fpsimd.vregs, user->fpsimd->vregs,
                               sizeof(fpsimd.vregs));
        __get_user_error(fpsimd.fpsr, &user->fpsimd->fpsr, err);
        __get_user_error(fpsimd.fpcr, &user->fpsimd->fpcr, err);

        /* load the hardware registers from the fpsimd_state structure */
        if (!err)
                fpsimd_update_current_state(&fpsimd);

        return err ? -EFAULT : 0;
}
#else /* ! CONFIG_ARM64_SVE */

static int restore_sve_fpsimd_context(struct user_ctxs *user)
{
        WARN_ON_ONCE(1);
        return -EINVAL;
}

/* Turn any non-optimised out attempts to use this into a link error: */
extern int preserve_sve_context(void __user *ctx);

#endif /* ! CONFIG_ARM64_SVE */
#ifdef CONFIG_ARM64_SME

static int preserve_tpidr2_context(struct tpidr2_context __user *ctx)
{
        int err = 0;

        current->thread.tpidr2_el0 = read_sysreg_s(SYS_TPIDR2_EL0);

        __put_user_error(TPIDR2_MAGIC, &ctx->head.magic, err);
        __put_user_error(sizeof(*ctx), &ctx->head.size, err);
        __put_user_error(current->thread.tpidr2_el0, &ctx->tpidr2, err);

        return err;
}

static int restore_tpidr2_context(struct user_ctxs *user)
{
        u64 tpidr2_el0;
        int err = 0;

        if (user->tpidr2_size != sizeof(*user->tpidr2))
                return -EINVAL;

        __get_user_error(tpidr2_el0, &user->tpidr2->tpidr2, err);
        if (!err)
                write_sysreg_s(tpidr2_el0, SYS_TPIDR2_EL0);

        return err;
}
static int preserve_za_context(struct za_context __user *ctx)
{
        int err = 0;
        u16 reserved[ARRAY_SIZE(ctx->__reserved)];
        unsigned int vl = task_get_sme_vl(current);
        unsigned int vq;

        if (thread_za_enabled(&current->thread))
                vq = sve_vq_from_vl(vl);
        else
                vq = 0;

        memset(reserved, 0, sizeof(reserved));

        __put_user_error(ZA_MAGIC, &ctx->head.magic, err);
        __put_user_error(round_up(ZA_SIG_CONTEXT_SIZE(vq), 16),
                         &ctx->head.size, err);
        __put_user_error(vl, &ctx->vl, err);
        BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved));
        err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved));

        if (vq) {
                /*
                 * This assumes that the ZA state has already been saved to
                 * the task struct by calling the function
                 * fpsimd_signal_preserve_current_state().
                 */
                err |= __copy_to_user((char __user *)ctx + ZA_SIG_REGS_OFFSET,
                                      current->thread.sme_state,
                                      ZA_SIG_REGS_SIZE(vq));
        }

        return err ? -EFAULT : 0;
}
static int restore_za_context(struct user_ctxs *user)
{
        int err = 0;
        unsigned int vq;
        u16 user_vl;

        if (user->za_size < sizeof(*user->za))
                return -EINVAL;

        __get_user_error(user_vl, &(user->za->vl), err);
        if (err)
                return err;

        if (user_vl != task_get_sme_vl(current))
                return -EINVAL;

        if (user->za_size == sizeof(*user->za)) {
                current->thread.svcr &= ~SVCR_ZA_MASK;
                return 0;
        }

        vq = sve_vq_from_vl(user_vl);

        if (user->za_size < ZA_SIG_CONTEXT_SIZE(vq))
                return -EINVAL;

        /*
         * Careful: we are about to __copy_from_user() directly into
         * thread.sme_state with preemption enabled, so protection is
         * needed to prevent a racing context switch from writing stale
         * registers back over the new data.
         */

        fpsimd_flush_task_state(current);
        /* From now, fpsimd_thread_switch() won't touch thread.sve_state */

        sme_alloc(current, true);
        if (!current->thread.sme_state) {
                current->thread.svcr &= ~SVCR_ZA_MASK;
                clear_thread_flag(TIF_SME);
                return -ENOMEM;
        }

        err = __copy_from_user(current->thread.sme_state,
                               (char __user const *)user->za +
                                        ZA_SIG_REGS_OFFSET,
                               ZA_SIG_REGS_SIZE(vq));
        if (err)
                return -EFAULT;

        set_thread_flag(TIF_SME);
        current->thread.svcr |= SVCR_ZA_MASK;

        return 0;
}
static int preserve_zt_context(struct zt_context __user *ctx)
{
        int err = 0;
        u16 reserved[ARRAY_SIZE(ctx->__reserved)];

        if (WARN_ON(!thread_za_enabled(&current->thread)))
                return -EINVAL;

        memset(reserved, 0, sizeof(reserved));

        __put_user_error(ZT_MAGIC, &ctx->head.magic, err);
        __put_user_error(round_up(ZT_SIG_CONTEXT_SIZE(1), 16),
                         &ctx->head.size, err);
        __put_user_error(1, &ctx->nregs, err);
        BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved));
        err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved));

        /*
         * This assumes that the ZT state has already been saved to
         * the task struct by calling the function
         * fpsimd_signal_preserve_current_state().
         */
        err |= __copy_to_user((char __user *)ctx + ZT_SIG_REGS_OFFSET,
                              thread_zt_state(&current->thread),
                              ZT_SIG_REGS_SIZE(1));

        return err ? -EFAULT : 0;
}
static int restore_zt_context(struct user_ctxs *user)
{
        int err;
        u16 nregs;

        /* ZA must be restored first for this check to be valid */
        if (!thread_za_enabled(&current->thread))
                return -EINVAL;

        if (user->zt_size != ZT_SIG_CONTEXT_SIZE(1))
                return -EINVAL;

        if (__copy_from_user(&nregs, &(user->zt->nregs), sizeof(nregs)))
                return -EFAULT;

        if (nregs != 1)
                return -EINVAL;

        /*
         * Careful: we are about to __copy_from_user() directly into
         * thread.zt_state with preemption enabled, so protection is
         * needed to prevent a racing context switch from writing stale
         * registers back over the new data.
         */

        fpsimd_flush_task_state(current);
        /* From now, fpsimd_thread_switch() won't touch ZT in thread state */

        err = __copy_from_user(thread_zt_state(&current->thread),
                               (char __user const *)user->zt +
                                        ZT_SIG_REGS_OFFSET,
                               ZT_SIG_REGS_SIZE(1));
        if (err)
                return -EFAULT;

        return 0;
}
#else /* ! CONFIG_ARM64_SME */

/* Turn any non-optimised out attempts to use these into a link error: */
extern int preserve_tpidr2_context(void __user *ctx);
extern int restore_tpidr2_context(struct user_ctxs *user);
extern int preserve_za_context(void __user *ctx);
extern int restore_za_context(struct user_ctxs *user);
extern int preserve_zt_context(void __user *ctx);
extern int restore_zt_context(struct user_ctxs *user);

#endif /* ! CONFIG_ARM64_SME */
static int parse_user_sigframe(struct user_ctxs *user,
                               struct rt_sigframe __user *sf)
{
        struct sigcontext __user *const sc = &sf->uc.uc_mcontext;
        struct _aarch64_ctx __user *head;
        char __user *base = (char __user *)&sc->__reserved;
        size_t offset = 0;
        size_t limit = sizeof(sc->__reserved);
        bool have_extra_context = false;
        char const __user *const sfp = (char const __user *)sf;

        user->fpsimd = NULL;
        user->sve = NULL;
        user->tpidr2 = NULL;
        user->za = NULL;
        user->zt = NULL;
        user->fpmr = NULL;

        if (!IS_ALIGNED((unsigned long)base, 16))
                goto invalid;
        while (1) {
                int err = 0;
                u32 magic, size;
                char const __user *userp;
                struct extra_context const __user *extra;
                u64 extra_datap;
                u32 extra_size;
                struct _aarch64_ctx const __user *end;
                u32 end_magic, end_size;

                if (limit - offset < sizeof(*head))
                        goto invalid;

                if (!IS_ALIGNED(offset, 16))
                        goto invalid;

                head = (struct _aarch64_ctx __user *)(base + offset);
                __get_user_error(magic, &head->magic, err);
                __get_user_error(size, &head->size, err);
                if (err)
                        return err;

                if (limit - offset < size)
                        goto invalid;
                switch (magic) {
                case 0:
                        if (size)
                                goto invalid;

                        goto done;

                case FPSIMD_MAGIC:
                        if (!system_supports_fpsimd())
                                goto invalid;

                        if (user->fpsimd)
                                goto invalid;

                        user->fpsimd = (struct fpsimd_context __user *)head;
                        user->fpsimd_size = size;
                        break;

                case ESR_MAGIC:
                        /* ignore */
                        break;

                case SVE_MAGIC:
                        if (!system_supports_sve() && !system_supports_sme())
                                goto invalid;

                        if (user->sve)
                                goto invalid;

                        user->sve = (struct sve_context __user *)head;
                        user->sve_size = size;
                        break;

                case TPIDR2_MAGIC:
                        if (!system_supports_tpidr2())
                                goto invalid;

                        if (user->tpidr2)
                                goto invalid;

                        user->tpidr2 = (struct tpidr2_context __user *)head;
                        user->tpidr2_size = size;
                        break;

                case ZA_MAGIC:
                        if (!system_supports_sme())
                                goto invalid;

                        if (user->za)
                                goto invalid;

                        user->za = (struct za_context __user *)head;
                        user->za_size = size;
                        break;

                case ZT_MAGIC:
                        if (!system_supports_sme2())
                                goto invalid;

                        if (user->zt)
                                goto invalid;

                        user->zt = (struct zt_context __user *)head;
                        user->zt_size = size;
                        break;

                case FPMR_MAGIC:
                        if (!system_supports_fpmr())
                                goto invalid;

                        if (user->fpmr)
                                goto invalid;

                        user->fpmr = (struct fpmr_context __user *)head;
                        user->fpmr_size = size;
                        break;
                case EXTRA_MAGIC:
                        if (have_extra_context)
                                goto invalid;

                        if (size < sizeof(*extra))
                                goto invalid;

                        userp = (char const __user *)head;

                        extra = (struct extra_context const __user *)userp;
                        userp += size;

                        __get_user_error(extra_datap, &extra->datap, err);
                        __get_user_error(extra_size, &extra->size, err);
                        if (err)
                                return err;

                        /* Check for the dummy terminator in __reserved[]: */

                        if (limit - offset - size < TERMINATOR_SIZE)
                                goto invalid;

                        end = (struct _aarch64_ctx const __user *)userp;
                        userp += TERMINATOR_SIZE;

                        __get_user_error(end_magic, &end->magic, err);
                        __get_user_error(end_size, &end->size, err);
                        if (err)
                                return err;

                        if (end_magic || end_size)
                                goto invalid;

                        /* Prevent looping/repeated parsing of extra_context */
                        have_extra_context = true;

                        base = (__force void __user *)extra_datap;
                        if (!IS_ALIGNED((unsigned long)base, 16))
                                goto invalid;

                        if (!IS_ALIGNED(extra_size, 16))
                                goto invalid;

                        if (userp != base)
                                goto invalid;

                        /* Reject "unreasonably large" frames: */
                        if (extra_size > sfp + SIGFRAME_MAXSZ - userp)
                                goto invalid;

                        /*
                         * Ignore trailing terminator in __reserved[]
                         * and start parsing extra data:
                         */
                        offset = 0;
                        limit = extra_size;

                        if (!access_ok(base, limit))
                                goto invalid;

                        continue;

                default:
                        goto invalid;
                }
                if (size < sizeof(*head))
                        goto invalid;

                if (limit - offset < size)
                        goto invalid;

                offset += size;
        }

done:
        return 0;

invalid:
        return -EINVAL;
}
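/*
 * Illustrative sketch (not kernel code): a userspace SA_SIGINFO handler
 * can walk the same record chain that parse_user_sigframe() validates.
 * This minimal walk ignores EXTRA_MAGIC indirection; the types and magic
 * come from the uapi header <asm/sigcontext.h>.
 *
 *      #include <signal.h>
 *      #include <ucontext.h>
 *      #include <asm/sigcontext.h>
 *
 *      static void handler(int sig, siginfo_t *info, void *ucp)
 *      {
 *              ucontext_t *uc = ucp;
 *              struct _aarch64_ctx *head =
 *                      (struct _aarch64_ctx *)uc->uc_mcontext.__reserved;
 *
 *              while (head->magic) {
 *                      if (head->magic == FPSIMD_MAGIC)
 *                              break;  // found struct fpsimd_context
 *                      head = (struct _aarch64_ctx *)
 *                              ((char *)head + head->size);
 *              }
 *      }
 */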
static int restore_sigframe(struct pt_regs *regs,
                            struct rt_sigframe __user *sf)
{
        sigset_t set;
        int i, err;
        struct user_ctxs user;

        err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
        if (err == 0)
                set_current_blocked(&set);

        for (i = 0; i < 31; i++)
                __get_user_error(regs->regs[i], &sf->uc.uc_mcontext.regs[i],
                                 err);
        __get_user_error(regs->sp, &sf->uc.uc_mcontext.sp, err);
        __get_user_error(regs->pc, &sf->uc.uc_mcontext.pc, err);
        __get_user_error(regs->pstate, &sf->uc.uc_mcontext.pstate, err);

        /*
         * Avoid sys_rt_sigreturn() restarting.
         */
        forget_syscall(regs);

        err |= !valid_user_regs(&regs->user_regs, current);
        if (err == 0)
                err = parse_user_sigframe(&user, sf);

        if (err == 0 && system_supports_fpsimd()) {
                if (!user.fpsimd)
                        return -EINVAL;

                if (user.sve)
                        err = restore_sve_fpsimd_context(&user);
                else
                        err = restore_fpsimd_context(&user);
        }

        if (err == 0 && system_supports_tpidr2() && user.tpidr2)
                err = restore_tpidr2_context(&user);

        if (err == 0 && system_supports_fpmr() && user.fpmr)
                err = restore_fpmr_context(&user);

        if (err == 0 && system_supports_sme() && user.za)
                err = restore_za_context(&user);

        if (err == 0 && system_supports_sme2() && user.zt)
                err = restore_zt_context(&user);

        return err;
}
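/*
 * Note the ordering above: ZA is restored before ZT because
 * restore_zt_context() refuses to run unless ZA is already enabled for
 * the thread.
 */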
SYSCALL_DEFINE0(rt_sigreturn)
{
        struct pt_regs *regs = current_pt_regs();
        struct rt_sigframe __user *frame;

        /* Always make any pending restarted system calls return -EINTR */
        current->restart_block.fn = do_no_restart_syscall;

        /*
         * Since we stacked the signal on a 128-bit boundary, 'sp' should
         * be 128-bit aligned here.
         */
        if (regs->sp & 15)
                goto badframe;

        frame = (struct rt_sigframe __user *)regs->sp;

        if (!access_ok(frame, sizeof(*frame)))
                goto badframe;

        if (restore_sigframe(regs, frame))
                goto badframe;

        if (restore_altstack(&frame->uc.uc_stack))
                goto badframe;

        return regs->regs[0];

badframe:
        arm64_notify_segfault(regs->sp);
        return 0;
}
/*
 * Determine the layout of optional records in the signal frame
 *
 * add_all: if true, lays out the biggest possible signal frame for
 * this task; otherwise, generates a layout for the current state
 * of the task.
 */
static int setup_sigframe_layout(struct rt_sigframe_user_layout *user,
                                 bool add_all)
{
        int err;

        if (system_supports_fpsimd()) {
                err = sigframe_alloc(user, &user->fpsimd_offset,
                                     sizeof(struct fpsimd_context));
                if (err)
                        return err;
        }

        /* fault information, if valid */
        if (add_all || current->thread.fault_code) {
                err = sigframe_alloc(user, &user->esr_offset,
                                     sizeof(struct esr_context));
                if (err)
                        return err;
        }

        if (system_supports_sve() || system_supports_sme()) {
                unsigned int vq = 0;

                if (add_all || current->thread.fp_type == FP_STATE_SVE ||
                    thread_sm_enabled(&current->thread)) {
                        int vl = max(sve_max_vl(), sme_max_vl());

                        if (!add_all)
                                vl = thread_get_cur_vl(&current->thread);

                        vq = sve_vq_from_vl(vl);
                }

                err = sigframe_alloc(user, &user->sve_offset,
                                     SVE_SIG_CONTEXT_SIZE(vq));
                if (err)
                        return err;
        }

        if (system_supports_tpidr2()) {
                err = sigframe_alloc(user, &user->tpidr2_offset,
                                     sizeof(struct tpidr2_context));
                if (err)
                        return err;
        }

        if (system_supports_sme()) {
                unsigned int vl;
                unsigned int vq = 0;

                if (add_all)
                        vl = sme_max_vl();
                else
                        vl = task_get_sme_vl(current);

                if (thread_za_enabled(&current->thread))
                        vq = sve_vq_from_vl(vl);

                err = sigframe_alloc(user, &user->za_offset,
                                     ZA_SIG_CONTEXT_SIZE(vq));
                if (err)
                        return err;
        }

        if (system_supports_sme2()) {
                if (add_all || thread_za_enabled(&current->thread)) {
                        err = sigframe_alloc(user, &user->zt_offset,
                                             ZT_SIG_CONTEXT_SIZE(1));
                        if (err)
                                return err;
                }
        }

        if (system_supports_fpmr()) {
                err = sigframe_alloc(user, &user->fpmr_offset,
                                     sizeof(struct fpmr_context));
                if (err)
                        return err;
        }

        return sigframe_alloc_end(user);
}
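/*
 * add_all is set by minsigstksz_setup() below so that the layout (and
 * hence the AT_MINSIGSTKSZ value derived from it) covers the largest
 * frame the system could ever need, not just the records the current
 * register state would emit.
 */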
static int setup_sigframe(struct rt_sigframe_user_layout *user,
                          struct pt_regs *regs, sigset_t *set)
{
        int i, err = 0;
        struct rt_sigframe __user *sf = user->sigframe;

        /* set up the stack frame for unwinding */
        __put_user_error(regs->regs[29], &user->next_frame->fp, err);
        __put_user_error(regs->regs[30], &user->next_frame->lr, err);

        for (i = 0; i < 31; i++)
                __put_user_error(regs->regs[i], &sf->uc.uc_mcontext.regs[i],
                                 err);
        __put_user_error(regs->sp, &sf->uc.uc_mcontext.sp, err);
        __put_user_error(regs->pc, &sf->uc.uc_mcontext.pc, err);
        __put_user_error(regs->pstate, &sf->uc.uc_mcontext.pstate, err);

        __put_user_error(current->thread.fault_address,
                         &sf->uc.uc_mcontext.fault_address, err);

        err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));
        if (err == 0 && system_supports_fpsimd()) {
                struct fpsimd_context __user *fpsimd_ctx =
                        apply_user_offset(user, user->fpsimd_offset);
                err |= preserve_fpsimd_context(fpsimd_ctx);
        }

        /* fault information, if valid */
        if (err == 0 && user->esr_offset) {
                struct esr_context __user *esr_ctx =
                        apply_user_offset(user, user->esr_offset);

                __put_user_error(ESR_MAGIC, &esr_ctx->head.magic, err);
                __put_user_error(sizeof(*esr_ctx), &esr_ctx->head.size, err);
                __put_user_error(current->thread.fault_code, &esr_ctx->esr, err);
        }
        /* Scalable Vector Extension state (including streaming), if present */
        if ((system_supports_sve() || system_supports_sme()) &&
            err == 0 && user->sve_offset) {
                struct sve_context __user *sve_ctx =
                        apply_user_offset(user, user->sve_offset);
                err |= preserve_sve_context(sve_ctx);
        }

        /* TPIDR2 if supported */
        if (system_supports_tpidr2() && err == 0) {
                struct tpidr2_context __user *tpidr2_ctx =
                        apply_user_offset(user, user->tpidr2_offset);
                err |= preserve_tpidr2_context(tpidr2_ctx);
        }

        /* FPMR if supported */
        if (system_supports_fpmr() && err == 0) {
                struct fpmr_context __user *fpmr_ctx =
                        apply_user_offset(user, user->fpmr_offset);
                err |= preserve_fpmr_context(fpmr_ctx);
        }

        /* ZA state if present */
        if (system_supports_sme() && err == 0 && user->za_offset) {
                struct za_context __user *za_ctx =
                        apply_user_offset(user, user->za_offset);
                err |= preserve_za_context(za_ctx);
        }

        /* ZT state if present */
        if (system_supports_sme2() && err == 0 && user->zt_offset) {
                struct zt_context __user *zt_ctx =
                        apply_user_offset(user, user->zt_offset);
                err |= preserve_zt_context(zt_ctx);
        }
        if (err == 0 && user->extra_offset) {
                char __user *sfp = (char __user *)user->sigframe;
                char __user *userp =
                        apply_user_offset(user, user->extra_offset);

                struct extra_context __user *extra;
                struct _aarch64_ctx __user *end;
                u64 extra_datap;
                u32 extra_size;

                extra = (struct extra_context __user *)userp;
                userp += EXTRA_CONTEXT_SIZE;

                end = (struct _aarch64_ctx __user *)userp;
                userp += TERMINATOR_SIZE;

                /*
                 * extra_datap is just written to the signal frame.
                 * The value gets cast back to a void __user *
                 * during sigreturn.
                 */
                extra_datap = (__force u64)userp;
                extra_size = sfp + round_up(user->size, 16) - userp;

                __put_user_error(EXTRA_MAGIC, &extra->head.magic, err);
                __put_user_error(EXTRA_CONTEXT_SIZE, &extra->head.size, err);
                __put_user_error(extra_datap, &extra->datap, err);
                __put_user_error(extra_size, &extra->size, err);

                /* Add the terminator */
                __put_user_error(0, &end->magic, err);
                __put_user_error(0, &end->size, err);
        }
        /* set the "end" magic */
        if (err == 0) {
                struct _aarch64_ctx __user *end =
                        apply_user_offset(user, user->end_offset);

                __put_user_error(0, &end->magic, err);
                __put_user_error(0, &end->size, err);
        }

        return err;
}
static int get_sigframe(struct rt_sigframe_user_layout *user,
                        struct ksignal *ksig, struct pt_regs *regs)
{
        unsigned long sp, sp_top;
        int err;

        init_user_layout(user);
        err = setup_sigframe_layout(user, false);
        if (err)
                return err;

        sp = sp_top = sigsp(regs->sp, ksig);

        sp = round_down(sp - sizeof(struct frame_record), 16);
        user->next_frame = (struct frame_record __user *)sp;

        sp = round_down(sp, 16) - sigframe_size(user);
        user->sigframe = (struct rt_sigframe __user *)sp;

        /*
         * Check that we can actually write to the signal frame.
         */
        if (!access_ok(user->sigframe, sp_top - sp))
                return -EFAULT;

        return 0;
}
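/*
 * Resulting stack layout (addresses decrease downwards):
 *
 *      sp_top:     original sp, or alternate stack top from sigsp()
 *                  [alignment padding]
 *      next_frame: struct frame_record { fp, lr }
 *      sigframe:   struct rt_sigframe (siginfo, ucontext and records)
 *                  <- regs->sp when the handler is entered
 */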
static void setup_return(struct pt_regs *regs, struct k_sigaction *ka,
                         struct rt_sigframe_user_layout *user, int usig)
{
        __sigrestore_t sigtramp;

        regs->regs[0] = usig;
        regs->sp = (unsigned long)user->sigframe;
        regs->regs[29] = (unsigned long)&user->next_frame->fp;
        regs->pc = (unsigned long)ka->sa.sa_handler;

        /*
         * Signal delivery is a (wacky) indirect function call in
         * userspace, so simulate the same setting of BTYPE as a BLR
         * <register containing the signal handler entry point>.
         * Signal delivery to a location in a PROT_BTI guarded page
         * that is not a function entry point will now trigger a
         * SIGILL in userspace.
         *
         * If the signal handler entry point is not in a PROT_BTI
         * guarded page, this is harmless.
         */
        if (system_supports_bti()) {
                regs->pstate &= ~PSR_BTYPE_MASK;
                regs->pstate |= PSR_BTYPE_C;
        }

        /* TCO (Tag Check Override) always cleared for signal handlers */
        regs->pstate &= ~PSR_TCO_BIT;

        /* Signal handlers are invoked with ZA and streaming mode disabled */
        if (system_supports_sme()) {
                /*
                 * If we were in streaming mode the saved register
                 * state was SVE but we will exit SM and use the
                 * FPSIMD register state - flush the saved FPSIMD
                 * register state in case it gets loaded.
                 */
                if (current->thread.svcr & SVCR_SM_MASK) {
                        memset(&current->thread.uw.fpsimd_state, 0,
                               sizeof(current->thread.uw.fpsimd_state));
                        current->thread.fp_type = FP_STATE_FPSIMD;
                }

                current->thread.svcr &= ~(SVCR_ZA_MASK |
                                          SVCR_SM_MASK);
                sme_smstop();
        }

        if (ka->sa.sa_flags & SA_RESTORER)
                sigtramp = ka->sa.sa_restorer;
        else
                sigtramp = VDSO_SYMBOL(current->mm->context.vdso, sigtramp);

        regs->regs[30] = (unsigned long)sigtramp;
}
static int setup_rt_frame(int usig, struct ksignal *ksig, sigset_t *set,
                          struct pt_regs *regs)
{
        struct rt_sigframe_user_layout user;
        struct rt_sigframe __user *frame;
        int err = 0;

        fpsimd_signal_preserve_current_state();

        if (get_sigframe(&user, ksig, regs))
                return 1;

        frame = user.sigframe;

        __put_user_error(0, &frame->uc.uc_flags, err);
        __put_user_error(NULL, &frame->uc.uc_link, err);

        err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
        err |= setup_sigframe(&user, regs, set);
        if (err == 0) {
                setup_return(regs, &ksig->ka, &user, usig);
                if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
                        err |= copy_siginfo_to_user(&frame->info, &ksig->info);
                        regs->regs[1] = (unsigned long)&frame->info;
                        regs->regs[2] = (unsigned long)&frame->uc;
                }
        }

        return err;
}
static void setup_restart_syscall(struct pt_regs *regs)
{
        if (is_compat_task())
                compat_setup_restart_syscall(regs);
        else
                regs->regs[8] = __NR_restart_syscall;
}
/*
 * OK, we're invoking a handler
 */
static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
        sigset_t *oldset = sigmask_to_save();
        int usig = ksig->sig;
        int ret;

        rseq_signal_deliver(ksig, regs);

        /*
         * Set up the stack frame
         */
        if (is_compat_task()) {
                if (ksig->ka.sa.sa_flags & SA_SIGINFO)
                        ret = compat_setup_rt_frame(usig, ksig, oldset, regs);
                else
                        ret = compat_setup_frame(usig, ksig, oldset, regs);
        } else {
                ret = setup_rt_frame(usig, ksig, oldset, regs);
        }

        /*
         * Check that the resulting registers are actually sane.
         */
        ret |= !valid_user_regs(&regs->user_regs, current);

        /* Step into the signal handler if we are stepping */
        signal_setup_done(ret, ksig, test_thread_flag(TIF_SINGLESTEP));
}
/*
 * Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle. Thus you cannot kill init even with a SIGKILL even by
 * mistake.
 *
 * Note that we go through the signals twice: once to check the signals that
 * the kernel can handle, and then we build all the user-level signal handling
 * stack-frames in one go after that.
 */
void do_signal(struct pt_regs *regs)
{
        unsigned long continue_addr = 0, restart_addr = 0;
        int retval = 0;
        struct ksignal ksig;
        bool syscall = in_syscall(regs);

        /*
         * If we were from a system call, check for system call restarting...
         */
        if (syscall) {
                continue_addr = regs->pc;
                restart_addr = continue_addr - (compat_thumb_mode(regs) ? 2 : 4);
                retval = regs->regs[0];

                /*
                 * Avoid additional syscall restarting via ret_to_user.
                 */
                forget_syscall(regs);

                /*
                 * Prepare for system call restart. We do this here so that a
                 * debugger will see the already changed PC.
                 */
                switch (retval) {
                case -ERESTARTNOHAND:
                case -ERESTARTSYS:
                case -ERESTARTNOINTR:
                case -ERESTART_RESTARTBLOCK:
                        regs->regs[0] = regs->orig_x0;
                        regs->pc = restart_addr;
                        break;
                }
        }

        /*
         * Get the signal to deliver. When running under ptrace, at this point
         * the debugger may change all of our registers.
         */
        if (get_signal(&ksig)) {
                /*
                 * Depending on the signal settings, we may need to revert the
                 * decision to restart the system call, but skip this if a
                 * debugger has chosen to restart at a different PC.
                 */
                if (regs->pc == restart_addr &&
                    (retval == -ERESTARTNOHAND ||
                     retval == -ERESTART_RESTARTBLOCK ||
                     (retval == -ERESTARTSYS &&
                      !(ksig.ka.sa.sa_flags & SA_RESTART)))) {
                        syscall_set_return_value(current, regs, -EINTR, 0);
                        regs->pc = continue_addr;
                }

                handle_signal(&ksig, regs);
                return;
        }

        /*
         * Handle restarting a different system call. As above, if a debugger
         * has chosen to restart at a different PC, ignore the restart.
         */
        if (syscall && regs->pc == restart_addr) {
                if (retval == -ERESTART_RESTARTBLOCK)
                        setup_restart_syscall(regs);
                user_rewind_single_step(current);
        }

        restore_saved_sigmask();
}
unsigned long __ro_after_init signal_minsigstksz;

/*
 * Determine the stack space required for guaranteed signal delivery.
 * This function is used to populate AT_MINSIGSTKSZ at process startup.
 * cpufeatures setup is assumed to be complete.
 */
void __init minsigstksz_setup(void)
{
        struct rt_sigframe_user_layout user;

        init_user_layout(&user);

        /*
         * If this fails, SIGFRAME_MAXSZ needs to be enlarged.  It won't
         * be big enough, but it's our best guess:
         */
        if (WARN_ON(setup_sigframe_layout(&user, true)))
                return;

        signal_minsigstksz = sigframe_size(&user) +
                round_up(sizeof(struct frame_record), 16) +
                16; /* max alignment padding */
}
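/*
 * Illustrative sketch (not kernel code): userspace can size an alternate
 * signal stack from the value computed above by reading AT_MINSIGSTKSZ
 * from the auxiliary vector (the constant may need <linux/auxvec.h> on
 * older libcs).
 *
 *      #include <signal.h>
 *      #include <stdlib.h>
 *      #include <sys/auxv.h>
 *
 *      stack_t ss;
 *      long min = getauxval(AT_MINSIGSTKSZ);
 *
 *      ss.ss_size = (min > SIGSTKSZ) ? min : SIGSTKSZ;
 *      ss.ss_sp = malloc(ss.ss_size);
 *      ss.ss_flags = 0;
 *      sigaltstack(&ss, NULL);
 */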
/*
 * Compile-time assertions for siginfo_t offsets. Check NSIG* as well, as
 * changes likely come with new fields that should be added below.
 */
static_assert(NSIGILL == 11);
static_assert(NSIGFPE == 15);
static_assert(NSIGSEGV == 10);
static_assert(NSIGBUS == 5);
static_assert(NSIGTRAP == 6);
static_assert(NSIGCHLD == 6);
static_assert(NSIGSYS == 2);
static_assert(sizeof(siginfo_t) == 128);
static_assert(__alignof__(siginfo_t) == 8);
static_assert(offsetof(siginfo_t, si_signo) == 0x00);
static_assert(offsetof(siginfo_t, si_errno) == 0x04);
static_assert(offsetof(siginfo_t, si_code) == 0x08);
static_assert(offsetof(siginfo_t, si_pid) == 0x10);
static_assert(offsetof(siginfo_t, si_uid) == 0x14);
static_assert(offsetof(siginfo_t, si_tid) == 0x10);
static_assert(offsetof(siginfo_t, si_overrun) == 0x14);
static_assert(offsetof(siginfo_t, si_status) == 0x18);
static_assert(offsetof(siginfo_t, si_utime) == 0x20);
static_assert(offsetof(siginfo_t, si_stime) == 0x28);
static_assert(offsetof(siginfo_t, si_value) == 0x18);
static_assert(offsetof(siginfo_t, si_int) == 0x18);
static_assert(offsetof(siginfo_t, si_ptr) == 0x18);
static_assert(offsetof(siginfo_t, si_addr) == 0x10);
static_assert(offsetof(siginfo_t, si_addr_lsb) == 0x18);
static_assert(offsetof(siginfo_t, si_lower) == 0x20);
static_assert(offsetof(siginfo_t, si_upper) == 0x28);
static_assert(offsetof(siginfo_t, si_pkey) == 0x20);
static_assert(offsetof(siginfo_t, si_perf_data) == 0x18);
static_assert(offsetof(siginfo_t, si_perf_type) == 0x20);
static_assert(offsetof(siginfo_t, si_perf_flags) == 0x24);
static_assert(offsetof(siginfo_t, si_band) == 0x10);
static_assert(offsetof(siginfo_t, si_fd) == 0x18);
static_assert(offsetof(siginfo_t, si_call_addr) == 0x10);
static_assert(offsetof(siginfo_t, si_syscall) == 0x18);
static_assert(offsetof(siginfo_t, si_arch) == 0x1c);