Commit | Line | Data |
---|---|---|
caab277b | 1 | // SPDX-License-Identifier: GPL-2.0-only |
2c020ed8 CM |
2 | /* |
3 | * Based on arch/arm/kernel/signal.c | |
4 | * | |
5 | * Copyright (C) 1995-2009 Russell King | |
6 | * Copyright (C) 2012 ARM Ltd. | |
2c020ed8 CM |
7 | */ |
8 | ||
94b07c1f | 9 | #include <linux/cache.h> |
fd92d4a5 | 10 | #include <linux/compat.h> |
2c020ed8 | 11 | #include <linux/errno.h> |
20987de3 | 12 | #include <linux/kernel.h> |
2c020ed8 | 13 | #include <linux/signal.h> |
2c020ed8 | 14 | #include <linux/freezer.h> |
47ccb028 | 15 | #include <linux/stddef.h> |
2c020ed8 | 16 | #include <linux/uaccess.h> |
33f08261 | 17 | #include <linux/sizes.h> |
bb4891a6 | 18 | #include <linux/string.h> |
03248add | 19 | #include <linux/resume_user_mode.h> |
2c020ed8 | 20 | #include <linux/ratelimit.h> |
cf7de27a | 21 | #include <linux/syscalls.h> |
2c020ed8 | 22 | |
8d66772e | 23 | #include <asm/daifflags.h> |
2c020ed8 CM |
24 | #include <asm/debug-monitors.h> |
25 | #include <asm/elf.h> | |
8ada7aab | 26 | #include <asm/exception.h> |
2c020ed8 CM |
27 | #include <asm/cacheflush.h> |
28 | #include <asm/ucontext.h> | |
29 | #include <asm/unistd.h> | |
30 | #include <asm/fpsimd.h> | |
17c28958 | 31 | #include <asm/ptrace.h> |
e30e8d46 | 32 | #include <asm/syscall.h> |
2c020ed8 | 33 | #include <asm/signal32.h> |
f71016a8 | 34 | #include <asm/traps.h> |
2c020ed8 CM |
35 | #include <asm/vdso.h> |
36 | ||
37 | /* | |
38 | * Do a signal return; undo the signal stack. These are aligned to 128-bit. | |
39 | */ | |
40 | struct rt_sigframe { | |
41 | struct siginfo info; | |
42 | struct ucontext uc; | |
20987de3 DM |
43 | }; |
44 | ||
45 | struct frame_record { | |
304ef4e8 WD |
46 | u64 fp; |
47 | u64 lr; | |
2c020ed8 CM |
48 | }; |
49 | ||
20987de3 DM |
50 | struct rt_sigframe_user_layout { |
51 | struct rt_sigframe __user *sigframe; | |
52 | struct frame_record __user *next_frame; | |
bb4891a6 DM |
53 | |
54 | unsigned long size; /* size of allocated sigframe data */ | |
55 | unsigned long limit; /* largest allowed size */ | |
56 | ||
57 | unsigned long fpsimd_offset; | |
58 | unsigned long esr_offset; | |
8cd969d2 | 59 | unsigned long sve_offset; |
39e54499 | 60 | unsigned long tpidr2_offset; |
39782210 | 61 | unsigned long za_offset; |
ee072cf7 | 62 | unsigned long zt_offset; |
33f08261 | 63 | unsigned long extra_offset; |
bb4891a6 | 64 | unsigned long end_offset; |
20987de3 DM |
65 | }; |
66 | ||
33f08261 DM |
67 | #define BASE_SIGFRAME_SIZE round_up(sizeof(struct rt_sigframe), 16) |
68 | #define TERMINATOR_SIZE round_up(sizeof(struct _aarch64_ctx), 16) | |
69 | #define EXTRA_CONTEXT_SIZE round_up(sizeof(struct extra_context), 16) | |
70 | ||
bb4891a6 DM |
71 | static void init_user_layout(struct rt_sigframe_user_layout *user) |
72 | { | |
33f08261 DM |
73 | const size_t reserved_size = |
74 | sizeof(user->sigframe->uc.uc_mcontext.__reserved); | |
75 | ||
bb4891a6 DM |
76 | memset(user, 0, sizeof(*user)); |
77 | user->size = offsetof(struct rt_sigframe, uc.uc_mcontext.__reserved); | |
78 | ||
33f08261 DM |
79 | user->limit = user->size + reserved_size; |
80 | ||
81 | user->limit -= TERMINATOR_SIZE; | |
82 | user->limit -= EXTRA_CONTEXT_SIZE; | |
83 | /* Reserve space for extension and terminator ^ */ | |
bb4891a6 DM |
84 | } |
85 | ||
86 | static size_t sigframe_size(struct rt_sigframe_user_layout const *user) | |
87 | { | |
88 | return round_up(max(user->size, sizeof(struct rt_sigframe)), 16); | |
89 | } | |
90 | ||
33f08261 DM |
91 | /* |
92 | * Sanity limit on the approximate maximum size of signal frame we'll | |
93 | * try to generate. Stack alignment padding and the frame record are | |
94 | * not taken into account. This limit is not a guarantee and is | |
95 | * NOT ABI. | |
96 | */ | |
7ddcaf78 | 97 | #define SIGFRAME_MAXSZ SZ_256K |
33f08261 DM |
98 | |
99 | static int __sigframe_alloc(struct rt_sigframe_user_layout *user, | |
100 | unsigned long *offset, size_t size, bool extend) | |
101 | { | |
102 | size_t padded_size = round_up(size, 16); | |
103 | ||
104 | if (padded_size > user->limit - user->size && | |
105 | !user->extra_offset && | |
106 | extend) { | |
107 | int ret; | |
108 | ||
109 | user->limit += EXTRA_CONTEXT_SIZE; | |
110 | ret = __sigframe_alloc(user, &user->extra_offset, | |
111 | sizeof(struct extra_context), false); | |
112 | if (ret) { | |
113 | user->limit -= EXTRA_CONTEXT_SIZE; | |
114 | return ret; | |
115 | } | |
116 | ||
117 | /* Reserve space for the __reserved[] terminator */ | |
118 | user->size += TERMINATOR_SIZE; | |
119 | ||
120 | /* | |
121 | * Allow expansion up to SIGFRAME_MAXSZ, ensuring space for | |
122 | * the terminator: | |
123 | */ | |
124 | user->limit = SIGFRAME_MAXSZ - TERMINATOR_SIZE; | |
125 | } | |
126 | ||
127 | /* Still not enough space? Bad luck! */ | |
128 | if (padded_size > user->limit - user->size) | |
129 | return -ENOMEM; | |
130 | ||
131 | *offset = user->size; | |
132 | user->size += padded_size; | |
133 | ||
134 | return 0; | |
135 | } | |
136 | ||
bb4322f7 DM |
137 | /* |
138 | * Allocate space for an optional record of <size> bytes in the user | |
139 | * signal frame. The offset from the signal frame base address to the | |
140 | * allocated block is assigned to *offset. | |
141 | */ | |
142 | static int sigframe_alloc(struct rt_sigframe_user_layout *user, | |
143 | unsigned long *offset, size_t size) | |
144 | { | |
33f08261 DM |
145 | return __sigframe_alloc(user, offset, size, true); |
146 | } | |
bb4322f7 | 147 | |
33f08261 DM |
148 | /* Allocate the null terminator record and prevent further allocations */ |
149 | static int sigframe_alloc_end(struct rt_sigframe_user_layout *user) | |
150 | { | |
151 | int ret; | |
bb4322f7 | 152 | |
33f08261 DM |
153 | /* Un-reserve the space reserved for the terminator: */ |
154 | user->limit += TERMINATOR_SIZE; | |
155 | ||
156 | ret = sigframe_alloc(user, &user->end_offset, | |
157 | sizeof(struct _aarch64_ctx)); | |
158 | if (ret) | |
159 | return ret; | |
160 | ||
161 | /* Prevent further allocation: */ | |
162 | user->limit = user->size; | |
bb4322f7 DM |
163 | return 0; |
164 | } | |
165 | ||
bb4891a6 DM |
166 | static void __user *apply_user_offset( |
167 | struct rt_sigframe_user_layout const *user, unsigned long offset) | |
168 | { | |
169 | char __user *base = (char __user *)user->sigframe; | |
170 | ||
171 | return base + offset; | |
172 | } | |
173 | ||
4e4e9304 MB |
174 | struct user_ctxs { |
175 | struct fpsimd_context __user *fpsimd; | |
b57682b3 | 176 | u32 fpsimd_size; |
4e4e9304 | 177 | struct sve_context __user *sve; |
b57682b3 | 178 | u32 sve_size; |
4e4e9304 | 179 | struct tpidr2_context __user *tpidr2; |
b57682b3 | 180 | u32 tpidr2_size; |
4e4e9304 | 181 | struct za_context __user *za; |
b57682b3 | 182 | u32 za_size; |
4e4e9304 | 183 | struct zt_context __user *zt; |
b57682b3 | 184 | u32 zt_size; |
4e4e9304 MB |
185 | }; |
186 | ||
2c020ed8 CM |
187 | static int preserve_fpsimd_context(struct fpsimd_context __user *ctx) |
188 | { | |
65896545 DM |
189 | struct user_fpsimd_state const *fpsimd = |
190 | ¤t->thread.uw.fpsimd_state; | |
2c020ed8 CM |
191 | int err; |
192 | ||
2c020ed8 CM |
193 | /* copy the FP and status/control registers */ |
194 | err = __copy_to_user(ctx->vregs, fpsimd->vregs, sizeof(fpsimd->vregs)); | |
195 | __put_user_error(fpsimd->fpsr, &ctx->fpsr, err); | |
196 | __put_user_error(fpsimd->fpcr, &ctx->fpcr, err); | |
197 | ||
198 | /* copy the magic/size information */ | |
199 | __put_user_error(FPSIMD_MAGIC, &ctx->head.magic, err); | |
200 | __put_user_error(sizeof(struct fpsimd_context), &ctx->head.size, err); | |
201 | ||
202 | return err ? -EFAULT : 0; | |
203 | } | |
204 | ||
4e4e9304 | 205 | static int restore_fpsimd_context(struct user_ctxs *user) |
2c020ed8 | 206 | { |
0abdeff5 | 207 | struct user_fpsimd_state fpsimd; |
2c020ed8 CM |
208 | int err = 0; |
209 | ||
92f14518 | 210 | /* check the size information */ |
b57682b3 | 211 | if (user->fpsimd_size != sizeof(struct fpsimd_context)) |
2c020ed8 CM |
212 | return -EINVAL; |
213 | ||
214 | /* copy the FP and status/control registers */ | |
4e4e9304 | 215 | err = __copy_from_user(fpsimd.vregs, &(user->fpsimd->vregs), |
2c020ed8 | 216 | sizeof(fpsimd.vregs)); |
4e4e9304 MB |
217 | __get_user_error(fpsimd.fpsr, &(user->fpsimd->fpsr), err); |
218 | __get_user_error(fpsimd.fpcr, &(user->fpsimd->fpcr), err); | |
2c020ed8 | 219 | |
8cd969d2 | 220 | clear_thread_flag(TIF_SVE); |
baa85152 | 221 | current->thread.fp_type = FP_STATE_FPSIMD; |
8cd969d2 | 222 | |
2c020ed8 | 223 | /* load the hardware registers from the fpsimd_state structure */ |
c51f9269 AB |
224 | if (!err) |
225 | fpsimd_update_current_state(&fpsimd); | |
2c020ed8 CM |
226 | |
227 | return err ? -EFAULT : 0; | |
228 | } | |
229 | ||
8cd969d2 | 230 | |
8cd969d2 DM |
231 | #ifdef CONFIG_ARM64_SVE |
232 | ||
233 | static int preserve_sve_context(struct sve_context __user *ctx) | |
234 | { | |
235 | int err = 0; | |
236 | u16 reserved[ARRAY_SIZE(ctx->__reserved)]; | |
85ed24da | 237 | u16 flags = 0; |
0423eedc | 238 | unsigned int vl = task_get_sve_vl(current); |
8cd969d2 DM |
239 | unsigned int vq = 0; |
240 | ||
85ed24da MB |
241 | if (thread_sm_enabled(¤t->thread)) { |
242 | vl = task_get_sme_vl(current); | |
8cd969d2 | 243 | vq = sve_vq_from_vl(vl); |
85ed24da MB |
244 | flags |= SVE_SIG_FLAG_SM; |
245 | } else if (test_thread_flag(TIF_SVE)) { | |
246 | vq = sve_vq_from_vl(vl); | |
247 | } | |
8cd969d2 DM |
248 | |
249 | memset(reserved, 0, sizeof(reserved)); | |
250 | ||
251 | __put_user_error(SVE_MAGIC, &ctx->head.magic, err); | |
252 | __put_user_error(round_up(SVE_SIG_CONTEXT_SIZE(vq), 16), | |
253 | &ctx->head.size, err); | |
254 | __put_user_error(vl, &ctx->vl, err); | |
85ed24da | 255 | __put_user_error(flags, &ctx->flags, err); |
8cd969d2 DM |
256 | BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved)); |
257 | err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved)); | |
258 | ||
259 | if (vq) { | |
260 | /* | |
261 | * This assumes that the SVE state has already been saved to | |
68a4c52e JG |
262 | * the task struct by calling the function |
263 | * fpsimd_signal_preserve_current_state(). | |
8cd969d2 DM |
264 | */ |
265 | err |= __copy_to_user((char __user *)ctx + SVE_SIG_REGS_OFFSET, | |
266 | current->thread.sve_state, | |
267 | SVE_SIG_REGS_SIZE(vq)); | |
268 | } | |
269 | ||
270 | return err ? -EFAULT : 0; | |
271 | } | |
272 | ||
273 | static int restore_sve_fpsimd_context(struct user_ctxs *user) | |
274 | { | |
f3ac48aa | 275 | int err = 0; |
85ed24da | 276 | unsigned int vl, vq; |
0abdeff5 | 277 | struct user_fpsimd_state fpsimd; |
f3ac48aa | 278 | u16 user_vl, flags; |
8cd969d2 | 279 | |
b57682b3 MB |
280 | if (user->sve_size < sizeof(*user->sve)) |
281 | return -EINVAL; | |
282 | ||
f3ac48aa MB |
283 | __get_user_error(user_vl, &(user->sve->vl), err); |
284 | __get_user_error(flags, &(user->sve->flags), err); | |
285 | if (err) | |
286 | return err; | |
8cd969d2 | 287 | |
f3ac48aa | 288 | if (flags & SVE_SIG_FLAG_SM) { |
85ed24da MB |
289 | if (!system_supports_sme()) |
290 | return -EINVAL; | |
291 | ||
292 | vl = task_get_sme_vl(current); | |
293 | } else { | |
7dde62f0 MB |
294 | /* |
295 | * A SME only system use SVE for streaming mode so can | |
296 | * have a SVE formatted context with a zero VL and no | |
297 | * payload data. | |
298 | */ | |
299 | if (!system_supports_sve() && !system_supports_sme()) | |
df07443f MB |
300 | return -EINVAL; |
301 | ||
85ed24da MB |
302 | vl = task_get_sve_vl(current); |
303 | } | |
304 | ||
f3ac48aa | 305 | if (user_vl != vl) |
8cd969d2 DM |
306 | return -EINVAL; |
307 | ||
b57682b3 | 308 | if (user->sve_size == sizeof(*user->sve)) { |
8cd969d2 | 309 | clear_thread_flag(TIF_SVE); |
ec0067a6 | 310 | current->thread.svcr &= ~SVCR_SM_MASK; |
baa85152 | 311 | current->thread.fp_type = FP_STATE_FPSIMD; |
8cd969d2 DM |
312 | goto fpsimd_only; |
313 | } | |
314 | ||
f3ac48aa | 315 | vq = sve_vq_from_vl(vl); |
8cd969d2 | 316 | |
b57682b3 | 317 | if (user->sve_size < SVE_SIG_CONTEXT_SIZE(vq)) |
8cd969d2 DM |
318 | return -EINVAL; |
319 | ||
320 | /* | |
321 | * Careful: we are about __copy_from_user() directly into | |
322 | * thread.sve_state with preemption enabled, so protection is | |
323 | * needed to prevent a racing context switch from writing stale | |
324 | * registers back over the new data. | |
325 | */ | |
326 | ||
327 | fpsimd_flush_task_state(current); | |
8cd969d2 DM |
328 | /* From now, fpsimd_thread_switch() won't touch thread.sve_state */ |
329 | ||
826a4fdd | 330 | sve_alloc(current, true); |
7559b7d7 MB |
331 | if (!current->thread.sve_state) { |
332 | clear_thread_flag(TIF_SVE); | |
333 | return -ENOMEM; | |
334 | } | |
335 | ||
8cd969d2 DM |
336 | err = __copy_from_user(current->thread.sve_state, |
337 | (char __user const *)user->sve + | |
338 | SVE_SIG_REGS_OFFSET, | |
339 | SVE_SIG_REGS_SIZE(vq)); | |
340 | if (err) | |
341 | return -EFAULT; | |
342 | ||
f3ac48aa | 343 | if (flags & SVE_SIG_FLAG_SM) |
ec0067a6 | 344 | current->thread.svcr |= SVCR_SM_MASK; |
85ed24da MB |
345 | else |
346 | set_thread_flag(TIF_SVE); | |
baa85152 | 347 | current->thread.fp_type = FP_STATE_SVE; |
8cd969d2 DM |
348 | |
349 | fpsimd_only: | |
350 | /* copy the FP and status/control registers */ | |
351 | /* restore_sigframe() already checked that user->fpsimd != NULL. */ | |
352 | err = __copy_from_user(fpsimd.vregs, user->fpsimd->vregs, | |
353 | sizeof(fpsimd.vregs)); | |
354 | __get_user_error(fpsimd.fpsr, &user->fpsimd->fpsr, err); | |
355 | __get_user_error(fpsimd.fpcr, &user->fpsimd->fpcr, err); | |
356 | ||
357 | /* load the hardware registers from the fpsimd_state structure */ | |
358 | if (!err) | |
359 | fpsimd_update_current_state(&fpsimd); | |
360 | ||
361 | return err ? -EFAULT : 0; | |
362 | } | |
363 | ||
364 | #else /* ! CONFIG_ARM64_SVE */ | |
365 | ||
df07443f MB |
366 | static int restore_sve_fpsimd_context(struct user_ctxs *user) |
367 | { | |
368 | WARN_ON_ONCE(1); | |
369 | return -EINVAL; | |
370 | } | |
371 | ||
372 | /* Turn any non-optimised out attempts to use this into a link error: */ | |
8cd969d2 | 373 | extern int preserve_sve_context(void __user *ctx); |
8cd969d2 DM |
374 | |
375 | #endif /* ! CONFIG_ARM64_SVE */ | |
376 | ||
39782210 MB |
377 | #ifdef CONFIG_ARM64_SME |
378 | ||
39e54499 MB |
379 | static int preserve_tpidr2_context(struct tpidr2_context __user *ctx) |
380 | { | |
381 | int err = 0; | |
382 | ||
383 | current->thread.tpidr2_el0 = read_sysreg_s(SYS_TPIDR2_EL0); | |
384 | ||
385 | __put_user_error(TPIDR2_MAGIC, &ctx->head.magic, err); | |
386 | __put_user_error(sizeof(*ctx), &ctx->head.size, err); | |
387 | __put_user_error(current->thread.tpidr2_el0, &ctx->tpidr2, err); | |
388 | ||
389 | return err; | |
390 | } | |
391 | ||
392 | static int restore_tpidr2_context(struct user_ctxs *user) | |
393 | { | |
394 | u64 tpidr2_el0; | |
395 | int err = 0; | |
396 | ||
b57682b3 MB |
397 | if (user->tpidr2_size != sizeof(*user->tpidr2)) |
398 | return -EINVAL; | |
399 | ||
39e54499 MB |
400 | __get_user_error(tpidr2_el0, &user->tpidr2->tpidr2, err); |
401 | if (!err) | |
616cb2f4 | 402 | write_sysreg_s(tpidr2_el0, SYS_TPIDR2_EL0); |
39e54499 MB |
403 | |
404 | return err; | |
405 | } | |
406 | ||
39782210 MB |
407 | static int preserve_za_context(struct za_context __user *ctx) |
408 | { | |
409 | int err = 0; | |
410 | u16 reserved[ARRAY_SIZE(ctx->__reserved)]; | |
411 | unsigned int vl = task_get_sme_vl(current); | |
412 | unsigned int vq; | |
413 | ||
414 | if (thread_za_enabled(¤t->thread)) | |
415 | vq = sve_vq_from_vl(vl); | |
416 | else | |
417 | vq = 0; | |
418 | ||
419 | memset(reserved, 0, sizeof(reserved)); | |
420 | ||
421 | __put_user_error(ZA_MAGIC, &ctx->head.magic, err); | |
422 | __put_user_error(round_up(ZA_SIG_CONTEXT_SIZE(vq), 16), | |
423 | &ctx->head.size, err); | |
424 | __put_user_error(vl, &ctx->vl, err); | |
425 | BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved)); | |
426 | err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved)); | |
427 | ||
428 | if (vq) { | |
429 | /* | |
430 | * This assumes that the ZA state has already been saved to | |
431 | * the task struct by calling the function | |
432 | * fpsimd_signal_preserve_current_state(). | |
433 | */ | |
434 | err |= __copy_to_user((char __user *)ctx + ZA_SIG_REGS_OFFSET, | |
ce514000 | 435 | current->thread.sme_state, |
39782210 MB |
436 | ZA_SIG_REGS_SIZE(vq)); |
437 | } | |
438 | ||
439 | return err ? -EFAULT : 0; | |
440 | } | |
441 | ||
1bec877b | 442 | static int restore_za_context(struct user_ctxs *user) |
39782210 | 443 | { |
24d68345 | 444 | int err = 0; |
39782210 | 445 | unsigned int vq; |
24d68345 | 446 | u16 user_vl; |
39782210 | 447 | |
b57682b3 MB |
448 | if (user->za_size < sizeof(*user->za)) |
449 | return -EINVAL; | |
39782210 | 450 | |
24d68345 MB |
451 | __get_user_error(user_vl, &(user->za->vl), err); |
452 | if (err) | |
453 | return err; | |
39782210 | 454 | |
24d68345 | 455 | if (user_vl != task_get_sme_vl(current)) |
39782210 MB |
456 | return -EINVAL; |
457 | ||
b57682b3 | 458 | if (user->za_size == sizeof(*user->za)) { |
ec0067a6 | 459 | current->thread.svcr &= ~SVCR_ZA_MASK; |
39782210 MB |
460 | return 0; |
461 | } | |
462 | ||
24d68345 | 463 | vq = sve_vq_from_vl(user_vl); |
39782210 | 464 | |
b57682b3 | 465 | if (user->za_size < ZA_SIG_CONTEXT_SIZE(vq)) |
39782210 MB |
466 | return -EINVAL; |
467 | ||
468 | /* | |
469 | * Careful: we are about __copy_from_user() directly into | |
ce514000 | 470 | * thread.sme_state with preemption enabled, so protection is |
39782210 MB |
471 | * needed to prevent a racing context switch from writing stale |
472 | * registers back over the new data. | |
473 | */ | |
474 | ||
475 | fpsimd_flush_task_state(current); | |
476 | /* From now, fpsimd_thread_switch() won't touch thread.sve_state */ | |
477 | ||
5d0a8d2f | 478 | sme_alloc(current, true); |
ce514000 | 479 | if (!current->thread.sme_state) { |
ec0067a6 | 480 | current->thread.svcr &= ~SVCR_ZA_MASK; |
39782210 MB |
481 | clear_thread_flag(TIF_SME); |
482 | return -ENOMEM; | |
483 | } | |
484 | ||
ce514000 | 485 | err = __copy_from_user(current->thread.sme_state, |
39782210 MB |
486 | (char __user const *)user->za + |
487 | ZA_SIG_REGS_OFFSET, | |
488 | ZA_SIG_REGS_SIZE(vq)); | |
489 | if (err) | |
490 | return -EFAULT; | |
491 | ||
492 | set_thread_flag(TIF_SME); | |
ec0067a6 | 493 | current->thread.svcr |= SVCR_ZA_MASK; |
39782210 MB |
494 | |
495 | return 0; | |
496 | } | |
ee072cf7 MB |
497 | |
498 | static int preserve_zt_context(struct zt_context __user *ctx) | |
499 | { | |
500 | int err = 0; | |
501 | u16 reserved[ARRAY_SIZE(ctx->__reserved)]; | |
502 | ||
503 | if (WARN_ON(!thread_za_enabled(¤t->thread))) | |
504 | return -EINVAL; | |
505 | ||
506 | memset(reserved, 0, sizeof(reserved)); | |
507 | ||
508 | __put_user_error(ZT_MAGIC, &ctx->head.magic, err); | |
509 | __put_user_error(round_up(ZT_SIG_CONTEXT_SIZE(1), 16), | |
510 | &ctx->head.size, err); | |
511 | __put_user_error(1, &ctx->nregs, err); | |
512 | BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved)); | |
513 | err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved)); | |
514 | ||
515 | /* | |
516 | * This assumes that the ZT state has already been saved to | |
517 | * the task struct by calling the function | |
518 | * fpsimd_signal_preserve_current_state(). | |
519 | */ | |
520 | err |= __copy_to_user((char __user *)ctx + ZT_SIG_REGS_OFFSET, | |
521 | thread_zt_state(¤t->thread), | |
522 | ZT_SIG_REGS_SIZE(1)); | |
523 | ||
524 | return err ? -EFAULT : 0; | |
525 | } | |
526 | ||
527 | static int restore_zt_context(struct user_ctxs *user) | |
528 | { | |
529 | int err; | |
ad678be4 | 530 | u16 nregs; |
ee072cf7 MB |
531 | |
532 | /* ZA must be restored first for this check to be valid */ | |
533 | if (!thread_za_enabled(¤t->thread)) | |
534 | return -EINVAL; | |
535 | ||
b57682b3 MB |
536 | if (user->zt_size != ZT_SIG_CONTEXT_SIZE(1)) |
537 | return -EINVAL; | |
538 | ||
ad678be4 | 539 | if (__copy_from_user(&nregs, &(user->zt->nregs), sizeof(nregs))) |
ee072cf7 MB |
540 | return -EFAULT; |
541 | ||
ad678be4 | 542 | if (nregs != 1) |
ee072cf7 MB |
543 | return -EINVAL; |
544 | ||
ee072cf7 MB |
545 | /* |
546 | * Careful: we are about __copy_from_user() directly into | |
547 | * thread.zt_state with preemption enabled, so protection is | |
548 | * needed to prevent a racing context switch from writing stale | |
549 | * registers back over the new data. | |
550 | */ | |
551 | ||
552 | fpsimd_flush_task_state(current); | |
553 | /* From now, fpsimd_thread_switch() won't touch ZT in thread state */ | |
554 | ||
555 | err = __copy_from_user(thread_zt_state(¤t->thread), | |
556 | (char __user const *)user->zt + | |
557 | ZT_SIG_REGS_OFFSET, | |
558 | ZT_SIG_REGS_SIZE(1)); | |
559 | if (err) | |
560 | return -EFAULT; | |
561 | ||
562 | return 0; | |
563 | } | |
564 | ||
39782210 MB |
565 | #else /* ! CONFIG_ARM64_SME */ |
566 | ||
567 | /* Turn any non-optimised out attempts to use these into a link error: */ | |
39e54499 MB |
568 | extern int preserve_tpidr2_context(void __user *ctx); |
569 | extern int restore_tpidr2_context(struct user_ctxs *user); | |
39782210 MB |
570 | extern int preserve_za_context(void __user *ctx); |
571 | extern int restore_za_context(struct user_ctxs *user); | |
ee072cf7 MB |
572 | extern int preserve_zt_context(void __user *ctx); |
573 | extern int restore_zt_context(struct user_ctxs *user); | |
39782210 MB |
574 | |
575 | #endif /* ! CONFIG_ARM64_SME */ | |
8cd969d2 | 576 | |
47ccb028 DM |
577 | static int parse_user_sigframe(struct user_ctxs *user, |
578 | struct rt_sigframe __user *sf) | |
579 | { | |
580 | struct sigcontext __user *const sc = &sf->uc.uc_mcontext; | |
bb4891a6 DM |
581 | struct _aarch64_ctx __user *head; |
582 | char __user *base = (char __user *)&sc->__reserved; | |
47ccb028 | 583 | size_t offset = 0; |
bb4891a6 | 584 | size_t limit = sizeof(sc->__reserved); |
33f08261 DM |
585 | bool have_extra_context = false; |
586 | char const __user *const sfp = (char const __user *)sf; | |
47ccb028 DM |
587 | |
588 | user->fpsimd = NULL; | |
8cd969d2 | 589 | user->sve = NULL; |
39e54499 | 590 | user->tpidr2 = NULL; |
39782210 | 591 | user->za = NULL; |
ee072cf7 | 592 | user->zt = NULL; |
47ccb028 | 593 | |
bb4891a6 DM |
594 | if (!IS_ALIGNED((unsigned long)base, 16)) |
595 | goto invalid; | |
596 | ||
47ccb028 | 597 | while (1) { |
bb4891a6 | 598 | int err = 0; |
47ccb028 | 599 | u32 magic, size; |
33f08261 DM |
600 | char const __user *userp; |
601 | struct extra_context const __user *extra; | |
602 | u64 extra_datap; | |
603 | u32 extra_size; | |
604 | struct _aarch64_ctx const __user *end; | |
605 | u32 end_magic, end_size; | |
47ccb028 | 606 | |
bb4891a6 | 607 | if (limit - offset < sizeof(*head)) |
47ccb028 DM |
608 | goto invalid; |
609 | ||
bb4891a6 DM |
610 | if (!IS_ALIGNED(offset, 16)) |
611 | goto invalid; | |
612 | ||
613 | head = (struct _aarch64_ctx __user *)(base + offset); | |
47ccb028 DM |
614 | __get_user_error(magic, &head->magic, err); |
615 | __get_user_error(size, &head->size, err); | |
616 | if (err) | |
617 | return err; | |
618 | ||
bb4891a6 DM |
619 | if (limit - offset < size) |
620 | goto invalid; | |
621 | ||
47ccb028 DM |
622 | switch (magic) { |
623 | case 0: | |
624 | if (size) | |
625 | goto invalid; | |
626 | ||
627 | goto done; | |
628 | ||
629 | case FPSIMD_MAGIC: | |
6d502b6b SP |
630 | if (!system_supports_fpsimd()) |
631 | goto invalid; | |
47ccb028 DM |
632 | if (user->fpsimd) |
633 | goto invalid; | |
634 | ||
47ccb028 | 635 | user->fpsimd = (struct fpsimd_context __user *)head; |
b57682b3 | 636 | user->fpsimd_size = size; |
47ccb028 DM |
637 | break; |
638 | ||
639 | case ESR_MAGIC: | |
640 | /* ignore */ | |
641 | break; | |
642 | ||
8cd969d2 | 643 | case SVE_MAGIC: |
85ed24da | 644 | if (!system_supports_sve() && !system_supports_sme()) |
8cd969d2 DM |
645 | goto invalid; |
646 | ||
647 | if (user->sve) | |
648 | goto invalid; | |
649 | ||
8cd969d2 | 650 | user->sve = (struct sve_context __user *)head; |
b57682b3 | 651 | user->sve_size = size; |
8cd969d2 DM |
652 | break; |
653 | ||
39e54499 | 654 | case TPIDR2_MAGIC: |
e9d14f3f | 655 | if (!system_supports_tpidr2()) |
8cd969d2 DM |
656 | goto invalid; |
657 | ||
39e54499 MB |
658 | if (user->tpidr2) |
659 | goto invalid; | |
660 | ||
39e54499 | 661 | user->tpidr2 = (struct tpidr2_context __user *)head; |
b57682b3 | 662 | user->tpidr2_size = size; |
8cd969d2 DM |
663 | break; |
664 | ||
39782210 MB |
665 | case ZA_MAGIC: |
666 | if (!system_supports_sme()) | |
667 | goto invalid; | |
668 | ||
669 | if (user->za) | |
670 | goto invalid; | |
671 | ||
39782210 | 672 | user->za = (struct za_context __user *)head; |
b57682b3 | 673 | user->za_size = size; |
39782210 MB |
674 | break; |
675 | ||
ee072cf7 MB |
676 | case ZT_MAGIC: |
677 | if (!system_supports_sme2()) | |
39782210 MB |
678 | goto invalid; |
679 | ||
ee072cf7 MB |
680 | if (user->zt) |
681 | goto invalid; | |
682 | ||
ee072cf7 | 683 | user->zt = (struct zt_context __user *)head; |
b57682b3 | 684 | user->zt_size = size; |
39782210 MB |
685 | break; |
686 | ||
33f08261 DM |
687 | case EXTRA_MAGIC: |
688 | if (have_extra_context) | |
689 | goto invalid; | |
690 | ||
691 | if (size < sizeof(*extra)) | |
692 | goto invalid; | |
693 | ||
694 | userp = (char const __user *)head; | |
695 | ||
696 | extra = (struct extra_context const __user *)userp; | |
697 | userp += size; | |
698 | ||
699 | __get_user_error(extra_datap, &extra->datap, err); | |
700 | __get_user_error(extra_size, &extra->size, err); | |
701 | if (err) | |
702 | return err; | |
703 | ||
704 | /* Check for the dummy terminator in __reserved[]: */ | |
705 | ||
706 | if (limit - offset - size < TERMINATOR_SIZE) | |
707 | goto invalid; | |
708 | ||
709 | end = (struct _aarch64_ctx const __user *)userp; | |
710 | userp += TERMINATOR_SIZE; | |
711 | ||
712 | __get_user_error(end_magic, &end->magic, err); | |
713 | __get_user_error(end_size, &end->size, err); | |
714 | if (err) | |
715 | return err; | |
716 | ||
717 | if (end_magic || end_size) | |
718 | goto invalid; | |
719 | ||
720 | /* Prevent looping/repeated parsing of extra_context */ | |
721 | have_extra_context = true; | |
722 | ||
723 | base = (__force void __user *)extra_datap; | |
724 | if (!IS_ALIGNED((unsigned long)base, 16)) | |
725 | goto invalid; | |
726 | ||
727 | if (!IS_ALIGNED(extra_size, 16)) | |
728 | goto invalid; | |
729 | ||
730 | if (base != userp) | |
731 | goto invalid; | |
732 | ||
733 | /* Reject "unreasonably large" frames: */ | |
734 | if (extra_size > sfp + SIGFRAME_MAXSZ - userp) | |
735 | goto invalid; | |
736 | ||
737 | /* | |
738 | * Ignore trailing terminator in __reserved[] | |
739 | * and start parsing extra data: | |
740 | */ | |
741 | offset = 0; | |
742 | limit = extra_size; | |
abf73988 | 743 | |
96d4f267 | 744 | if (!access_ok(base, limit)) |
abf73988 DM |
745 | goto invalid; |
746 | ||
33f08261 DM |
747 | continue; |
748 | ||
47ccb028 DM |
749 | default: |
750 | goto invalid; | |
751 | } | |
752 | ||
753 | if (size < sizeof(*head)) | |
754 | goto invalid; | |
755 | ||
bb4891a6 | 756 | if (limit - offset < size) |
47ccb028 DM |
757 | goto invalid; |
758 | ||
759 | offset += size; | |
760 | } | |
761 | ||
762 | done: | |
47ccb028 DM |
763 | return 0; |
764 | ||
765 | invalid: | |
766 | return -EINVAL; | |
767 | } | |
768 | ||
2c020ed8 CM |
769 | static int restore_sigframe(struct pt_regs *regs, |
770 | struct rt_sigframe __user *sf) | |
771 | { | |
772 | sigset_t set; | |
773 | int i, err; | |
47ccb028 | 774 | struct user_ctxs user; |
2c020ed8 CM |
775 | |
776 | err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set)); | |
777 | if (err == 0) | |
778 | set_current_blocked(&set); | |
779 | ||
780 | for (i = 0; i < 31; i++) | |
781 | __get_user_error(regs->regs[i], &sf->uc.uc_mcontext.regs[i], | |
782 | err); | |
783 | __get_user_error(regs->sp, &sf->uc.uc_mcontext.sp, err); | |
784 | __get_user_error(regs->pc, &sf->uc.uc_mcontext.pc, err); | |
785 | __get_user_error(regs->pstate, &sf->uc.uc_mcontext.pstate, err); | |
786 | ||
787 | /* | |
788 | * Avoid sys_rt_sigreturn() restarting. | |
789 | */ | |
17c28958 | 790 | forget_syscall(regs); |
2c020ed8 | 791 | |
dbd4d7ca | 792 | err |= !valid_user_regs(®s->user_regs, current); |
47ccb028 DM |
793 | if (err == 0) |
794 | err = parse_user_sigframe(&user, sf); | |
2c020ed8 | 795 | |
6d502b6b | 796 | if (err == 0 && system_supports_fpsimd()) { |
8cd969d2 DM |
797 | if (!user.fpsimd) |
798 | return -EINVAL; | |
799 | ||
df07443f | 800 | if (user.sve) |
8cd969d2 | 801 | err = restore_sve_fpsimd_context(&user); |
df07443f | 802 | else |
4e4e9304 | 803 | err = restore_fpsimd_context(&user); |
8cd969d2 | 804 | } |
2c020ed8 | 805 | |
e9d14f3f | 806 | if (err == 0 && system_supports_tpidr2() && user.tpidr2) |
39e54499 MB |
807 | err = restore_tpidr2_context(&user); |
808 | ||
39782210 MB |
809 | if (err == 0 && system_supports_sme() && user.za) |
810 | err = restore_za_context(&user); | |
811 | ||
ee072cf7 MB |
812 | if (err == 0 && system_supports_sme2() && user.zt) |
813 | err = restore_zt_context(&user); | |
814 | ||
2c020ed8 CM |
815 | return err; |
816 | } | |
817 | ||
bf4ce5cc | 818 | SYSCALL_DEFINE0(rt_sigreturn) |
2c020ed8 | 819 | { |
3085e164 | 820 | struct pt_regs *regs = current_pt_regs(); |
2c020ed8 CM |
821 | struct rt_sigframe __user *frame; |
822 | ||
823 | /* Always make any pending restarted system calls return -EINTR */ | |
f56141e3 | 824 | current->restart_block.fn = do_no_restart_syscall; |
2c020ed8 CM |
825 | |
826 | /* | |
827 | * Since we stacked the signal on a 128-bit boundary, then 'sp' should | |
828 | * be word aligned here. | |
829 | */ | |
830 | if (regs->sp & 15) | |
831 | goto badframe; | |
832 | ||
833 | frame = (struct rt_sigframe __user *)regs->sp; | |
834 | ||
96d4f267 | 835 | if (!access_ok(frame, sizeof (*frame))) |
2c020ed8 CM |
836 | goto badframe; |
837 | ||
838 | if (restore_sigframe(regs, frame)) | |
839 | goto badframe; | |
840 | ||
207bdae4 | 841 | if (restore_altstack(&frame->uc.uc_stack)) |
2c020ed8 CM |
842 | goto badframe; |
843 | ||
844 | return regs->regs[0]; | |
845 | ||
846 | badframe: | |
f71016a8 | 847 | arm64_notify_segfault(regs->sp); |
2c020ed8 CM |
848 | return 0; |
849 | } | |
850 | ||
94b07c1f DM |
/*
 * Determine the layout of optional records in the signal frame
 *
 * add_all: if true, lays out the biggest possible signal frame for
 * this task; otherwise, generates a layout for the current state
 * of the task.
 *
 * Records are allocated in the order they appear below, so this
 * ordering defines the layout userspace sees.  Returns 0 on success
 * or the error from a failed sigframe_alloc().
 */
static int setup_sigframe_layout(struct rt_sigframe_user_layout *user,
				 bool add_all)
{
	int err;

	/* FPSIMD record whenever the CPU has FPSIMD */
	if (system_supports_fpsimd()) {
		err = sigframe_alloc(user, &user->fpsimd_offset,
				     sizeof(struct fpsimd_context));
		if (err)
			return err;
	}

	/* fault information, if valid */
	if (add_all || current->thread.fault_code) {
		err = sigframe_alloc(user, &user->esr_offset,
				     sizeof(struct esr_context));
		if (err)
			return err;
	}

	/*
	 * SVE/streaming-SVE record: always allocated when SVE or SME is
	 * supported; vq == 0 reserves only the header, a non-zero vq
	 * additionally reserves space for the live vector state.
	 */
	if (system_supports_sve() || system_supports_sme()) {
		unsigned int vq = 0;

		if (add_all || test_thread_flag(TIF_SVE) ||
		    thread_sm_enabled(&current->thread)) {
			int vl = max(sve_max_vl(), sme_max_vl());

			/* Worst case for add_all, actual VL otherwise */
			if (!add_all)
				vl = thread_get_cur_vl(&current->thread);

			vq = sve_vq_from_vl(vl);
		}

		err = sigframe_alloc(user, &user->sve_offset,
				     SVE_SIG_CONTEXT_SIZE(vq));
		if (err)
			return err;
	}

	/* TPIDR2 record whenever the CPU has TPIDR2 */
	if (system_supports_tpidr2()) {
		err = sigframe_alloc(user, &user->tpidr2_offset,
				     sizeof(struct tpidr2_context));
		if (err)
			return err;
	}

	/* ZA record: header always present on SME, payload only if ZA live */
	if (system_supports_sme()) {
		unsigned int vl;
		unsigned int vq = 0;

		if (add_all)
			vl = sme_max_vl();
		else
			vl = task_get_sme_vl(current);

		if (thread_za_enabled(&current->thread))
			vq = sve_vq_from_vl(vl);

		err = sigframe_alloc(user, &user->za_offset,
				     ZA_SIG_CONTEXT_SIZE(vq));
		if (err)
			return err;
	}

	/* ZT record only when ZA is (or may be) enabled */
	if (system_supports_sme2()) {
		if (add_all || thread_za_enabled(&current->thread)) {
			err = sigframe_alloc(user, &user->zt_offset,
					     ZT_SIG_CONTEXT_SIZE(1));
			if (err)
				return err;
		}
	}

	/* Reserve the terminator (and extra_context, if needed) */
	return sigframe_alloc_end(user);
}
933 | ||
/*
 * Populate the user signal frame laid out by setup_sigframe_layout():
 * frame record, GP registers, signal mask, and each optional record at
 * its allocated offset.  Errors accumulate in 'err'; once a user access
 * has failed, the remaining optional records are skipped.  Returns 0 on
 * success.
 */
static int setup_sigframe(struct rt_sigframe_user_layout *user,
			  struct pt_regs *regs, sigset_t *set)
{
	int i, err = 0;
	struct rt_sigframe __user *sf = user->sigframe;

	/* set up the stack frame for unwinding */
	__put_user_error(regs->regs[29], &user->next_frame->fp, err);
	__put_user_error(regs->regs[30], &user->next_frame->lr, err);

	/* x0-x30, SP, PC and PSTATE into the mcontext */
	for (i = 0; i < 31; i++)
		__put_user_error(regs->regs[i], &sf->uc.uc_mcontext.regs[i],
				 err);
	__put_user_error(regs->sp, &sf->uc.uc_mcontext.sp, err);
	__put_user_error(regs->pc, &sf->uc.uc_mcontext.pc, err);
	__put_user_error(regs->pstate, &sf->uc.uc_mcontext.pstate, err);

	__put_user_error(current->thread.fault_address, &sf->uc.uc_mcontext.fault_address, err);

	err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));

	if (err == 0 && system_supports_fpsimd()) {
		struct fpsimd_context __user *fpsimd_ctx =
			apply_user_offset(user, user->fpsimd_offset);
		err |= preserve_fpsimd_context(fpsimd_ctx);
	}

	/* fault information, if valid */
	if (err == 0 && user->esr_offset) {
		struct esr_context __user *esr_ctx =
			apply_user_offset(user, user->esr_offset);

		__put_user_error(ESR_MAGIC, &esr_ctx->head.magic, err);
		__put_user_error(sizeof(*esr_ctx), &esr_ctx->head.size, err);
		__put_user_error(current->thread.fault_code, &esr_ctx->esr, err);
	}

	/* Scalable Vector Extension state (including streaming), if present */
	if ((system_supports_sve() || system_supports_sme()) &&
	    err == 0 && user->sve_offset) {
		struct sve_context __user *sve_ctx =
			apply_user_offset(user, user->sve_offset);
		err |= preserve_sve_context(sve_ctx);
	}

	/* TPIDR2 if supported */
	if (system_supports_tpidr2() && err == 0) {
		struct tpidr2_context __user *tpidr2_ctx =
			apply_user_offset(user, user->tpidr2_offset);
		err |= preserve_tpidr2_context(tpidr2_ctx);
	}

	/* ZA state if present */
	if (system_supports_sme() && err == 0 && user->za_offset) {
		struct za_context __user *za_ctx =
			apply_user_offset(user, user->za_offset);
		err |= preserve_za_context(za_ctx);
	}

	/* ZT state if present */
	if (system_supports_sme2() && err == 0 && user->zt_offset) {
		struct zt_context __user *zt_ctx =
			apply_user_offset(user, user->zt_offset);
		err |= preserve_zt_context(zt_ctx);
	}

	/*
	 * If the records overflowed the base frame, emit an extra_context
	 * record pointing past the terminator at the spill area, followed
	 * by a terminator for the base frame.
	 */
	if (err == 0 && user->extra_offset) {
		char __user *sfp = (char __user *)user->sigframe;
		char __user *userp =
			apply_user_offset(user, user->extra_offset);

		struct extra_context __user *extra;
		struct _aarch64_ctx __user *end;
		u64 extra_datap;
		u32 extra_size;

		extra = (struct extra_context __user *)userp;
		userp += EXTRA_CONTEXT_SIZE;

		end = (struct _aarch64_ctx __user *)userp;
		userp += TERMINATOR_SIZE;

		/*
		 * extra_datap is just written to the signal frame.
		 * The value gets cast back to a void __user *
		 * during sigreturn.
		 */
		extra_datap = (__force u64)userp;
		extra_size = sfp + round_up(user->size, 16) - userp;

		__put_user_error(EXTRA_MAGIC, &extra->head.magic, err);
		__put_user_error(EXTRA_CONTEXT_SIZE, &extra->head.size, err);
		__put_user_error(extra_datap, &extra->datap, err);
		__put_user_error(extra_size, &extra->size, err);

		/* Add the terminator */
		__put_user_error(0, &end->magic, err);
		__put_user_error(0, &end->size, err);
	}

	/* set the "end" magic */
	if (err == 0) {
		struct _aarch64_ctx __user *end =
			apply_user_offset(user, user->end_offset);

		__put_user_error(0, &end->magic, err);
		__put_user_error(0, &end->size, err);
	}

	return err;
}
1045 | ||
20987de3 DM |
1046 | static int get_sigframe(struct rt_sigframe_user_layout *user, |
1047 | struct ksignal *ksig, struct pt_regs *regs) | |
2c020ed8 CM |
1048 | { |
1049 | unsigned long sp, sp_top; | |
bb4891a6 DM |
1050 | int err; |
1051 | ||
1052 | init_user_layout(user); | |
94b07c1f | 1053 | err = setup_sigframe_layout(user, false); |
bb4891a6 DM |
1054 | if (err) |
1055 | return err; | |
2c020ed8 | 1056 | |
38a7be3c | 1057 | sp = sp_top = sigsp(regs->sp, ksig); |
2c020ed8 | 1058 | |
20987de3 DM |
1059 | sp = round_down(sp - sizeof(struct frame_record), 16); |
1060 | user->next_frame = (struct frame_record __user *)sp; | |
1061 | ||
bb4891a6 | 1062 | sp = round_down(sp, 16) - sigframe_size(user); |
20987de3 | 1063 | user->sigframe = (struct rt_sigframe __user *)sp; |
2c020ed8 CM |
1064 | |
1065 | /* | |
1066 | * Check that we can actually write to the signal frame. | |
1067 | */ | |
96d4f267 | 1068 | if (!access_ok(user->sigframe, sp_top - sp)) |
20987de3 | 1069 | return -EFAULT; |
2c020ed8 | 1070 | |
20987de3 | 1071 | return 0; |
2c020ed8 CM |
1072 | } |
1073 | ||
/*
 * Prime the pt_regs so that, on return to userspace, execution resumes
 * in the signal handler: x0 = signal number, SP at the new signal
 * frame, x29 chained to the saved frame record, PC at the handler and
 * x30 (LR) at the sigreturn trampoline.  Also sanitises PSTATE and SME
 * state for handler entry.
 */
static void setup_return(struct pt_regs *regs, struct k_sigaction *ka,
			 struct rt_sigframe_user_layout *user, int usig)
{
	__sigrestore_t sigtramp;

	regs->regs[0] = usig;
	regs->sp = (unsigned long)user->sigframe;
	regs->regs[29] = (unsigned long)&user->next_frame->fp;
	regs->pc = (unsigned long)ka->sa.sa_handler;

	/*
	 * Signal delivery is a (wacky) indirect function call in
	 * userspace, so simulate the same setting of BTYPE as a BLR
	 * <register containing the signal handler entry point>.
	 * Signal delivery to a location in a PROT_BTI guarded page
	 * that is not a function entry point will now trigger a
	 * SIGILL in userspace.
	 *
	 * If the signal handler entry point is not in a PROT_BTI
	 * guarded page, this is harmless.
	 */
	if (system_supports_bti()) {
		regs->pstate &= ~PSR_BTYPE_MASK;
		regs->pstate |= PSR_BTYPE_C;
	}

	/* TCO (Tag Check Override) always cleared for signal handlers */
	regs->pstate &= ~PSR_TCO_BIT;

	/* Signal handlers are invoked with ZA and streaming mode disabled */
	if (system_supports_sme()) {
		/*
		 * If we were in streaming mode the saved register
		 * state was SVE but we will exit SM and use the
		 * FPSIMD register state - flush the saved FPSIMD
		 * register state in case it gets loaded.
		 */
		if (current->thread.svcr & SVCR_SM_MASK) {
			memset(&current->thread.uw.fpsimd_state, 0,
			       sizeof(current->thread.uw.fpsimd_state));
			current->thread.fp_type = FP_STATE_FPSIMD;
		}

		current->thread.svcr &= ~(SVCR_ZA_MASK |
					  SVCR_SM_MASK);
		sme_smstop();
	}

	/* Return via SA_RESTORER if given, else the vDSO trampoline */
	if (ka->sa.sa_flags & SA_RESTORER)
		sigtramp = ka->sa.sa_restorer;
	else
		sigtramp = VDSO_SYMBOL(current->mm->context.vdso, sigtramp);

	regs->regs[30] = (unsigned long)sigtramp;
}
1129 | ||
00554fa4 RW |
/*
 * Build a native (AArch64) rt signal frame on the user stack and point
 * the registers at the handler.  Returns 0 on success; a non-zero
 * return tells the caller that signal delivery failed.
 */
static int setup_rt_frame(int usig, struct ksignal *ksig, sigset_t *set,
			  struct pt_regs *regs)
{
	struct rt_sigframe_user_layout user;
	struct rt_sigframe __user *frame;
	int err = 0;

	/* Sync the live FP/vector state into thread_struct before saving */
	fpsimd_signal_preserve_current_state();

	if (get_sigframe(&user, ksig, regs))
		return 1;

	frame = user.sigframe;

	__put_user_error(0, &frame->uc.uc_flags, err);
	__put_user_error(NULL, &frame->uc.uc_link, err);

	err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
	err |= setup_sigframe(&user, regs, set);
	/* Only redirect the task once the frame is fully written */
	if (err == 0) {
		setup_return(regs, &ksig->ka, &user, usig);
		if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
			err |= copy_siginfo_to_user(&frame->info, &ksig->info);
			/* x1 = &siginfo, x2 = &ucontext for SA_SIGINFO handlers */
			regs->regs[1] = (unsigned long)&frame->info;
			regs->regs[2] = (unsigned long)&frame->uc;
		}
	}

	return err;
}
1160 | ||
1161 | static void setup_restart_syscall(struct pt_regs *regs) | |
1162 | { | |
1163 | if (is_compat_task()) | |
1164 | compat_setup_restart_syscall(regs); | |
1165 | else | |
1166 | regs->regs[8] = __NR_restart_syscall; | |
1167 | } | |
1168 | ||
1169 | /* | |
1170 | * OK, we're invoking a handler | |
1171 | */ | |
00554fa4 | 1172 | static void handle_signal(struct ksignal *ksig, struct pt_regs *regs) |
2c020ed8 | 1173 | { |
2c020ed8 | 1174 | sigset_t *oldset = sigmask_to_save(); |
00554fa4 | 1175 | int usig = ksig->sig; |
2c020ed8 CM |
1176 | int ret; |
1177 | ||
409d5db4 WD |
1178 | rseq_signal_deliver(ksig, regs); |
1179 | ||
2c020ed8 CM |
1180 | /* |
1181 | * Set up the stack frame | |
1182 | */ | |
1183 | if (is_compat_task()) { | |
00554fa4 RW |
1184 | if (ksig->ka.sa.sa_flags & SA_SIGINFO) |
1185 | ret = compat_setup_rt_frame(usig, ksig, oldset, regs); | |
2c020ed8 | 1186 | else |
00554fa4 | 1187 | ret = compat_setup_frame(usig, ksig, oldset, regs); |
2c020ed8 | 1188 | } else { |
00554fa4 | 1189 | ret = setup_rt_frame(usig, ksig, oldset, regs); |
2c020ed8 CM |
1190 | } |
1191 | ||
1192 | /* | |
1193 | * Check that the resulting registers are actually sane. | |
1194 | */ | |
dbd4d7ca | 1195 | ret |= !valid_user_regs(®s->user_regs, current); |
2c020ed8 | 1196 | |
ac2081cd WD |
1197 | /* Step into the signal handler if we are stepping */ |
1198 | signal_setup_done(ret, ksig, test_thread_flag(TIF_SINGLESTEP)); | |
2c020ed8 CM |
1199 | } |
1200 | ||
/*
 * Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle. Thus you cannot kill init even with a SIGKILL even by
 * mistake.
 *
 * Note that we go through the signals twice: once to check the signals that
 * the kernel can handle, and then we build all the user-level signal handling
 * stack-frames in one go after that.
 */
static void do_signal(struct pt_regs *regs)
{
	unsigned long continue_addr = 0, restart_addr = 0;
	int retval = 0;
	struct ksignal ksig;
	bool syscall = in_syscall(regs);

	/*
	 * If we were from a system call, check for system call restarting...
	 */
	if (syscall) {
		continue_addr = regs->pc;
		/* Address of the trapping instruction: 2 bytes in Thumb, else 4 */
		restart_addr = continue_addr - (compat_thumb_mode(regs) ? 2 : 4);
		retval = regs->regs[0];

		/*
		 * Avoid additional syscall restarting via ret_to_user.
		 */
		forget_syscall(regs);

		/*
		 * Prepare for system call restart. We do this here so that a
		 * debugger will see the already changed PC.
		 */
		switch (retval) {
		case -ERESTARTNOHAND:
		case -ERESTARTSYS:
		case -ERESTARTNOINTR:
		case -ERESTART_RESTARTBLOCK:
			/* Rewind to re-execute the syscall with its original x0 */
			regs->regs[0] = regs->orig_x0;
			regs->pc = restart_addr;
			break;
		}
	}

	/*
	 * Get the signal to deliver. When running under ptrace, at this point
	 * the debugger may change all of our registers.
	 */
	if (get_signal(&ksig)) {
		/*
		 * Depending on the signal settings, we may need to revert the
		 * decision to restart the system call, but skip this if a
		 * debugger has chosen to restart at a different PC.
		 */
		if (regs->pc == restart_addr &&
		    (retval == -ERESTARTNOHAND ||
		     retval == -ERESTART_RESTARTBLOCK ||
		     (retval == -ERESTARTSYS &&
		      !(ksig.ka.sa.sa_flags & SA_RESTART)))) {
			syscall_set_return_value(current, regs, -EINTR, 0);
			regs->pc = continue_addr;
		}

		handle_signal(&ksig, regs);
		return;
	}

	/*
	 * Handle restarting a different system call. As above, if a debugger
	 * has chosen to restart at a different PC, ignore the restart.
	 */
	if (syscall && regs->pc == restart_addr) {
		if (retval == -ERESTART_RESTARTBLOCK)
			setup_restart_syscall(regs);
		user_rewind_single_step(current);
	}

	/* No handler was invoked: restore the mask saved before a sigsuspend etc. */
	restore_saved_sigmask();
}
1280 | ||
/*
 * Process pending work flags on the return-to-user path.  Loops with
 * DAIF re-masked before each re-read of the flags, so no work item can
 * be lost between the check and the actual return to userspace.
 */
void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags)
{
	do {
		if (thread_flags & _TIF_NEED_RESCHED) {
			/* Unmask Debug and SError for the next task */
			local_daif_restore(DAIF_PROCCTX_NOIRQ);

			schedule();
		} else {
			/* Fully unmask (including IRQs) for the work below */
			local_daif_restore(DAIF_PROCCTX);

			if (thread_flags & _TIF_UPROBE)
				uprobe_notify_resume(regs);

			/* Deliver any asynchronous MTE tag-check fault as SIGSEGV */
			if (thread_flags & _TIF_MTE_ASYNC_FAULT) {
				clear_thread_flag(TIF_MTE_ASYNC_FAULT);
				send_sig_fault(SIGSEGV, SEGV_MTEAERR,
					       (void __user *)NULL, current);
			}

			if (thread_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
				do_signal(regs);

			if (thread_flags & _TIF_NOTIFY_RESUME)
				resume_user_mode_work(regs);

			if (thread_flags & _TIF_FOREIGN_FPSTATE)
				fpsimd_restore_current_state();
		}

		/* Re-mask before re-checking so new work cannot slip through */
		local_daif_mask();
		thread_flags = read_thread_flags();
	} while (thread_flags & _TIF_WORK_MASK);
}
94b07c1f DM |
1315 | |
/* Value reported to userspace via AT_MINSIGSTKSZ; set once at boot */
unsigned long __ro_after_init signal_minsigstksz;

/*
 * Determine the stack space required for guaranteed signal delivery.
 * This function is used to populate AT_MINSIGSTKSZ at process startup.
 * cpufeatures setup is assumed to be complete.
 */
void __init minsigstksz_setup(void)
{
	struct rt_sigframe_user_layout user;

	init_user_layout(&user);

	/*
	 * If this fails, SIGFRAME_MAXSZ needs to be enlarged. It won't
	 * be big enough, but it's our best guess:
	 */
	if (WARN_ON(setup_sigframe_layout(&user, true)))
		return;

	signal_minsigstksz = sigframe_size(&user) +
		round_up(sizeof(struct frame_record), 16) +
		16; /* max alignment padding */
}
726e337b ME |
1340 | |
/*
 * Compile-time assertions for siginfo_t offsets. Check NSIG* as well, as
 * changes likely come with new fields that should be added below.
 *
 * These offsets are exposed to userspace, so any assertion failure here
 * indicates an ABI-affecting change to siginfo_t.
 */
static_assert(NSIGILL == 11);
static_assert(NSIGFPE == 15);
static_assert(NSIGSEGV == 9);
static_assert(NSIGBUS == 5);
static_assert(NSIGTRAP == 6);
static_assert(NSIGCHLD == 6);
static_assert(NSIGSYS == 2);
static_assert(sizeof(siginfo_t) == 128);
static_assert(__alignof__(siginfo_t) == 8);
static_assert(offsetof(siginfo_t, si_signo)	== 0x00);
static_assert(offsetof(siginfo_t, si_errno)	== 0x04);
static_assert(offsetof(siginfo_t, si_code)	== 0x08);
static_assert(offsetof(siginfo_t, si_pid)	== 0x10);
static_assert(offsetof(siginfo_t, si_uid)	== 0x14);
static_assert(offsetof(siginfo_t, si_tid)	== 0x10);
static_assert(offsetof(siginfo_t, si_overrun)	== 0x14);
static_assert(offsetof(siginfo_t, si_status)	== 0x18);
static_assert(offsetof(siginfo_t, si_utime)	== 0x20);
static_assert(offsetof(siginfo_t, si_stime)	== 0x28);
static_assert(offsetof(siginfo_t, si_value)	== 0x18);
static_assert(offsetof(siginfo_t, si_int)	== 0x18);
static_assert(offsetof(siginfo_t, si_ptr)	== 0x18);
static_assert(offsetof(siginfo_t, si_addr)	== 0x10);
static_assert(offsetof(siginfo_t, si_addr_lsb)	== 0x18);
static_assert(offsetof(siginfo_t, si_lower)	== 0x20);
static_assert(offsetof(siginfo_t, si_upper)	== 0x28);
static_assert(offsetof(siginfo_t, si_pkey)	== 0x20);
static_assert(offsetof(siginfo_t, si_perf_data)	== 0x18);
static_assert(offsetof(siginfo_t, si_perf_type)	== 0x20);
static_assert(offsetof(siginfo_t, si_perf_flags) == 0x24);
static_assert(offsetof(siginfo_t, si_band)	== 0x10);
static_assert(offsetof(siginfo_t, si_fd)	== 0x18);
static_assert(offsetof(siginfo_t, si_call_addr)	== 0x10);
static_assert(offsetof(siginfo_t, si_syscall)	== 0x18);
static_assert(offsetof(siginfo_t, si_arch)	== 0x1c);