Commit | Line | Data |
---|---|---|
caab277b | 1 | // SPDX-License-Identifier: GPL-2.0-only |
b3901d54 CM |
2 | /* |
3 | * Based on arch/arm/kernel/process.c | |
4 | * | |
5 | * Original Copyright (C) 1995 Linus Torvalds | |
6 | * Copyright (C) 1996-2000 Russell King - Converted to ARM. | |
7 | * Copyright (C) 2012 ARM Ltd. | |
b3901d54 | 8 | */ |
fd92d4a5 | 9 | #include <linux/compat.h> |
60c0d45a | 10 | #include <linux/efi.h> |
ab7876a9 | 11 | #include <linux/elf.h> |
b3901d54 CM |
12 | #include <linux/export.h> |
13 | #include <linux/sched.h> | |
b17b0153 | 14 | #include <linux/sched/debug.h> |
29930025 | 15 | #include <linux/sched/task.h> |
68db0cf1 | 16 | #include <linux/sched/task_stack.h> |
b3901d54 | 17 | #include <linux/kernel.h> |
ab7876a9 | 18 | #include <linux/mman.h> |
b3901d54 | 19 | #include <linux/mm.h> |
780c083a | 20 | #include <linux/nospec.h> |
b3901d54 | 21 | #include <linux/stddef.h> |
63f0c603 | 22 | #include <linux/sysctl.h> |
b3901d54 CM |
23 | #include <linux/unistd.h> |
24 | #include <linux/user.h> | |
25 | #include <linux/delay.h> | |
26 | #include <linux/reboot.h> | |
27 | #include <linux/interrupt.h> | |
b3901d54 CM |
28 | #include <linux/init.h> |
29 | #include <linux/cpu.h> | |
30 | #include <linux/elfcore.h> | |
31 | #include <linux/pm.h> | |
32 | #include <linux/tick.h> | |
33 | #include <linux/utsname.h> | |
34 | #include <linux/uaccess.h> | |
35 | #include <linux/random.h> | |
36 | #include <linux/hw_breakpoint.h> | |
37 | #include <linux/personality.h> | |
38 | #include <linux/notifier.h> | |
096b3224 | 39 | #include <trace/events/power.h> |
c02433dd | 40 | #include <linux/percpu.h> |
bc0ee476 | 41 | #include <linux/thread_info.h> |
63f0c603 | 42 | #include <linux/prctl.h> |
4f62bb7c | 43 | #include <linux/stacktrace.h> |
b3901d54 | 44 | |
57f4959b | 45 | #include <asm/alternative.h> |
3e9e67e1 | 46 | #include <asm/arch_timer.h> |
b3901d54 | 47 | #include <asm/compat.h> |
19c95f26 | 48 | #include <asm/cpufeature.h> |
b3901d54 | 49 | #include <asm/cacheflush.h> |
d0854412 | 50 | #include <asm/exec.h> |
ec45d1cf | 51 | #include <asm/fpsimd.h> |
fc84bc53 | 52 | #include <asm/gcs.h> |
ec45d1cf | 53 | #include <asm/mmu_context.h> |
637ec831 | 54 | #include <asm/mte.h> |
b3901d54 | 55 | #include <asm/processor.h> |
75031975 | 56 | #include <asm/pointer_auth.h> |
b3901d54 | 57 | #include <asm/stacktrace.h> |
baa96377 MS |
58 | #include <asm/switch_to.h> |
59 | #include <asm/system_misc.h> | |
b3901d54 | 60 | |
0a1213fa | 61 | #if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK) |
c0c264ae | 62 | #include <linux/stackprotector.h> |
9fcb2e93 | 63 | unsigned long __stack_chk_guard __ro_after_init; |
c0c264ae LA |
64 | EXPORT_SYMBOL(__stack_chk_guard); |
65 | #endif | |
66 | ||
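
When CONFIG_STACKPROTECTOR is enabled without per-task canaries, `__stack_chk_guard` above is the single global canary that compiler-generated checks compare against in function epilogues, which is why it must be exported. As a rough illustration only (the real check is emitted by the compiler, and `protected_function` is a hypothetical name), a stack-protected function behaves roughly like this sketch:

```c
/* Conceptual sketch of -fstack-protector codegen; not kernel code. */
extern unsigned long __stack_chk_guard;
extern void __stack_chk_fail(void);

void protected_function(void)
{
	/* Prologue: copy the canary onto the stack, below the return address. */
	volatile unsigned long canary = __stack_chk_guard;
	char buf[64];

	/* ... function body that might overflow buf ... */
	(void)buf;

	/* Epilogue: a corrupted canary means the stack was smashed. */
	if (canary != __stack_chk_guard)
		__stack_chk_fail();
}
```
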
b3901d54 CM |
67 | /* |
68 | * Function pointers to optional machine specific functions | |
69 | */ | |
70 | void (*pm_power_off)(void); | |
71 | EXPORT_SYMBOL_GPL(pm_power_off); | |
72 | ||
9327e2c6 | 73 | #ifdef CONFIG_HOTPLUG_CPU |
071c44e4 | 74 | void __noreturn arch_cpu_idle_dead(void) |
9327e2c6 MR |
75 | { |
76 | cpu_die(); | |
77 | } | |
78 | #endif | |
79 | ||
90f51a09 AK |
80 | /* |
81 | * Called by kexec, immediately prior to machine_kexec(). | |
82 | * | |
83 | * This must completely disable all secondary CPUs; simply causing those CPUs | |
84 | * to execute e.g. a RAM-based pin loop is not sufficient. This allows the | |
85 | * kexec'd kernel to use any and all RAM as it sees fit, without having to | |
86 | * avoid any code or data used by any SW CPU pin loop. The CPU hotplug | |
d66b16f5 | 87 | * functionality embodied in smp_shutdown_nonboot_cpus() is used to achieve this. |
90f51a09 | 88 | */ |
b3901d54 CM |
89 | void machine_shutdown(void) |
90 | { | |
5efbe6a6 | 91 | smp_shutdown_nonboot_cpus(reboot_cpu); |
b3901d54 CM |
92 | } |
93 | ||
90f51a09 AK |
94 | /* |
95 | * Halting simply requires that the secondary CPUs stop performing any | |
96 | * activity (executing tasks, handling interrupts). smp_send_stop() | |
97 | * achieves this. | |
98 | */ | |
b3901d54 CM |
99 | void machine_halt(void) |
100 | { | |
b9acc49e | 101 | local_irq_disable(); |
90f51a09 | 102 | smp_send_stop(); |
b3901d54 CM |
103 | while (1); |
104 | } | |
105 | ||
90f51a09 AK |
106 | /* |
107 | * Power-off simply requires that the secondary CPUs stop performing any | |
108 | * activity (executing tasks, handling interrupts). smp_send_stop() | |
109 | * achieves this. When the system power is turned off, it will take all CPUs | |
110 | * with it. | |
111 | */ | |
b3901d54 CM |
112 | void machine_power_off(void) |
113 | { | |
b9acc49e | 114 | local_irq_disable(); |
90f51a09 | 115 | smp_send_stop(); |
0c649914 | 116 | do_kernel_power_off(); |
b3901d54 CM |
117 | } |
118 | ||
90f51a09 AK |
119 | /* |
120 | * Restart requires that the secondary CPUs stop performing any activity | |
68234df4 | 121 | * while the primary CPU resets the system. Systems with multiple CPUs must |
90f51a09 AK |
122 | * provide a HW restart implementation, to ensure that all CPUs reset at once. |
123 | * This is required so that any code running after reset on the primary CPU | |
124 | * doesn't have to co-ordinate with other CPUs to ensure they aren't still | |
125 | * executing pre-reset code, and using RAM that the primary CPU's code wishes | |
126 | * to use. Implementing such co-ordination would be essentially impossible. | |
127 | */ | |
b3901d54 CM |
128 | void machine_restart(char *cmd) |
129 | { | |
b3901d54 CM |
130 | /* Disable interrupts first */ |
131 | local_irq_disable(); | |
b9acc49e | 132 | smp_send_stop(); |
b3901d54 | 133 | |
60c0d45a AB |
134 | /* |
135 | * UpdateCapsule() depends on the system being reset via | |
136 | * ResetSystem(). | |
137 | */ | |
138 | if (efi_enabled(EFI_RUNTIME_SERVICES)) | |
139 | efi_reboot(reboot_mode, NULL); | |
140 | ||
b3901d54 | 141 | /* Now call the architecture specific reboot code. */ |
ab6cef1d | 142 | do_kernel_restart(cmd); |
b3901d54 CM |
143 | |
144 | /* | |
145 | * Whoops - the architecture was unable to reboot. | |
146 | */ | |
147 | printk("Reboot failed -- System halted\n"); | |
148 | while (1); | |
149 | } | |
150 | ||
ec94a46e DM |
151 | #define bstr(suffix, str) [PSR_BTYPE_ ## suffix >> PSR_BTYPE_SHIFT] = str |
152 | static const char *const btypes[] = { | |
153 | bstr(NONE, "--"), | |
154 | bstr( JC, "jc"), | |
155 | bstr( C, "-c"), | |
156 | bstr( J , "j-") | |
157 | }; | |
158 | #undef bstr | |
159 | ||
b7300d4c WD |
160 | static void print_pstate(struct pt_regs *regs) |
161 | { | |
162 | u64 pstate = regs->pstate; | |
163 | ||
164 | if (compat_user_mode(regs)) { | |
ec63e300 | 165 | printk("pstate: %08llx (%c%c%c%c %c %s %s %c%c%c %cDIT %cSSBS)\n", |
b7300d4c | 166 | pstate, |
d64567f6 MR |
167 | pstate & PSR_AA32_N_BIT ? 'N' : 'n', |
168 | pstate & PSR_AA32_Z_BIT ? 'Z' : 'z', | |
169 | pstate & PSR_AA32_C_BIT ? 'C' : 'c', | |
170 | pstate & PSR_AA32_V_BIT ? 'V' : 'v', | |
171 | pstate & PSR_AA32_Q_BIT ? 'Q' : 'q', | |
172 | pstate & PSR_AA32_T_BIT ? "T32" : "A32", | |
173 | pstate & PSR_AA32_E_BIT ? "BE" : "LE", | |
174 | pstate & PSR_AA32_A_BIT ? 'A' : 'a', | |
175 | pstate & PSR_AA32_I_BIT ? 'I' : 'i', | |
ec63e300 LH |
176 | pstate & PSR_AA32_F_BIT ? 'F' : 'f', |
177 | pstate & PSR_AA32_DIT_BIT ? '+' : '-', | |
178 | pstate & PSR_AA32_SSBS_BIT ? '+' : '-'); | |
b7300d4c | 179 | } else { |
ec94a46e DM |
180 | const char *btype_str = btypes[(pstate & PSR_BTYPE_MASK) >> |
181 | PSR_BTYPE_SHIFT]; | |
182 | ||
ec63e300 | 183 | printk("pstate: %08llx (%c%c%c%c %c%c%c%c %cPAN %cUAO %cTCO %cDIT %cSSBS BTYPE=%s)\n", |
b7300d4c WD |
184 | pstate, |
185 | pstate & PSR_N_BIT ? 'N' : 'n', | |
186 | pstate & PSR_Z_BIT ? 'Z' : 'z', | |
187 | pstate & PSR_C_BIT ? 'C' : 'c', | |
188 | pstate & PSR_V_BIT ? 'V' : 'v', | |
189 | pstate & PSR_D_BIT ? 'D' : 'd', | |
190 | pstate & PSR_A_BIT ? 'A' : 'a', | |
191 | pstate & PSR_I_BIT ? 'I' : 'i', | |
192 | pstate & PSR_F_BIT ? 'F' : 'f', | |
193 | pstate & PSR_PAN_BIT ? '+' : '-', | |
ec94a46e | 194 | pstate & PSR_UAO_BIT ? '+' : '-', |
637ec831 | 195 | pstate & PSR_TCO_BIT ? '+' : '-', |
ec63e300 LH |
196 | pstate & PSR_DIT_BIT ? '+' : '-', |
197 | pstate & PSR_SSBS_BIT ? '+' : '-', | |
ec94a46e | 198 | btype_str); |
b7300d4c WD |
199 | } |
200 | } | |
201 | ||
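
As a worked example of the 64-bit branch of the format string above, a hypothetical pstate value of 0x60400005 (Z and C set, DAIF all clear, PAN set, no BTYPE bits) would be reported as:

```
pstate: 60400005 (nZCv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
```
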
b3901d54 CM |
202 | void __show_regs(struct pt_regs *regs) |
203 | { | |
6ca68e80 CM |
204 | int i, top_reg; |
205 | u64 lr, sp; | |
206 | ||
207 | if (compat_user_mode(regs)) { | |
208 | lr = regs->compat_lr; | |
209 | sp = regs->compat_sp; | |
210 | top_reg = 12; | |
211 | } else { | |
212 | lr = regs->regs[30]; | |
213 | sp = regs->sp; | |
214 | top_reg = 29; | |
215 | } | |
b3901d54 | 216 | |
a43cb95d | 217 | show_regs_print_info(KERN_DEFAULT); |
b7300d4c | 218 | print_pstate(regs); |
a06f818a WD |
219 | |
220 | if (!user_mode(regs)) { | |
221 | printk("pc : %pS\n", (void *)regs->pc); | |
ca708599 | 222 | printk("lr : %pS\n", (void *)ptrauth_strip_kernel_insn_pac(lr)); |
a06f818a WD |
223 | } else { |
224 | printk("pc : %016llx\n", regs->pc); | |
225 | printk("lr : %016llx\n", lr); | |
226 | } | |
227 | ||
b7300d4c | 228 | printk("sp : %016llx\n", sp); |
db4b0710 | 229 | |
133d0518 | 230 | if (system_uses_irq_prio_masking()) |
14543630 | 231 | printk("pmr: %08x\n", regs->pmr); |
133d0518 | 232 | |
db4b0710 MR |
233 | i = top_reg; |
234 | ||
235 | while (i >= 0) { | |
0bca3ec8 | 236 | printk("x%-2d: %016llx", i, regs->regs[i]); |
db4b0710 | 237 | |
0bca3ec8 MWO |
238 | while (i-- % 3) |
239 | pr_cont(" x%-2d: %016llx", i, regs->regs[i]); | |
db4b0710 MR |
240 | |
241 | pr_cont("\n"); | |
b3901d54 | 242 | } |
b3901d54 CM |
243 | } |
244 | ||
d9f1b52a | 245 | void show_regs(struct pt_regs *regs) |
b3901d54 | 246 | { |
b3901d54 | 247 | __show_regs(regs); |
c7689837 | 248 | dump_backtrace(regs, NULL, KERN_DEFAULT); |
b3901d54 CM |
249 | } |
250 | ||
eb35bdd7 WD |
251 | static void tls_thread_flush(void) |
252 | { | |
adf75899 | 253 | write_sysreg(0, tpidr_el0); |
a9d69158 MB |
254 | if (system_supports_tpidr2()) |
255 | write_sysreg_s(0, SYS_TPIDR2_EL0); | |
eb35bdd7 WD |
256 | |
257 | if (is_compat_task()) { | |
65896545 | 258 | current->thread.uw.tp_value = 0; |
eb35bdd7 WD |
259 | |
260 | /* | |
261 | * We need to ensure ordering between the shadow state and the | |
262 | * hardware state, so that we don't corrupt the hardware state | |
263 | * with a stale shadow state during context switch. | |
264 | */ | |
265 | barrier(); | |
adf75899 | 266 | write_sysreg(0, tpidrro_el0); |
eb35bdd7 WD |
267 | } |
268 | } | |
269 | ||
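
tls_thread_flush() zeroes TPIDR_EL0 (and TPIDRRO_EL0 for compat tasks) because those registers carry the user TLS pointer that the kernel context-switches. Purely to illustrate where that value lives, userspace on arm64 can read its own thread-pointer register directly; this is a standalone sketch, not part of the kernel code above:

```c
#include <stdio.h>

/* Read the EL0 thread-pointer register that the kernel saves/restores
 * via tpidr_el0; it is directly readable from AArch64 userspace.
 */
static unsigned long read_tpidr_el0(void)
{
	unsigned long tp;

	asm volatile("mrs %0, tpidr_el0" : "=r" (tp));
	return tp;
}

int main(void)
{
	/* Typically points at the TLS block set up by the C library. */
	printf("tpidr_el0 = %#lx\n", read_tpidr_el0());
	return 0;
}
```
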
63f0c603 CM |
270 | static void flush_tagged_addr_state(void) |
271 | { | |
272 | if (IS_ENABLED(CONFIG_ARM64_TAGGED_ADDR_ABI)) | |
273 | clear_thread_flag(TIF_TAGGED_ADDR); | |
274 | } | |
275 | ||
160a8e13 JG |
276 | static void flush_poe(void) |
277 | { | |
278 | if (!system_supports_poe()) | |
279 | return; | |
280 | ||
281 | write_sysreg_s(POR_EL0_INIT, SYS_POR_EL0); | |
282 | } | |
283 | ||
fc84bc53 MB |
284 | #ifdef CONFIG_ARM64_GCS |
285 | ||
286 | static void flush_gcs(void) | |
287 | { | |
288 | if (!system_supports_gcs()) | |
289 | return; | |
290 | ||
d2be3270 MB |
291 | current->thread.gcspr_el0 = 0; |
292 | current->thread.gcs_base = 0; | |
293 | current->thread.gcs_size = 0; | |
fc84bc53 MB |
294 | current->thread.gcs_el0_mode = 0; |
295 | write_sysreg_s(GCSCRE0_EL1_nTR, SYS_GCSCRE0_EL1); | |
296 | write_sysreg_s(0, SYS_GCSPR_EL0); | |
297 | } | |
298 | ||
506496bc MB |
299 | static int copy_thread_gcs(struct task_struct *p, |
300 | const struct kernel_clone_args *args) | |
301 | { | |
302 | unsigned long gcs; | |
303 | ||
304 | if (!system_supports_gcs()) | |
305 | return 0; | |
306 | ||
307 | p->thread.gcs_base = 0; | |
308 | p->thread.gcs_size = 0; | |
309 | ||
310 | gcs = gcs_alloc_thread_stack(p, args); | |
311 | if (IS_ERR_VALUE(gcs)) | |
312 | return PTR_ERR((void *)gcs); | |
313 | ||
314 | p->thread.gcs_el0_mode = current->thread.gcs_el0_mode; | |
315 | p->thread.gcs_el0_locked = current->thread.gcs_el0_locked; | |
316 | ||
317 | return 0; | |
318 | } | |
319 | ||
fc84bc53 MB |
320 | #else |
321 | ||
322 | static void flush_gcs(void) { } | |
506496bc MB |
323 | static int copy_thread_gcs(struct task_struct *p, |
324 | const struct kernel_clone_args *args) | |
325 | { | |
326 | return 0; | |
327 | } | |
fc84bc53 MB |
328 | |
329 | #endif | |
330 | ||
b3901d54 CM |
331 | void flush_thread(void) |
332 | { | |
333 | fpsimd_flush_thread(); | |
eb35bdd7 | 334 | tls_thread_flush(); |
b3901d54 | 335 | flush_ptrace_hw_breakpoint(current); |
63f0c603 | 336 | flush_tagged_addr_state(); |
160a8e13 | 337 | flush_poe(); |
fc84bc53 | 338 | flush_gcs(); |
b3901d54 CM |
339 | } |
340 | ||
bc0ee476 DM |
341 | void arch_release_task_struct(struct task_struct *tsk) |
342 | { | |
343 | fpsimd_release_task(tsk); | |
506496bc | 344 | gcs_free(tsk); |
bc0ee476 DM |
345 | } |
346 | ||
b3901d54 CM |
347 | int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) |
348 | { | |
e0cb0f26 MR |
349 | /* |
350 | * The current/src task's FPSIMD state may or may not be live, and may | |
351 | * have been altered by ptrace after entry to the kernel. Save the | |
352 | * effective FPSIMD state so that this will be copied into dst. | |
353 | */ | |
354 | fpsimd_save_and_flush_current_state(); | |
355 | fpsimd_sync_from_effective_state(src); | |
356 | ||
b3901d54 | 357 | *dst = *src; |
bc0ee476 | 358 | |
4585fc59 | 359 | /* |
a6d066f7 MR |
360 | * Drop stale reference to src's sve_state and convert dst to |
361 | * non-streaming FPSIMD mode. | |
4585fc59 | 362 | */ |
a6d066f7 | 363 | dst->thread.fp_type = FP_STATE_FPSIMD; |
4585fc59 MM |
364 | dst->thread.sve_state = NULL; |
365 | clear_tsk_thread_flag(dst, TIF_SVE); | |
a6d066f7 | 366 | task_smstop_sm(dst); |
4585fc59 | 367 | |
8bd7f91c | 368 | /* |
cde5c32d MR |
369 | * Drop stale reference to src's sme_state and ensure dst has ZA |
370 | * disabled. | |
371 | * | |
372 | * When necessary, ZA will be inherited later in copy_thread_za(). | |
8bd7f91c | 373 | */ |
cde5c32d MR |
374 | dst->thread.sme_state = NULL; |
375 | clear_tsk_thread_flag(dst, TIF_SME); | |
376 | dst->thread.svcr &= ~SVCR_ZA_MASK; | |
baa85152 | 377 | |
637ec831 VF |
378 | /* clear any pending asynchronous tag fault raised by the parent */ |
379 | clear_tsk_thread_flag(dst, TIF_MTE_ASYNC_FAULT); | |
380 | ||
b3901d54 CM |
381 | return 0; |
382 | } | |
383 | ||
cde5c32d MR |
384 | static int copy_thread_za(struct task_struct *dst, struct task_struct *src) |
385 | { | |
386 | if (!thread_za_enabled(&src->thread)) | |
387 | return 0; | |
388 | ||
389 | dst->thread.sve_state = kzalloc(sve_state_size(src), | |
390 | GFP_KERNEL); | |
391 | if (!dst->thread.sve_state) | |
392 | return -ENOMEM; | |
393 | ||
394 | dst->thread.sme_state = kmemdup(src->thread.sme_state, | |
395 | sme_state_size(src), | |
396 | GFP_KERNEL); | |
397 | if (!dst->thread.sme_state) { | |
398 | kfree(dst->thread.sve_state); | |
399 | dst->thread.sve_state = NULL; | |
400 | return -ENOMEM; | |
401 | } | |
402 | ||
403 | set_tsk_thread_flag(dst, TIF_SME); | |
404 | dst->thread.svcr |= SVCR_ZA_MASK; | |
405 | ||
406 | return 0; | |
407 | } | |
408 | ||
b3901d54 CM |
409 | asmlinkage void ret_from_fork(void) asm("ret_from_fork"); |
410 | ||
c5febea0 | 411 | int copy_thread(struct task_struct *p, const struct kernel_clone_args *args) |
b3901d54 | 412 | { |
c5febea0 EB |
413 | unsigned long clone_flags = args->flags; |
414 | unsigned long stack_start = args->stack; | |
c5febea0 | 415 | unsigned long tls = args->tls; |
b3901d54 | 416 | struct pt_regs *childregs = task_pt_regs(p); |
506496bc | 417 | int ret; |
b3901d54 | 418 | |
c34501d2 | 419 | memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context)); |
b3901d54 | 420 | |
071b6d4a DM |
421 | /* |
422 | * In case p was allocated the same task_struct pointer as some | |
423 | * other recently-exited task, make sure p is disassociated from | |
424 | * any cpu that may have run that now-exited task recently. | |
425 | * Otherwise we could erroneously skip reloading the FPSIMD | |
426 | * registers for p. | |
427 | */ | |
428 | fpsimd_flush_task_state(p); | |
429 | ||
33e45234 KM |
430 | ptrauth_thread_init_kernel(p); |
431 | ||
5bd2e97c | 432 | if (likely(!args->fn)) { |
9ac08002 | 433 | *childregs = *current_pt_regs(); |
c34501d2 | 434 | childregs->regs[0] = 0; |
d00a3810 WD |
435 | |
436 | /* | |
437 | * Read the current TLS pointer from tpidr_el0 as it may be | |
438 | * out-of-sync with the saved value. | |
439 | */ | |
adf75899 | 440 | *task_user_tls(p) = read_sysreg(tpidr_el0); |
d00a3810 | 441 | |
160a8e13 JG |
442 | if (system_supports_poe()) |
443 | p->thread.por_el0 = read_sysreg_s(SYS_POR_EL0); | |
444 | ||
d00a3810 WD |
445 | if (stack_start) { |
446 | if (is_compat_thread(task_thread_info(p))) | |
e0fd18ce | 447 | childregs->compat_sp = stack_start; |
d00a3810 | 448 | else |
e0fd18ce | 449 | childregs->sp = stack_start; |
c34501d2 | 450 | } |
d00a3810 | 451 | |
cde5c32d MR |
452 | /* |
453 | * Due to the AAPCS64 "ZA lazy saving scheme", PSTATE.ZA and | |
454 | * TPIDR2 need to be manipulated as a pair, and either both | |
455 | * need to be inherited or both need to be reset. | |
456 | * | |
457 | * Within a process, child threads must not inherit their | |
458 | * parent's TPIDR2 value or they may clobber their parent's | |
459 | * stack at some later point. | |
460 | * | |
461 | * When a process is fork()'d, the child must inherit ZA and | |
462 | * TPIDR2 from its parent in case there was dormant ZA state. | |
463 | * | |
464 | * Use CLONE_VM to determine when the child will share the | |
465 | * address space with the parent, and cannot safely inherit the | |
466 | * state. | |
467 | */ | |
468 | if (system_supports_sme()) { | |
469 | if (!(clone_flags & CLONE_VM)) { | |
470 | p->thread.tpidr2_el0 = read_sysreg_s(SYS_TPIDR2_EL0); | |
471 | ret = copy_thread_za(p, current); | |
472 | if (ret) | |
473 | return ret; | |
474 | } else { | |
475 | p->thread.tpidr2_el0 = 0; | |
476 | WARN_ON_ONCE(p->thread.svcr & SVCR_ZA_MASK); | |
477 | } | |
478 | } | |
479 | ||
b3901d54 | 480 | /* |
a4376f2f | 481 | * If a TLS pointer was passed to clone, use it for the new |
cde5c32d | 482 | * thread. |
b3901d54 | 483 | */ |
cde5c32d | 484 | if (clone_flags & CLONE_SETTLS) |
a4376f2f | 485 | p->thread.uw.tp_value = tls; |
506496bc MB |
486 | |
487 | ret = copy_thread_gcs(p, args); | |
488 | if (ret != 0) | |
489 | return ret; | |
c34501d2 | 490 | } else { |
f80d0340 MR |
491 | /* |
492 | * A kthread has no context to ERET to, so ensure any buggy | |
493 | * ERET is treated as an illegal exception return. | |
494 | * | |
495 | * When a user task is created from a kthread, childregs will | |
496 | * be initialized by start_thread() or start_compat_thread(). | |
497 | */ | |
c34501d2 | 498 | memset(childregs, 0, sizeof(struct pt_regs)); |
f80d0340 | 499 | childregs->pstate = PSR_MODE_EL1h | PSR_IL_BIT; |
c2c6b27b | 500 | childregs->stackframe.type = FRAME_META_TYPE_FINAL; |
133d0518 | 501 | |
5bd2e97c EB |
502 | p->thread.cpu_context.x19 = (unsigned long)args->fn; |
503 | p->thread.cpu_context.x20 = (unsigned long)args->fn_arg; | |
e3e85271 JG |
504 | |
505 | if (system_supports_poe()) | |
506 | p->thread.por_el0 = POR_EL0_INIT; | |
b3901d54 | 507 | } |
b3901d54 | 508 | p->thread.cpu_context.pc = (unsigned long)ret_from_fork; |
c34501d2 | 509 | p->thread.cpu_context.sp = (unsigned long)childregs; |
7d7b720a MV |
510 | /* |
511 | * For the benefit of the unwinder, set up childregs->stackframe | |
512 | * as the final frame for the new task. | |
513 | */ | |
886c2b0b | 514 | p->thread.cpu_context.fp = (unsigned long)&childregs->stackframe; |
b3901d54 CM |
515 | |
516 | ptrace_hw_copy_thread(p); | |
517 | ||
518 | return 0; | |
519 | } | |
520 | ||
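
The ZA/TPIDR2 rules in copy_thread() are user-visible: a fork() child starts with its parent's TPIDR2_EL0 value (and any dormant ZA state), while a new thread created with CLONE_VM starts with TPIDR2_EL0 == 0 and ZA disabled. The sketch below illustrates only the TPIDR2 half; it assumes SME-capable hardware and a kernel exposing SME (otherwise the register read traps), and spells TPIDR2_EL0 with its generic S3_3_C13_C0_5 encoding for toolchains without SME support:

```c
#include <stdio.h>
#include <unistd.h>
#include <sys/wait.h>

/* TPIDR2_EL0, readable from EL0 when the kernel enables SME for userspace. */
static unsigned long read_tpidr2(void)
{
	unsigned long v;

	asm volatile("mrs %0, S3_3_C13_C0_5" : "=r" (v));
	return v;
}

int main(void)
{
	printf("parent TPIDR2_EL0 = %#lx\n", read_tpidr2());

	if (fork() == 0) {
		/* fork(): no CLONE_VM, so TPIDR2 (and dormant ZA) are inherited. */
		printf("child  TPIDR2_EL0 = %#lx\n", read_tpidr2());
		_exit(0);
	}
	wait(NULL);
	return 0;
}
```
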
936eb65c DM |
521 | void tls_preserve_current_state(void) |
522 | { | |
523 | *task_user_tls(current) = read_sysreg(tpidr_el0); | |
a9d69158 MB |
524 | if (system_supports_tpidr2() && !is_compat_task()) |
525 | current->thread.tpidr2_el0 = read_sysreg_s(SYS_TPIDR2_EL0); | |
936eb65c DM |
526 | } |
527 | ||
b3901d54 CM |
528 | static void tls_thread_switch(struct task_struct *next) |
529 | { | |
936eb65c | 530 | tls_preserve_current_state(); |
b3901d54 | 531 | |
18011eac | 532 | if (is_compat_thread(task_thread_info(next))) |
65896545 | 533 | write_sysreg(next->thread.uw.tp_value, tpidrro_el0); |
67ab51cb | 534 | else |
18011eac | 535 | write_sysreg(0, tpidrro_el0); |
b3901d54 | 536 | |
18011eac | 537 | write_sysreg(*task_user_tls(next), tpidr_el0); |
a9d69158 MB |
538 | if (system_supports_tpidr2()) |
539 | write_sysreg_s(next->thread.tpidr2_el0, SYS_TPIDR2_EL0); | |
b3901d54 CM |
540 | } |
541 | ||
cbdf8a18 MZ |
542 | /* |
543 | * Force SSBS state on context-switch, since it may be lost after migrating | |
544 | * from a CPU which treats the bit as RES0 in a heterogeneous system. | |
545 | */ | |
546 | static void ssbs_thread_switch(struct task_struct *next) | |
547 | { | |
cbdf8a18 MZ |
548 | /* |
549 | * Nothing to do for kernel threads, but 'regs' may be junk | |
550 | * (e.g. idle task) so check the flags and bail early. | |
551 | */ | |
552 | if (unlikely(next->flags & PF_KTHREAD)) | |
553 | return; | |
554 | ||
fca3d33d WD |
555 | /* |
556 | * If all CPUs implement the SSBS extension, then we just need to | |
557 | * context-switch the PSTATE field. | |
558 | */ | |
bc75d0c0 | 559 | if (alternative_has_cap_unlikely(ARM64_SSBS)) |
cbdf8a18 MZ |
560 | return; |
561 | ||
c2876207 | 562 | spectre_v4_enable_task_mitigation(next); |
cbdf8a18 MZ |
563 | } |
564 | ||
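
ssbs_thread_switch() only needs extra work when some CPUs lack the SSBS extension; the per-task Spectre-v4 mitigation it re-applies via spectre_v4_enable_task_mitigation() is the state userspace drives through the speculation-control prctl(). A minimal sketch of that existing interface (not new kernel code):

```c
#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
	/* Query the current Speculative Store Bypass policy for this task. */
	int state = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);

	printf("PR_SPEC_STORE_BYPASS state: %#x\n", state);

	/* Opt this task into the mitigation (store-bypass speculation disabled,
	 * i.e. PSTATE.SSBS kept clear on arm64).
	 */
	if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
		  PR_SPEC_DISABLE, 0, 0))
		perror("prctl");

	return 0;
}
```
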
c02433dd MR |
565 | /* |
566 | * We store our current task in sp_el0, which is clobbered by userspace. Keep a | |
567 | * shadow copy so that we can restore this upon entry from userspace. | |
568 | * | |
569 | * This is *only* for exception entry from EL0, and is not valid until we | |
570 | * __switch_to() a user task. | |
571 | */ | |
572 | DEFINE_PER_CPU(struct task_struct *, __entry_task); | |
573 | ||
574 | static void entry_task_switch(struct task_struct *next) | |
575 | { | |
576 | __this_cpu_write(__entry_task, next); | |
577 | } | |
578 | ||
fc84bc53 MB |
579 | #ifdef CONFIG_ARM64_GCS |
580 | ||
581 | void gcs_preserve_current_state(void) | |
582 | { | |
583 | current->thread.gcspr_el0 = read_sysreg_s(SYS_GCSPR_EL0); | |
584 | } | |
585 | ||
586 | static void gcs_thread_switch(struct task_struct *next) | |
587 | { | |
588 | if (!system_supports_gcs()) | |
589 | return; | |
590 | ||
591 | /* GCSPR_EL0 is always readable */ | |
592 | gcs_preserve_current_state(); | |
593 | write_sysreg_s(next->thread.gcspr_el0, SYS_GCSPR_EL0); | |
594 | ||
595 | if (current->thread.gcs_el0_mode != next->thread.gcs_el0_mode) | |
596 | gcs_set_el0_mode(next); | |
597 | ||
598 | /* | |
599 | * Ensure that GCS memory effects of the 'prev' thread are | |
600 | * ordered before other memory accesses with release semantics | |
601 | * (or preceded by a DMB) on the current PE. In addition, any | |
602 | * memory accesses with acquire semantics (or succeeded by a | |
603 | * DMB) are ordered before GCS memory effects of the 'next' | |
604 | * thread. This will ensure that the GCS memory effects are | |
605 | * visible to other PEs in case of migration. | |
606 | */ | |
607 | if (task_gcs_el0_enabled(current) || task_gcs_el0_enabled(next)) | |
608 | gcsb_dsync(); | |
609 | } | |
610 | ||
611 | #else | |
612 | ||
613 | static void gcs_thread_switch(struct task_struct *next) | |
614 | { | |
615 | } | |
616 | ||
617 | #endif | |
618 | ||
d49f7d73 | 619 | /* |
3e9e67e1 PC |
620 | * Handle sysreg updates for ARM erratum 1418040 which affects the 32bit view of |
621 | * CNTVCT, various other errata which require trapping all CNTVCT{,_EL0} | |
622 | * accesses and prctl(PR_SET_TSC). Ensure access is disabled iff a workaround is | |
623 | * required or PR_TSC_SIGSEGV is set. | |
d49f7d73 | 624 | */ |
3e9e67e1 | 625 | static void update_cntkctl_el1(struct task_struct *next) |
d49f7d73 | 626 | { |
3e9e67e1 | 627 | struct thread_info *ti = task_thread_info(next); |
d49f7d73 | 628 | |
3e9e67e1 PC |
629 | if (test_ti_thread_flag(ti, TIF_TSC_SIGSEGV) || |
630 | has_erratum_handler(read_cntvct_el0) || | |
631 | (IS_ENABLED(CONFIG_ARM64_ERRATUM_1418040) && | |
632 | this_cpu_has_cap(ARM64_WORKAROUND_1418040) && | |
633 | is_compat_thread(ti))) | |
38e0257e | 634 | sysreg_clear_set(cntkctl_el1, ARCH_TIMER_USR_VCT_ACCESS_EN, 0); |
d49f7d73 | 635 | else |
38e0257e SP |
636 | sysreg_clear_set(cntkctl_el1, 0, ARCH_TIMER_USR_VCT_ACCESS_EN); |
637 | } | |
d49f7d73 | 638 | |
3e9e67e1 PC |
639 | static void cntkctl_thread_switch(struct task_struct *prev, |
640 | struct task_struct *next) | |
641 | { | |
642 | if ((read_ti_thread_flags(task_thread_info(prev)) & | |
643 | (_TIF_32BIT | _TIF_TSC_SIGSEGV)) != | |
644 | (read_ti_thread_flags(task_thread_info(next)) & | |
645 | (_TIF_32BIT | _TIF_TSC_SIGSEGV))) | |
646 | update_cntkctl_el1(next); | |
647 | } | |
648 | ||
649 | static int do_set_tsc_mode(unsigned int val) | |
38e0257e | 650 | { |
3e9e67e1 PC |
651 | bool tsc_sigsegv; |
652 | ||
653 | if (val == PR_TSC_SIGSEGV) | |
654 | tsc_sigsegv = true; | |
655 | else if (val == PR_TSC_ENABLE) | |
656 | tsc_sigsegv = false; | |
657 | else | |
658 | return -EINVAL; | |
659 | ||
38e0257e | 660 | preempt_disable(); |
3e9e67e1 PC |
661 | update_thread_flag(TIF_TSC_SIGSEGV, tsc_sigsegv); |
662 | update_cntkctl_el1(current); | |
38e0257e | 663 | preempt_enable(); |
3e9e67e1 PC |
664 | |
665 | return 0; | |
d49f7d73 MZ |
666 | } |
667 | ||
160a8e13 JG |
668 | static void permission_overlay_switch(struct task_struct *next) |
669 | { | |
670 | if (!system_supports_poe()) | |
671 | return; | |
672 | ||
673 | current->thread.por_el0 = read_sysreg_s(SYS_POR_EL0); | |
674 | if (current->thread.por_el0 != next->thread.por_el0) { | |
675 | write_sysreg_s(next->thread.por_el0, SYS_POR_EL0); | |
22f3a4f6 KB |
676 | /* |
677 | * No ISB required as we can tolerate spurious Overlay faults - | |
678 | * the fault handler will check again based on the new value | |
679 | * of POR_EL0. | |
680 | */ | |
160a8e13 JG |
681 | } |
682 | } | |
683 | ||
d2e0d8f9 PC |
684 | /* |
685 | * __switch_to() checks current->thread.sctlr_user as an optimisation. Therefore | |
686 | * this function must be called with preemption disabled and the update to | |
687 | * sctlr_user must be made in the same preemption disabled block so that | |
688 | * __switch_to() does not see the variable update before the SCTLR_EL1 one. | |
689 | */ | |
690 | void update_sctlr_el1(u64 sctlr) | |
2f79d2fc | 691 | { |
20169862 PC |
692 | /* |
693 | * EnIA must not be cleared while in the kernel as this is necessary for | |
694 | * in-kernel PAC. It will be cleared on kernel exit if needed. | |
695 | */ | |
696 | sysreg_clear_set(sctlr_el1, SCTLR_USER_MASK & ~SCTLR_ELx_ENIA, sctlr); | |
2f79d2fc PC |
697 | |
698 | /* ISB required for the kernel uaccess routines when setting TCF0. */ | |
699 | isb(); | |
700 | } | |
701 | ||
b3901d54 CM |
702 | /* |
703 | * Thread switching. | |
704 | */ | |
86bcbafc MR |
705 | __notrace_funcgraph __sched |
706 | struct task_struct *__switch_to(struct task_struct *prev, | |
b3901d54 CM |
707 | struct task_struct *next) |
708 | { | |
709 | struct task_struct *last; | |
710 | ||
711 | fpsimd_thread_switch(next); | |
712 | tls_thread_switch(next); | |
713 | hw_breakpoint_thread_switch(next); | |
3325732f | 714 | contextidr_thread_switch(next); |
c02433dd | 715 | entry_task_switch(next); |
cbdf8a18 | 716 | ssbs_thread_switch(next); |
3e9e67e1 | 717 | cntkctl_thread_switch(prev, next); |
b90e4839 | 718 | ptrauth_thread_switch_user(next); |
160a8e13 | 719 | permission_overlay_switch(next); |
fc84bc53 | 720 | gcs_thread_switch(next); |
b3901d54 | 721 | |
5108c67c | 722 | /* |
5fdd05ef RR |
723 | * Complete any pending TLB or cache maintenance on this CPU in case the |
724 | * thread migrates to a different CPU. This full barrier is also | |
725 | * required by the membarrier system call. Additionally it makes any | |
726 | * in-progress pgtable writes visible to the table walker; see |
727 | * emit_pte_barriers(). | |
5108c67c | 728 | */ |
98f7685e | 729 | dsb(ish); |
b3901d54 | 730 | |
1c101da8 CM |
731 | /* |
732 | * MTE thread switching must happen after the DSB above to ensure that | |
733 | * any asynchronous tag check faults have been logged in the TFSR*_EL1 | |
734 | * registers. | |
735 | */ | |
736 | mte_thread_switch(next); | |
2f79d2fc PC |
737 | /* avoid expensive SCTLR_EL1 accesses if no change */ |
738 | if (prev->thread.sctlr_user != next->thread.sctlr_user) | |
739 | update_sctlr_el1(next->thread.sctlr_user); | |
1c101da8 | 740 | |
b3901d54 CM |
741 | /* the actual thread switch */ |
742 | last = cpu_switch_to(prev, next); | |
743 | ||
744 | return last; | |
745 | } | |
746 | ||
4f62bb7c MV |
747 | struct wchan_info { |
748 | unsigned long pc; | |
749 | int count; | |
750 | }; | |
751 | ||
752 | static bool get_wchan_cb(void *arg, unsigned long pc) | |
753 | { | |
754 | struct wchan_info *wchan_info = arg; | |
755 | ||
756 | if (!in_sched_functions(pc)) { | |
757 | wchan_info->pc = pc; | |
758 | return false; | |
759 | } | |
760 | return wchan_info->count++ < 16; | |
761 | } | |
762 | ||
42a20f86 | 763 | unsigned long __get_wchan(struct task_struct *p) |
b3901d54 | 764 | { |
4f62bb7c MV |
765 | struct wchan_info wchan_info = { |
766 | .pc = 0, | |
767 | .count = 0, | |
768 | }; | |
b3901d54 | 769 | |
4f62bb7c | 770 | if (!try_get_task_stack(p)) |
9bbd4c56 MR |
771 | return 0; |
772 | ||
4f62bb7c | 773 | arch_stack_walk(get_wchan_cb, &wchan_info, p, NULL); |
f3dcbe67 | 774 | |
9bbd4c56 | 775 | put_task_stack(p); |
4f62bb7c MV |
776 | |
777 | return wchan_info.pc; | |
b3901d54 CM |
778 | } |
779 | ||
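
__get_wchan() walks a sleeping task's stack, skipping scheduler functions and giving up after 16 frames, to find the PC the task is blocked at; the result is what userspace sees in /proc/&lt;pid&gt;/wchan. A small reader, for illustration only:

```c
#include <stdio.h>

/* Print the wait channel of a process as reported by procfs
 * (a symbol name, or "0" when the task is running or unwinding failed).
 */
int main(int argc, char **argv)
{
	char path[64], wchan[128];
	FILE *f;
	size_t n;

	snprintf(path, sizeof(path), "/proc/%s/wchan", argc > 1 ? argv[1] : "self");
	f = fopen(path, "r");
	if (!f) {
		perror(path);
		return 1;
	}
	n = fread(wchan, 1, sizeof(wchan) - 1, f);
	wchan[n] = '\0';
	fclose(f);

	printf("%s: %s\n", path, wchan);
	return 0;
}
```
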
780 | unsigned long arch_align_stack(unsigned long sp) | |
781 | { | |
782 | if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) | |
8032bf12 | 783 | sp -= get_random_u32_below(PAGE_SIZE); |
b3901d54 CM |
784 | return sp & ~0xf; |
785 | } | |
786 | ||
08cd8f41 WD |
787 | #ifdef CONFIG_COMPAT |
788 | int compat_elf_check_arch(const struct elf32_hdr *hdr) | |
789 | { | |
790 | if (!system_supports_32bit_el0()) | |
791 | return false; | |
792 | ||
793 | if ((hdr)->e_machine != EM_ARM) | |
794 | return false; | |
795 | ||
796 | if (!((hdr)->e_flags & EF_ARM_EABI_MASK)) | |
797 | return false; | |
798 | ||
799 | /* | |
800 | * Prevent execve() of a 32-bit program from a deadline task | |
801 | * if the restricted affinity mask would be inadmissible on an | |
802 | * asymmetric system. | |
803 | */ | |
804 | return !static_branch_unlikely(&arm64_mismatched_32bit_el0) || | |
805 | !dl_task_check_affinity(current, system_32bit_el0_cpumask()); | |
806 | } | |
807 | #endif | |
808 | ||
d1be5c99 YN |
809 | /* |
810 | * Called from setup_new_exec() after (COMPAT_)SET_PERSONALITY. | |
811 | */ | |
812 | void arch_setup_new_exec(void) | |
813 | { | |
873c3e89 WD |
814 | unsigned long mmflags = 0; |
815 | ||
816 | if (is_compat_task()) { | |
817 | mmflags = MMCF_AARCH32; | |
08cd8f41 WD |
818 | |
819 | /* | |
820 | * Restrict the CPU affinity mask for a 32-bit task so that | |
821 | * it contains only 32-bit-capable CPUs. | |
822 | * | |
823 | * From the perspective of the task, this looks similar to | |
824 | * what would happen if the 64-bit-only CPUs were hot-unplugged | |
825 | * at the point of execve(), although we try a bit harder to | |
826 | * honour the cpuset hierarchy. | |
827 | */ | |
873c3e89 | 828 | if (static_branch_unlikely(&arm64_mismatched_32bit_el0)) |
08cd8f41 | 829 | force_compatible_cpus_allowed_ptr(current); |
08cd8f41 WD |
830 | } else if (static_branch_unlikely(&arm64_mismatched_32bit_el0)) { |
831 | relax_compatible_cpus_allowed_ptr(current); | |
873c3e89 | 832 | } |
75031975 | 833 | |
873c3e89 | 834 | current->mm->context.flags = mmflags; |
20169862 PC |
835 | ptrauth_thread_init_user(); |
836 | mte_thread_init_user(); | |
3e9e67e1 | 837 | do_set_tsc_mode(PR_TSC_ENABLE); |
780c083a WD |
838 | |
839 | if (task_spec_ssb_noexec(current)) { | |
840 | arch_prctl_spec_ctrl_set(current, PR_SPEC_STORE_BYPASS, | |
841 | PR_SPEC_ENABLE); | |
842 | } | |
d1be5c99 | 843 | } |
63f0c603 CM |
844 | |
845 | #ifdef CONFIG_ARM64_TAGGED_ADDR_ABI | |
846 | /* | |
847 | * Control the relaxed ABI allowing tagged user addresses into the kernel. | |
848 | */ | |
413235fc | 849 | static unsigned int tagged_addr_disabled; |
63f0c603 | 850 | |
93f067f6 | 851 | long set_tagged_addr_ctrl(struct task_struct *task, unsigned long arg) |
63f0c603 | 852 | { |
1c101da8 | 853 | unsigned long valid_mask = PR_TAGGED_ADDR_ENABLE; |
93f067f6 | 854 | struct thread_info *ti = task_thread_info(task); |
1c101da8 | 855 | |
93f067f6 | 856 | if (is_compat_thread(ti)) |
63f0c603 | 857 | return -EINVAL; |
1c101da8 CM |
858 | |
859 | if (system_supports_mte()) | |
766121ba MB |
860 | valid_mask |= PR_MTE_TCF_SYNC | PR_MTE_TCF_ASYNC \ |
861 | | PR_MTE_TAG_MASK; | |
1c101da8 CM |
862 | |
863 | if (arg & ~valid_mask) | |
63f0c603 CM |
864 | return -EINVAL; |
865 | ||
413235fc CM |
866 | /* |
867 | * Do not allow the enabling of the tagged address ABI if globally | |
868 | * disabled via sysctl abi.tagged_addr_disabled. | |
869 | */ | |
870 | if (arg & PR_TAGGED_ADDR_ENABLE && tagged_addr_disabled) | |
871 | return -EINVAL; | |
872 | ||
93f067f6 | 873 | if (set_mte_ctrl(task, arg) != 0) |
1c101da8 CM |
874 | return -EINVAL; |
875 | ||
93f067f6 | 876 | update_ti_thread_flag(ti, TIF_TAGGED_ADDR, arg & PR_TAGGED_ADDR_ENABLE); |
63f0c603 CM |
877 | |
878 | return 0; | |
879 | } | |
880 | ||
93f067f6 | 881 | long get_tagged_addr_ctrl(struct task_struct *task) |
63f0c603 | 882 | { |
1c101da8 | 883 | long ret = 0; |
93f067f6 | 884 | struct thread_info *ti = task_thread_info(task); |
1c101da8 | 885 | |
93f067f6 | 886 | if (is_compat_thread(ti)) |
63f0c603 CM |
887 | return -EINVAL; |
888 | ||
93f067f6 | 889 | if (test_ti_thread_flag(ti, TIF_TAGGED_ADDR)) |
1c101da8 | 890 | ret = PR_TAGGED_ADDR_ENABLE; |
63f0c603 | 891 | |
93f067f6 | 892 | ret |= get_mte_ctrl(task); |
1c101da8 CM |
893 | |
894 | return ret; | |
63f0c603 CM |
895 | } |
896 | ||
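
set_tagged_addr_ctrl()/get_tagged_addr_ctrl() back the PR_SET_TAGGED_ADDR_CTRL / PR_GET_TAGGED_ADDR_CTRL prctl() pair. A minimal sketch of a task opting into the relaxed tagged-address ABI; as enforced above, the set call fails with EINVAL if abi.tagged_addr_disabled has been flipped globally:

```c
#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
	/* Ask the kernel to accept tagged pointers (non-zero top byte) in syscalls. */
	if (prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE, 0, 0, 0)) {
		perror("PR_SET_TAGGED_ADDR_CTRL");
		return 1;
	}

	/* Read back the control word: bit 0 is PR_TAGGED_ADDR_ENABLE; on MTE
	 * systems the higher bits carry the tag-check-fault mode and tag mask.
	 */
	printf("tagged addr ctrl: %#lx\n",
	       (unsigned long)prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0));
	return 0;
}
```
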
897 | /* | |
898 | * Global sysctl to disable the tagged user addresses support. This control | |
899 | * only prevents the tagged address ABI enabling via prctl() and does not | |
900 | * disable it for tasks that already opted in to the relaxed ABI. | |
901 | */ | |
63f0c603 | 902 | |
1751f872 | 903 | static const struct ctl_table tagged_addr_sysctl_table[] = { |
63f0c603 | 904 | { |
413235fc | 905 | .procname = "tagged_addr_disabled", |
63f0c603 | 906 | .mode = 0644, |
413235fc | 907 | .data = &tagged_addr_disabled, |
63f0c603 CM |
908 | .maxlen = sizeof(int), |
909 | .proc_handler = proc_dointvec_minmax, | |
2c614c11 MC |
910 | .extra1 = SYSCTL_ZERO, |
911 | .extra2 = SYSCTL_ONE, | |
63f0c603 | 912 | }, |
63f0c603 CM |
913 | }; |
914 | ||
915 | static int __init tagged_addr_init(void) | |
916 | { | |
917 | if (!register_sysctl("abi", tagged_addr_sysctl_table)) | |
918 | return -EINVAL; | |
919 | return 0; | |
920 | } | |
921 | ||
922 | core_initcall(tagged_addr_init); | |
923 | #endif /* CONFIG_ARM64_TAGGED_ADDR_ABI */ | |
19c95f26 | 924 | |
ab7876a9 DM |
925 | #ifdef CONFIG_BINFMT_ELF |
926 | int arch_elf_adjust_prot(int prot, const struct arch_elf_state *state, | |
927 | bool has_interp, bool is_interp) | |
928 | { | |
5d1b631c MB |
929 | /* |
930 | * For dynamically linked executables the interpreter is | |
931 | * responsible for setting PROT_BTI on everything except | |
932 | * itself. | |
933 | */ | |
ab7876a9 DM |
934 | if (is_interp != has_interp) |
935 | return prot; | |
936 | ||
937 | if (!(state->flags & ARM64_ELF_BTI)) | |
938 | return prot; | |
939 | ||
940 | if (prot & PROT_EXEC) | |
941 | prot |= PROT_BTI; | |
942 | ||
943 | return prot; | |
944 | } | |
945 | #endif | |
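
arch_elf_adjust_prot() adds PROT_BTI to PROT_EXEC segments of a BTI-built static executable (for dynamically linked binaries the interpreter does this for everything except itself). JITs and loaders achieve the same effect by passing PROT_BTI themselves when mapping executable code. A hedged sketch, with PROT_BTI taken from the arm64 asm/mman.h UAPI:

```c
#include <stdio.h>
#include <sys/mman.h>

#ifndef PROT_BTI
#define PROT_BTI 0x10	/* arm64 UAPI value; defined here only as a fallback */
#endif

int main(void)
{
	/* A page that will hold generated code, mapped writable first... */
	void *code = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (code == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* ...then flipped to executable with BTI guarding, so indirect branches
	 * into the page must land on BTI (or other valid landing pad) instructions.
	 */
	if (mprotect(code, 4096, PROT_READ | PROT_EXEC | PROT_BTI)) {
		perror("mprotect");
		return 1;
	}

	puts("mapped an executable, BTI-guarded page");
	return 0;
}
```
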
3e9e67e1 PC |
946 | |
947 | int get_tsc_mode(unsigned long adr) | |
948 | { | |
949 | unsigned int val; | |
950 | ||
951 | if (is_compat_task()) | |
952 | return -EINVAL; | |
953 | ||
954 | if (test_thread_flag(TIF_TSC_SIGSEGV)) | |
955 | val = PR_TSC_SIGSEGV; | |
956 | else | |
957 | val = PR_TSC_ENABLE; | |
958 | ||
959 | return put_user(val, (unsigned int __user *)adr); | |
960 | } | |
961 | ||
962 | int set_tsc_mode(unsigned int val) | |
963 | { | |
964 | if (is_compat_task()) | |
965 | return -EINVAL; | |
966 | ||
967 | return do_set_tsc_mode(val); | |
968 | } |
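
get_tsc_mode()/set_tsc_mode() service prctl(PR_GET_TSC)/prctl(PR_SET_TSC); on arm64 the "TSC" being controlled is the counter-timer that userspace reads via CNTVCT_EL0, as handled by update_cntkctl_el1() above. A short sketch of a task asking for counter reads to fault:

```c
#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
	int mode = 0;

	/* Make userspace counter reads (CNTVCT_EL0 on arm64) raise SIGSEGV. */
	if (prctl(PR_SET_TSC, PR_TSC_SIGSEGV, 0, 0, 0)) {
		perror("PR_SET_TSC");
		return 1;
	}

	/* PR_GET_TSC writes the current mode through the pointer argument. */
	if (prctl(PR_GET_TSC, &mode, 0, 0, 0) == 0)
		printf("tsc mode: %s\n",
		       mode == PR_TSC_SIGSEGV ? "PR_TSC_SIGSEGV" : "PR_TSC_ENABLE");
	return 0;
}
```
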