Commit | Line | Data |
---|---|---|
caab277b | 1 | // SPDX-License-Identifier: GPL-2.0-only |
b3901d54 CM | 2 | /* |
b3901d54 CM | 3 | * Based on arch/arm/kernel/process.c |
b3901d54 CM | 4 | * |
b3901d54 CM | 5 | * Original Copyright (C) 1995 Linus Torvalds |
b3901d54 CM | 6 | * Copyright (C) 1996-2000 Russell King - Converted to ARM. |
b3901d54 CM | 7 | * Copyright (C) 2012 ARM Ltd. |
b3901d54 | 8 | */ |
fd92d4a5 | 9 | #include <linux/compat.h> |
60c0d45a | 10 | #include <linux/efi.h> |
ab7876a9 | 11 | #include <linux/elf.h> |
b3901d54 CM | 12 | #include <linux/export.h> |
b3901d54 CM | 13 | #include <linux/sched.h> |
b17b0153 | 14 | #include <linux/sched/debug.h> |
29930025 | 15 | #include <linux/sched/task.h> |
68db0cf1 | 16 | #include <linux/sched/task_stack.h> |
b3901d54 | 17 | #include <linux/kernel.h> |
ab7876a9 | 18 | #include <linux/mman.h> |
b3901d54 | 19 | #include <linux/mm.h> |
780c083a | 20 | #include <linux/nospec.h> |
b3901d54 | 21 | #include <linux/stddef.h> |
63f0c603 | 22 | #include <linux/sysctl.h> |
b3901d54 CM | 23 | #include <linux/unistd.h> |
b3901d54 CM | 24 | #include <linux/user.h> |
b3901d54 CM | 25 | #include <linux/delay.h> |
b3901d54 CM | 26 | #include <linux/reboot.h> |
b3901d54 CM | 27 | #include <linux/interrupt.h> |
b3901d54 CM | 28 | #include <linux/init.h> |
b3901d54 CM | 29 | #include <linux/cpu.h> |
b3901d54 CM | 30 | #include <linux/elfcore.h> |
b3901d54 CM | 31 | #include <linux/pm.h> |
b3901d54 CM | 32 | #include <linux/tick.h> |
b3901d54 CM | 33 | #include <linux/utsname.h> |
b3901d54 CM | 34 | #include <linux/uaccess.h> |
b3901d54 CM | 35 | #include <linux/random.h> |
b3901d54 CM | 36 | #include <linux/hw_breakpoint.h> |
b3901d54 CM | 37 | #include <linux/personality.h> |
b3901d54 CM | 38 | #include <linux/notifier.h> |
096b3224 | 39 | #include <trace/events/power.h> |
c02433dd | 40 | #include <linux/percpu.h> |
bc0ee476 | 41 | #include <linux/thread_info.h> |
63f0c603 | 42 | #include <linux/prctl.h> |
4f62bb7c | 43 | #include <linux/stacktrace.h> |
b3901d54 | 44 | |
57f4959b | 45 | #include <asm/alternative.h> |
b3901d54 | 46 | #include <asm/compat.h> |
19c95f26 | 47 | #include <asm/cpufeature.h> |
b3901d54 | 48 | #include <asm/cacheflush.h> |
d0854412 | 49 | #include <asm/exec.h> |
ec45d1cf WD | 50 | #include <asm/fpsimd.h> |
ec45d1cf WD | 51 | #include <asm/mmu_context.h> |
637ec831 | 52 | #include <asm/mte.h> |
b3901d54 | 53 | #include <asm/processor.h> |
75031975 | 54 | #include <asm/pointer_auth.h> |
b3901d54 | 55 | #include <asm/stacktrace.h> |
baa96377 MS | 56 | #include <asm/switch_to.h> |
baa96377 MS | 57 | #include <asm/system_misc.h> |
b3901d54 | 58 | |
0a1213fa | 59 | #if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK) |
c0c264ae | 60 | #include <linux/stackprotector.h> |
9fcb2e93 | 61 | unsigned long __stack_chk_guard __ro_after_init; |
c0c264ae LA | 62 | EXPORT_SYMBOL(__stack_chk_guard); |
c0c264ae LA | 63 | #endif |
c0c264ae LA | 64 | |
b3901d54 CM | 65 | /* |
b3901d54 CM | 66 | * Function pointers to optional machine specific functions |
b3901d54 CM | 67 | */ |
b3901d54 CM | 68 | void (*pm_power_off)(void); |
b3901d54 CM | 69 | EXPORT_SYMBOL_GPL(pm_power_off); |
b3901d54 CM | 70 | |
9327e2c6 | 71 | #ifdef CONFIG_HOTPLUG_CPU |
071c44e4 | 72 | void __noreturn arch_cpu_idle_dead(void) |
9327e2c6 MR | 73 | { |
9327e2c6 MR | 74 | cpu_die(); |
9327e2c6 MR | 75 | } |
9327e2c6 MR | 76 | #endif |
9327e2c6 MR | 77 | |
90f51a09 AK | 78 | /* |
90f51a09 AK | 79 | * Called by kexec, immediately prior to machine_kexec(). |
90f51a09 AK | 80 | * |
90f51a09 AK | 81 | * This must completely disable all secondary CPUs; simply causing those CPUs |
90f51a09 AK | 82 | * to execute e.g. a RAM-based pin loop is not sufficient. This allows the |
90f51a09 AK | 83 | * kexec'd kernel to use any and all RAM as it sees fit, without having to |
90f51a09 AK | 84 | * avoid any code or data used by any SW CPU pin loop. The CPU hotplug |
d66b16f5 | 85 | * functionality embodied in smp_shutdown_nonboot_cpus() is used to achieve this. |
90f51a09 | 86 | */ |
b3901d54 CM | 87 | void machine_shutdown(void) |
b3901d54 CM | 88 | { |
5efbe6a6 | 89 | smp_shutdown_nonboot_cpus(reboot_cpu); |
b3901d54 CM | 90 | } |
b3901d54 CM | 91 | |
90f51a09 AK | 92 | /* |
90f51a09 AK | 93 | * Halting simply requires that the secondary CPUs stop performing any |
90f51a09 AK | 94 | * activity (executing tasks, handling interrupts). smp_send_stop() |
90f51a09 AK | 95 | * achieves this. |
90f51a09 AK | 96 | */ |
b3901d54 CM | 97 | void machine_halt(void) |
b3901d54 CM | 98 | { |
b9acc49e | 99 | local_irq_disable(); |
90f51a09 | 100 | smp_send_stop(); |
b3901d54 CM | 101 | while (1); |
b3901d54 CM | 102 | } |
b3901d54 CM | 103 | |
90f51a09 AK | 104 | /* |
90f51a09 AK | 105 | * Power-off simply requires that the secondary CPUs stop performing any |
90f51a09 AK | 106 | * activity (executing tasks, handling interrupts). smp_send_stop() |
90f51a09 AK | 107 | * achieves this. When the system power is turned off, it will take all CPUs |
90f51a09 AK | 108 | * with it. |
90f51a09 AK | 109 | */ |
b3901d54 CM | 110 | void machine_power_off(void) |
b3901d54 CM | 111 | { |
b9acc49e | 112 | local_irq_disable(); |
90f51a09 | 113 | smp_send_stop(); |
0c649914 | 114 | do_kernel_power_off(); |
b3901d54 CM | 115 | } |
b3901d54 CM | 116 | |
90f51a09 AK | 117 | /* |
90f51a09 AK | 118 | * Restart requires that the secondary CPUs stop performing any activity |
68234df4 | 119 | * while the primary CPU resets the system. Systems with multiple CPUs must |
90f51a09 AK | 120 | * provide a HW restart implementation, to ensure that all CPUs reset at once. |
90f51a09 AK | 121 | * This is required so that any code running after reset on the primary CPU |
90f51a09 AK | 122 | * doesn't have to co-ordinate with other CPUs to ensure they aren't still |
90f51a09 AK | 123 | * executing pre-reset code, and using RAM that the primary CPU's code wishes |
90f51a09 AK | 124 | * to use. Implementing such co-ordination would be essentially impossible. |
90f51a09 AK | 125 | */ |
b3901d54 CM | 126 | void machine_restart(char *cmd) |
b3901d54 CM | 127 | { |
b3901d54 CM | 128 | /* Disable interrupts first */ |
b3901d54 CM | 129 | local_irq_disable(); |
b9acc49e | 130 | smp_send_stop(); |
b3901d54 | 131 | |
60c0d45a AB | 132 | /* |
60c0d45a AB | 133 | * UpdateCapsule() depends on the system being reset via |
60c0d45a AB | 134 | * ResetSystem(). |
60c0d45a AB | 135 | */ |
60c0d45a AB | 136 | if (efi_enabled(EFI_RUNTIME_SERVICES)) |
60c0d45a AB | 137 | efi_reboot(reboot_mode, NULL); |
60c0d45a AB | 138 | |
b3901d54 | 139 | /* Now call the architecture specific reboot code. */ |
ab6cef1d | 140 | do_kernel_restart(cmd); |
b3901d54 CM | 141 | |
b3901d54 CM | 142 | /* |
b3901d54 CM | 143 | * Whoops - the architecture was unable to reboot. |
b3901d54 CM | 144 | */ |
b3901d54 CM | 145 | printk("Reboot failed -- System halted\n"); |
b3901d54 CM | 146 | while (1); |
b3901d54 CM | 147 | } |
b3901d54 CM | 148 | |
ec94a46e DM | 149 | #define bstr(suffix, str) [PSR_BTYPE_ ## suffix >> PSR_BTYPE_SHIFT] = str |
ec94a46e DM | 150 | static const char *const btypes[] = { |
ec94a46e DM | 151 | bstr(NONE, "--"), |
ec94a46e DM | 152 | bstr( JC, "jc"), |
ec94a46e DM | 153 | bstr( C, "-c"), |
ec94a46e DM | 154 | bstr( J , "j-") |
ec94a46e DM | 155 | }; |
ec94a46e DM | 156 | #undef bstr |
ec94a46e DM | 157 | |
b7300d4c WD | 158 | static void print_pstate(struct pt_regs *regs) |
b7300d4c WD | 159 | { |
b7300d4c WD | 160 | u64 pstate = regs->pstate; |
b7300d4c WD | 161 | |
b7300d4c WD | 162 | if (compat_user_mode(regs)) { |
ec63e300 | 163 | printk("pstate: %08llx (%c%c%c%c %c %s %s %c%c%c %cDIT %cSSBS)\n", |
b7300d4c | 164 | pstate, |
d64567f6 MR | 165 | pstate & PSR_AA32_N_BIT ? 'N' : 'n', |
d64567f6 MR | 166 | pstate & PSR_AA32_Z_BIT ? 'Z' : 'z', |
d64567f6 MR | 167 | pstate & PSR_AA32_C_BIT ? 'C' : 'c', |
d64567f6 MR | 168 | pstate & PSR_AA32_V_BIT ? 'V' : 'v', |
d64567f6 MR | 169 | pstate & PSR_AA32_Q_BIT ? 'Q' : 'q', |
d64567f6 MR | 170 | pstate & PSR_AA32_T_BIT ? "T32" : "A32", |
d64567f6 MR | 171 | pstate & PSR_AA32_E_BIT ? "BE" : "LE", |
d64567f6 MR | 172 | pstate & PSR_AA32_A_BIT ? 'A' : 'a', |
d64567f6 MR | 173 | pstate & PSR_AA32_I_BIT ? 'I' : 'i', |
ec63e300 LH | 174 | pstate & PSR_AA32_F_BIT ? 'F' : 'f', |
ec63e300 LH | 175 | pstate & PSR_AA32_DIT_BIT ? '+' : '-', |
ec63e300 LH | 176 | pstate & PSR_AA32_SSBS_BIT ? '+' : '-'); |
b7300d4c | 177 | } else { |
ec94a46e DM | 178 | const char *btype_str = btypes[(pstate & PSR_BTYPE_MASK) >> |
ec94a46e DM | 179 | PSR_BTYPE_SHIFT]; |
ec94a46e DM | 180 | |
ec63e300 | 181 | printk("pstate: %08llx (%c%c%c%c %c%c%c%c %cPAN %cUAO %cTCO %cDIT %cSSBS BTYPE=%s)\n", |
b7300d4c WD | 182 | pstate, |
b7300d4c WD | 183 | pstate & PSR_N_BIT ? 'N' : 'n', |
b7300d4c WD | 184 | pstate & PSR_Z_BIT ? 'Z' : 'z', |
b7300d4c WD | 185 | pstate & PSR_C_BIT ? 'C' : 'c', |
b7300d4c WD | 186 | pstate & PSR_V_BIT ? 'V' : 'v', |
b7300d4c WD | 187 | pstate & PSR_D_BIT ? 'D' : 'd', |
b7300d4c WD | 188 | pstate & PSR_A_BIT ? 'A' : 'a', |
b7300d4c WD | 189 | pstate & PSR_I_BIT ? 'I' : 'i', |
b7300d4c WD | 190 | pstate & PSR_F_BIT ? 'F' : 'f', |
b7300d4c WD | 191 | pstate & PSR_PAN_BIT ? '+' : '-', |
ec94a46e | 192 | pstate & PSR_UAO_BIT ? '+' : '-', |
637ec831 | 193 | pstate & PSR_TCO_BIT ? '+' : '-', |
ec63e300 LH | 194 | pstate & PSR_DIT_BIT ? '+' : '-', |
ec63e300 LH | 195 | pstate & PSR_SSBS_BIT ? '+' : '-', |
ec94a46e | 196 | btype_str); |
b7300d4c WD | 197 | } |
b7300d4c WD | 198 | } |
b7300d4c WD | 199 | |
b3901d54 CM | 200 | void __show_regs(struct pt_regs *regs) |
b3901d54 CM | 201 | { |
6ca68e80 CM | 202 | int i, top_reg; |
6ca68e80 CM | 203 | u64 lr, sp; |
6ca68e80 CM | 204 | |
6ca68e80 CM | 205 | if (compat_user_mode(regs)) { |
6ca68e80 CM | 206 | lr = regs->compat_lr; |
6ca68e80 CM | 207 | sp = regs->compat_sp; |
6ca68e80 CM | 208 | top_reg = 12; |
6ca68e80 CM | 209 | } else { |
6ca68e80 CM | 210 | lr = regs->regs[30]; |
6ca68e80 CM | 211 | sp = regs->sp; |
6ca68e80 CM | 212 | top_reg = 29; |
6ca68e80 CM | 213 | } |
b3901d54 | 214 | |
a43cb95d | 215 | show_regs_print_info(KERN_DEFAULT); |
b7300d4c | 216 | print_pstate(regs); |
a06f818a WD | 217 | |
a06f818a WD | 218 | if (!user_mode(regs)) { |
a06f818a WD | 219 | printk("pc : %pS\n", (void *)regs->pc); |
ca708599 | 220 | printk("lr : %pS\n", (void *)ptrauth_strip_kernel_insn_pac(lr)); |
a06f818a WD | 221 | } else { |
a06f818a WD | 222 | printk("pc : %016llx\n", regs->pc); |
a06f818a WD | 223 | printk("lr : %016llx\n", lr); |
a06f818a WD | 224 | } |
a06f818a WD | 225 | |
b7300d4c | 226 | printk("sp : %016llx\n", sp); |
db4b0710 | 227 | |
133d0518 JT | 228 | if (system_uses_irq_prio_masking()) |
133d0518 JT | 229 | printk("pmr_save: %08llx\n", regs->pmr_save); |
133d0518 JT | 230 | |
db4b0710 MR | 231 | i = top_reg; |
db4b0710 MR | 232 | |
db4b0710 MR | 233 | while (i >= 0) { |
0bca3ec8 | 234 | printk("x%-2d: %016llx", i, regs->regs[i]); |
db4b0710 | 235 | |
0bca3ec8 MWO | 236 | while (i-- % 3) |
0bca3ec8 MWO | 237 | pr_cont(" x%-2d: %016llx", i, regs->regs[i]); |
db4b0710 MR | 238 | |
db4b0710 MR | 239 | pr_cont("\n"); |
b3901d54 | 240 | } |
b3901d54 CM | 241 | } |
b3901d54 CM | 242 | |
d9f1b52a | 243 | void show_regs(struct pt_regs *regs) |
b3901d54 | 244 | { |
b3901d54 | 245 | __show_regs(regs); |
c7689837 | 246 | dump_backtrace(regs, NULL, KERN_DEFAULT); |
b3901d54 CM | 247 | } |
b3901d54 CM | 248 | |
eb35bdd7 WD | 249 | static void tls_thread_flush(void) |
eb35bdd7 WD | 250 | { |
adf75899 | 251 | write_sysreg(0, tpidr_el0); |
a9d69158 MB | 252 | if (system_supports_tpidr2()) |
a9d69158 MB | 253 | write_sysreg_s(0, SYS_TPIDR2_EL0); |
eb35bdd7 WD | 254 | |
eb35bdd7 WD | 255 | if (is_compat_task()) { |
65896545 | 256 | current->thread.uw.tp_value = 0; |
eb35bdd7 WD | 257 | |
eb35bdd7 WD | 258 | /* |
eb35bdd7 WD | 259 | * We need to ensure ordering between the shadow state and the |
eb35bdd7 WD | 260 | * hardware state, so that we don't corrupt the hardware state |
eb35bdd7 WD | 261 | * with a stale shadow state during context switch. |
eb35bdd7 WD | 262 | */ |
eb35bdd7 WD | 263 | barrier(); |
adf75899 | 264 | write_sysreg(0, tpidrro_el0); |
eb35bdd7 WD | 265 | } |
eb35bdd7 WD | 266 | } |
eb35bdd7 WD | 267 | |
63f0c603 CM | 268 | static void flush_tagged_addr_state(void) |
63f0c603 CM | 269 | { |
63f0c603 CM | 270 | if (IS_ENABLED(CONFIG_ARM64_TAGGED_ADDR_ABI)) |
63f0c603 CM | 271 | clear_thread_flag(TIF_TAGGED_ADDR); |
63f0c603 CM | 272 | } |
63f0c603 CM | 273 | |
b3901d54 CM | 274 | void flush_thread(void) |
b3901d54 CM | 275 | { |
b3901d54 CM | 276 | fpsimd_flush_thread(); |
eb35bdd7 | 277 | tls_thread_flush(); |
b3901d54 | 278 | flush_ptrace_hw_breakpoint(current); |
63f0c603 | 279 | flush_tagged_addr_state(); |
b3901d54 CM | 280 | } |
b3901d54 CM | 281 | |
bc0ee476 DM | 282 | void arch_release_task_struct(struct task_struct *tsk) |
bc0ee476 DM | 283 | { |
bc0ee476 DM | 284 | fpsimd_release_task(tsk); |
bc0ee476 DM | 285 | } |
bc0ee476 DM | 286 | |
b3901d54 CM | 287 | int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) |
b3901d54 CM | 288 | { |
6eb6c801 JL | 289 | if (current->mm) |
6eb6c801 JL | 290 | fpsimd_preserve_current_state(); |
b3901d54 | 291 | *dst = *src; |
bc0ee476 | 292 | |
4585fc59 MM | 293 | /* |
4585fc59 MM | 294 | * Detach src's sve_state (if any) from dst so that it does not |
8bd7f91c | 295 | * get erroneously used or freed prematurely. dst's copies |
4585fc59 MM | 296 | * will be allocated on demand later on if dst uses SVE. |
4585fc59 MM | 297 | * For consistency, also clear TIF_SVE here: this could be done |
4585fc59 MM | 298 | * later in copy_process(), but to avoid tripping up future |
8bd7f91c | 299 | * maintainers it is best not to leave TIF flags and buffers in |
4585fc59 MM | 300 | * an inconsistent state, even temporarily. |
4585fc59 MM | 301 | */ |
4585fc59 MM | 302 | dst->thread.sve_state = NULL; |
4585fc59 MM | 303 | clear_tsk_thread_flag(dst, TIF_SVE); |
4585fc59 MM | 304 | |
8bd7f91c MB | 305 | /* |
8bd7f91c MB | 306 | * In the unlikely event that we create a new thread with ZA |
d6138b4a MB | 307 | * enabled we should retain the ZA and ZT state so duplicate |
d6138b4a MB | 308 | * it here. This may be shortly freed if we exec() or if |
d6138b4a MB | 309 | * CLONE_SETTLS but it's simpler to do it here. To avoid |
d6138b4a MB | 310 | * confusing the rest of the code ensure that we have a |
d6138b4a MB | 311 | * sve_state allocated whenever sme_state is allocated. |
8bd7f91c MB | 312 | */ |
8bd7f91c MB | 313 | if (thread_za_enabled(&src->thread)) { |
8bd7f91c MB | 314 | dst->thread.sve_state = kzalloc(sve_state_size(src), |
8bd7f91c MB | 315 | GFP_KERNEL); |
2e29b997 | 316 | if (!dst->thread.sve_state) |
8bd7f91c | 317 | return -ENOMEM; |
ce514000 MB | 318 | |
ce514000 MB | 319 | dst->thread.sme_state = kmemdup(src->thread.sme_state, |
ce514000 MB | 320 | sme_state_size(src), |
ce514000 MB | 321 | GFP_KERNEL); |
ce514000 MB | 322 | if (!dst->thread.sme_state) { |
8bd7f91c MB | 323 | kfree(dst->thread.sve_state); |
8bd7f91c MB | 324 | dst->thread.sve_state = NULL; |
8bd7f91c MB | 325 | return -ENOMEM; |
8bd7f91c MB | 326 | } |
8bd7f91c MB | 327 | } else { |
ce514000 | 328 | dst->thread.sme_state = NULL; |
8bd7f91c MB | 329 | clear_tsk_thread_flag(dst, TIF_SME); |
8bd7f91c MB | 330 | } |
b40c559b | 331 | |
baa85152 MB | 332 | dst->thread.fp_type = FP_STATE_FPSIMD; |
baa85152 MB | 333 | |
637ec831 VF | 334 | /* clear any pending asynchronous tag fault raised by the parent */ |
637ec831 VF | 335 | clear_tsk_thread_flag(dst, TIF_MTE_ASYNC_FAULT); |
637ec831 VF | 336 | |
b3901d54 CM | 337 | return 0; |
b3901d54 CM | 338 | } |
b3901d54 CM | 339 | |
b3901d54 CM | 340 | asmlinkage void ret_from_fork(void) asm("ret_from_fork"); |
b3901d54 CM | 341 | |
c5febea0 | 342 | int copy_thread(struct task_struct *p, const struct kernel_clone_args *args) |
b3901d54 | 343 | { |
c5febea0 EB | 344 | unsigned long clone_flags = args->flags; |
c5febea0 EB | 345 | unsigned long stack_start = args->stack; |
c5febea0 | 346 | unsigned long tls = args->tls; |
b3901d54 | 347 | struct pt_regs *childregs = task_pt_regs(p); |
b3901d54 | 348 | |
c34501d2 | 349 | memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context)); |
b3901d54 | 350 | |
071b6d4a DM | 351 | /* |
071b6d4a DM | 352 | * In case p was allocated the same task_struct pointer as some |
071b6d4a DM | 353 | * other recently-exited task, make sure p is disassociated from |
071b6d4a DM | 354 | * any cpu that may have run that now-exited task recently. |
071b6d4a DM | 355 | * Otherwise we could erroneously skip reloading the FPSIMD |
071b6d4a DM | 356 | * registers for p. |
071b6d4a DM | 357 | */ |
071b6d4a DM | 358 | fpsimd_flush_task_state(p); |
071b6d4a DM | 359 | |
33e45234 KM | 360 | ptrauth_thread_init_kernel(p); |
33e45234 KM | 361 | |
5bd2e97c | 362 | if (likely(!args->fn)) { |
9ac08002 | 363 | *childregs = *current_pt_regs(); |
c34501d2 | 364 | childregs->regs[0] = 0; |
d00a3810 WD | 365 | |
d00a3810 WD | 366 | /* |
d00a3810 WD | 367 | * Read the current TLS pointer from tpidr_el0 as it may be |
d00a3810 WD | 368 | * out-of-sync with the saved value. |
d00a3810 WD | 369 | */ |
adf75899 | 370 | *task_user_tls(p) = read_sysreg(tpidr_el0); |
a9d69158 MB | 371 | if (system_supports_tpidr2()) |
a9d69158 MB | 372 | p->thread.tpidr2_el0 = read_sysreg_s(SYS_TPIDR2_EL0); |
d00a3810 WD | 373 | |
d00a3810 WD | 374 | if (stack_start) { |
d00a3810 WD | 375 | if (is_compat_thread(task_thread_info(p))) |
e0fd18ce | 376 | childregs->compat_sp = stack_start; |
d00a3810 | 377 | else |
e0fd18ce | 378 | childregs->sp = stack_start; |
c34501d2 | 379 | } |
d00a3810 | 380 | |
b3901d54 | 381 | /* |
a4376f2f | 382 | * If a TLS pointer was passed to clone, use it for the new |
a9d69158 | 383 | * thread. We also reset TPIDR2 if it's in use. |
b3901d54 | 384 | */ |
a9d69158 | 385 | if (clone_flags & CLONE_SETTLS) { |
a4376f2f | 386 | p->thread.uw.tp_value = tls; |
a9d69158 MB | 387 | p->thread.tpidr2_el0 = 0; |
a9d69158 MB | 388 | } |
c34501d2 | 389 | } else { |
f80d0340 MR | 390 | /* |
f80d0340 MR | 391 | * A kthread has no context to ERET to, so ensure any buggy |
f80d0340 MR | 392 | * ERET is treated as an illegal exception return. |
f80d0340 MR | 393 | * |
f80d0340 MR | 394 | * When a user task is created from a kthread, childregs will |
f80d0340 MR | 395 | * be initialized by start_thread() or start_compat_thread(). |
f80d0340 MR | 396 | */ |
c34501d2 | 397 | memset(childregs, 0, sizeof(struct pt_regs)); |
f80d0340 | 398 | childregs->pstate = PSR_MODE_EL1h | PSR_IL_BIT; |
133d0518 | 399 | |
5bd2e97c EB | 400 | p->thread.cpu_context.x19 = (unsigned long)args->fn; |
5bd2e97c EB | 401 | p->thread.cpu_context.x20 = (unsigned long)args->fn_arg; |
b3901d54 | 402 | } |
b3901d54 | 403 | p->thread.cpu_context.pc = (unsigned long)ret_from_fork; |
c34501d2 | 404 | p->thread.cpu_context.sp = (unsigned long)childregs; |
7d7b720a MV | 405 | /* |
7d7b720a MV | 406 | * For the benefit of the unwinder, set up childregs->stackframe |
7d7b720a MV | 407 | * as the final frame for the new task. |
7d7b720a MV | 408 | */ |
7d7b720a MV | 409 | p->thread.cpu_context.fp = (unsigned long)childregs->stackframe; |
b3901d54 CM | 410 | |
b3901d54 CM | 411 | ptrace_hw_copy_thread(p); |
b3901d54 CM | 412 | |
b3901d54 CM | 413 | return 0; |
b3901d54 CM | 414 | } |
b3901d54 CM | 415 | |
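The `CLONE_SETTLS` handling above is the kernel half of thread creation: copy_thread() stashes the clone `tls` argument so the new task starts with it in tpidr_el0. As a hedged illustration only (not part of this file; assumes an arm64 glibc system, and `tls_block`/`child_fn` are names invented for the sketch), a userspace program can observe this by cloning with `CLONE_SETTLS` and reading tpidr_el0, which is accessible from EL0:

```c
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/wait.h>

static char tls_block[64];              /* illustrative TLS area */
static volatile unsigned long child_tp; /* written by child, read by parent */

static int child_fn(void *arg)
{
	unsigned long tp;

	/* tpidr_el0 is readable from EL0; copy_thread() loaded it from the
	 * CLONE_SETTLS argument. Avoid libc calls here, since the C
	 * library's own TLS is deliberately not set up in this child. */
	asm volatile("mrs %0, tpidr_el0" : "=r"(tp));
	child_tp = tp;                  /* shared with the parent via CLONE_VM */
	return 0;
}

int main(void)
{
	char *stack = malloc(64 * 1024);
	pid_t pid;

	if (!stack)
		return 1;
	/* The stack grows down, so pass the high end of the allocation. */
	pid = clone(child_fn, stack + 64 * 1024,
		    CLONE_VM | CLONE_SETTLS | SIGCHLD,
		    NULL, NULL, tls_block, NULL);
	if (pid < 0)
		return perror("clone"), 1;
	waitpid(pid, NULL, 0);
	printf("child tpidr_el0=%#lx, expected %p\n", child_tp,
	       (void *)tls_block);
	free(stack);
	return 0;
}
```

The child reports back through memory shared via CLONE_VM rather than printing, since its C-library TLS points at a bare buffer.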
936eb65c DM | 416 | void tls_preserve_current_state(void) |
936eb65c DM | 417 | { |
936eb65c DM | 418 | *task_user_tls(current) = read_sysreg(tpidr_el0); |
a9d69158 MB | 419 | if (system_supports_tpidr2() && !is_compat_task()) |
a9d69158 MB | 420 | current->thread.tpidr2_el0 = read_sysreg_s(SYS_TPIDR2_EL0); |
936eb65c DM | 421 | } |
936eb65c DM | 422 | |
b3901d54 CM | 423 | static void tls_thread_switch(struct task_struct *next) |
b3901d54 CM | 424 | { |
936eb65c | 425 | tls_preserve_current_state(); |
b3901d54 | 426 | |
18011eac | 427 | if (is_compat_thread(task_thread_info(next))) |
65896545 | 428 | write_sysreg(next->thread.uw.tp_value, tpidrro_el0); |
18011eac WD | 429 | else if (!arm64_kernel_unmapped_at_el0()) |
18011eac WD | 430 | write_sysreg(0, tpidrro_el0); |
b3901d54 | 431 | |
18011eac | 432 | write_sysreg(*task_user_tls(next), tpidr_el0); |
a9d69158 MB | 433 | if (system_supports_tpidr2()) |
a9d69158 MB | 434 | write_sysreg_s(next->thread.tpidr2_el0, SYS_TPIDR2_EL0); |
b3901d54 CM | 435 | } |
b3901d54 CM | 436 | |
cbdf8a18 MZ | 437 | /* |
cbdf8a18 MZ | 438 | * Force SSBS state on context-switch, since it may be lost after migrating |
cbdf8a18 MZ | 439 | * from a CPU which treats the bit as RES0 in a heterogeneous system. |
cbdf8a18 MZ | 440 | */ |
cbdf8a18 MZ | 441 | static void ssbs_thread_switch(struct task_struct *next) |
cbdf8a18 MZ | 442 | { |
cbdf8a18 MZ | 443 | /* |
cbdf8a18 MZ | 444 | * Nothing to do for kernel threads, but 'regs' may be junk |
cbdf8a18 MZ | 445 | * (e.g. idle task) so check the flags and bail early. |
cbdf8a18 MZ | 446 | */ |
cbdf8a18 MZ | 447 | if (unlikely(next->flags & PF_KTHREAD)) |
cbdf8a18 MZ | 448 | return; |
cbdf8a18 MZ | 449 | |
449 | ||
fca3d33d WD |
450 | /* |
451 | * If all CPUs implement the SSBS extension, then we just need to | |
452 | * context-switch the PSTATE field. | |
453 | */ | |
bc75d0c0 | 454 | if (alternative_has_cap_unlikely(ARM64_SSBS)) |
cbdf8a18 MZ | 455 | return; |
cbdf8a18 MZ | 456 | |
c2876207 | 457 | spectre_v4_enable_task_mitigation(next); |
cbdf8a18 MZ | 458 | } |
cbdf8a18 MZ | 459 | |
c02433dd MR | 460 | /* |
c02433dd MR | 461 | * We store our current task in sp_el0, which is clobbered by userspace. Keep a |
c02433dd MR | 462 | * shadow copy so that we can restore this upon entry from userspace. |
c02433dd MR | 463 | * |
c02433dd MR | 464 | * This is *only* for exception entry from EL0, and is not valid until we |
c02433dd MR | 465 | * __switch_to() a user task. |
c02433dd MR | 466 | */ |
c02433dd MR | 467 | DEFINE_PER_CPU(struct task_struct *, __entry_task); |
c02433dd MR | 468 | |
c02433dd MR | 469 | static void entry_task_switch(struct task_struct *next) |
c02433dd MR | 470 | { |
c02433dd MR | 471 | __this_cpu_write(__entry_task, next); |
c02433dd MR | 472 | } |
c02433dd MR | 473 | |
d49f7d73 MZ | 474 | /* |
d49f7d73 MZ | 475 | * ARM erratum 1418040 handling, affecting the 32bit view of CNTVCT. |
38e0257e SP | 476 | * Ensure access is disabled when switching to a 32bit task, ensure |
38e0257e SP | 477 | * access is enabled when switching to a 64bit task. |
d49f7d73 | 478 | */ |
38e0257e | 479 | static void erratum_1418040_thread_switch(struct task_struct *next) |
d49f7d73 | 480 | { |
38e0257e SP | 481 | if (!IS_ENABLED(CONFIG_ARM64_ERRATUM_1418040) || |
38e0257e SP | 482 | !this_cpu_has_cap(ARM64_WORKAROUND_1418040)) |
d49f7d73 MZ | 483 | return; |
d49f7d73 MZ | 484 | |
38e0257e SP | 485 | if (is_compat_thread(task_thread_info(next))) |
38e0257e SP | 486 | sysreg_clear_set(cntkctl_el1, ARCH_TIMER_USR_VCT_ACCESS_EN, 0); |
d49f7d73 | 487 | else |
38e0257e SP | 488 | sysreg_clear_set(cntkctl_el1, 0, ARCH_TIMER_USR_VCT_ACCESS_EN); |
38e0257e SP | 489 | } |
d49f7d73 | 490 | |
38e0257e SP | 491 | static void erratum_1418040_new_exec(void) |
38e0257e SP | 492 | { |
38e0257e SP | 493 | preempt_disable(); |
38e0257e SP | 494 | erratum_1418040_thread_switch(current); |
38e0257e SP | 495 | preempt_enable(); |
d49f7d73 MZ | 496 | } |
d49f7d73 MZ | 497 | |
d2e0d8f9 PC | 498 | /* |
d2e0d8f9 PC | 499 | * __switch_to() checks current->thread.sctlr_user as an optimisation. Therefore |
d2e0d8f9 PC | 500 | * this function must be called with preemption disabled and the update to |
d2e0d8f9 PC | 501 | * sctlr_user must be made in the same preemption disabled block so that |
d2e0d8f9 PC | 502 | * __switch_to() does not see the variable update before the SCTLR_EL1 one. |
d2e0d8f9 PC | 503 | */ |
d2e0d8f9 PC | 504 | void update_sctlr_el1(u64 sctlr) |
2f79d2fc | 505 | { |
20169862 PC | 506 | /* |
20169862 PC | 507 | * EnIA must not be cleared while in the kernel as this is necessary for |
20169862 PC | 508 | * in-kernel PAC. It will be cleared on kernel exit if needed. |
20169862 PC | 509 | */ |
20169862 PC | 510 | sysreg_clear_set(sctlr_el1, SCTLR_USER_MASK & ~SCTLR_ELx_ENIA, sctlr); |
2f79d2fc PC | 511 | |
2f79d2fc PC | 512 | /* ISB required for the kernel uaccess routines when setting TCF0. */ |
2f79d2fc PC | 513 | isb(); |
2f79d2fc PC | 514 | } |
2f79d2fc PC | 515 | |
b3901d54 CM | 516 | /* |
b3901d54 CM | 517 | * Thread switching. |
b3901d54 CM | 518 | */ |
86bcbafc MR | 519 | __notrace_funcgraph __sched |
86bcbafc MR | 520 | struct task_struct *__switch_to(struct task_struct *prev, |
b3901d54 CM | 521 | struct task_struct *next) |
b3901d54 CM | 522 | { |
b3901d54 CM | 523 | struct task_struct *last; |
b3901d54 CM | 524 | |
b3901d54 CM | 525 | fpsimd_thread_switch(next); |
b3901d54 CM | 526 | tls_thread_switch(next); |
b3901d54 CM | 527 | hw_breakpoint_thread_switch(next); |
3325732f | 528 | contextidr_thread_switch(next); |
c02433dd | 529 | entry_task_switch(next); |
cbdf8a18 | 530 | ssbs_thread_switch(next); |
38e0257e | 531 | erratum_1418040_thread_switch(next); |
b90e4839 | 532 | ptrauth_thread_switch_user(next); |
b3901d54 | 533 | |
5108c67c CM | 534 | /* |
5108c67c CM | 535 | * Complete any pending TLB or cache maintenance on this CPU in case |
5108c67c CM | 536 | * the thread migrates to a different CPU. |
22e4ebb9 MD | 537 | * This full barrier is also required by the membarrier system |
22e4ebb9 MD | 538 | * call. |
5108c67c | 539 | */ |
98f7685e | 540 | dsb(ish); |
b3901d54 | 541 | |
1c101da8 CM | 542 | /* |
1c101da8 CM | 543 | * MTE thread switching must happen after the DSB above to ensure that |
1c101da8 CM | 544 | * any asynchronous tag check faults have been logged in the TFSR*_EL1 |
1c101da8 CM | 545 | * registers. |
1c101da8 CM | 546 | */ |
1c101da8 CM | 547 | mte_thread_switch(next); |
2f79d2fc PC | 548 | /* avoid expensive SCTLR_EL1 accesses if no change */ |
2f79d2fc PC | 549 | if (prev->thread.sctlr_user != next->thread.sctlr_user) |
2f79d2fc PC | 550 | update_sctlr_el1(next->thread.sctlr_user); |
1c101da8 | 551 | |
b3901d54 CM | 552 | /* the actual thread switch */ |
b3901d54 CM | 553 | last = cpu_switch_to(prev, next); |
b3901d54 CM | 554 | |
b3901d54 CM | 555 | return last; |
b3901d54 CM | 556 | } |
b3901d54 CM | 557 | |
4f62bb7c MV | 558 | struct wchan_info { |
4f62bb7c MV | 559 | unsigned long pc; |
4f62bb7c MV | 560 | int count; |
4f62bb7c MV | 561 | }; |
4f62bb7c MV | 562 | |
4f62bb7c MV | 563 | static bool get_wchan_cb(void *arg, unsigned long pc) |
4f62bb7c MV | 564 | { |
4f62bb7c MV | 565 | struct wchan_info *wchan_info = arg; |
4f62bb7c MV | 566 | |
4f62bb7c MV | 567 | if (!in_sched_functions(pc)) { |
4f62bb7c MV | 568 | wchan_info->pc = pc; |
4f62bb7c MV | 569 | return false; |
4f62bb7c MV | 570 | } |
4f62bb7c MV | 571 | return wchan_info->count++ < 16; |
4f62bb7c MV | 572 | } |
4f62bb7c MV | 573 | |
42a20f86 | 574 | unsigned long __get_wchan(struct task_struct *p) |
b3901d54 | 575 | { |
4f62bb7c MV | 576 | struct wchan_info wchan_info = { |
4f62bb7c MV | 577 | .pc = 0, |
4f62bb7c MV | 578 | .count = 0, |
4f62bb7c MV | 579 | }; |
b3901d54 | 580 | |
4f62bb7c | 581 | if (!try_get_task_stack(p)) |
9bbd4c56 MR | 582 | return 0; |
9bbd4c56 MR | 583 | |
4f62bb7c | 584 | arch_stack_walk(get_wchan_cb, &wchan_info, p, NULL); |
f3dcbe67 | 585 | |
9bbd4c56 | 586 | put_task_stack(p); |
4f62bb7c MV | 587 | |
4f62bb7c MV | 588 | return wchan_info.pc; |
b3901d54 CM | 589 | } |
b3901d54 CM | 590 | |
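`__get_wchan()` is what surfaces in /proc/&lt;pid&gt;/wchan: the symbol the task is blocked in, or 0 if it is not blocked in the scheduler. A minimal reader sketch (illustrative only, not part of this file):

```c
#include <stdio.h>

/* Print the wait channel of a pid (default: pid 1). The proc file holds a
 * bare kernel symbol name, or "0" when the task is not blocked. */
int main(int argc, char **argv)
{
	char path[64], wchan[128];
	FILE *f;

	snprintf(path, sizeof(path), "/proc/%s/wchan",
		 argc > 1 ? argv[1] : "1");
	f = fopen(path, "r");
	if (!f)
		return perror(path), 1;
	if (fgets(wchan, sizeof(wchan), f))
		printf("%s\n", wchan);
	fclose(f);
	return 0;
}
```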
b3901d54 CM | 591 | unsigned long arch_align_stack(unsigned long sp) |
b3901d54 CM | 592 | { |
b3901d54 CM | 593 | if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) |
8032bf12 | 594 | sp -= get_random_u32_below(PAGE_SIZE); |
b3901d54 CM | 595 | return sp & ~0xf; |
b3901d54 CM | 596 | } |
b3901d54 CM | 597 | |
08cd8f41 WD | 598 | #ifdef CONFIG_COMPAT |
08cd8f41 WD | 599 | int compat_elf_check_arch(const struct elf32_hdr *hdr) |
08cd8f41 WD | 600 | { |
08cd8f41 WD | 601 | if (!system_supports_32bit_el0()) |
08cd8f41 WD | 602 | return false; |
08cd8f41 WD | 603 | |
08cd8f41 WD | 604 | if ((hdr)->e_machine != EM_ARM) |
08cd8f41 WD | 605 | return false; |
08cd8f41 WD | 606 | |
08cd8f41 WD | 607 | if (!((hdr)->e_flags & EF_ARM_EABI_MASK)) |
08cd8f41 WD | 608 | return false; |
08cd8f41 WD | 609 | |
08cd8f41 WD | 610 | /* |
08cd8f41 WD | 611 | * Prevent execve() of a 32-bit program from a deadline task |
08cd8f41 WD | 612 | * if the restricted affinity mask would be inadmissible on an |
08cd8f41 WD | 613 | * asymmetric system. |
08cd8f41 WD | 614 | */ |
08cd8f41 WD | 615 | return !static_branch_unlikely(&arm64_mismatched_32bit_el0) || |
08cd8f41 WD | 616 | !dl_task_check_affinity(current, system_32bit_el0_cpumask()); |
08cd8f41 WD | 617 | } |
08cd8f41 WD | 618 | #endif |
08cd8f41 WD | 619 | |
d1be5c99 YN | 620 | /* |
d1be5c99 YN | 621 | * Called from setup_new_exec() after (COMPAT_)SET_PERSONALITY. |
d1be5c99 YN | 622 | */ |
d1be5c99 YN | 623 | void arch_setup_new_exec(void) |
d1be5c99 YN | 624 | { |
873c3e89 WD | 625 | unsigned long mmflags = 0; |
873c3e89 WD | 626 | |
873c3e89 WD | 627 | if (is_compat_task()) { |
873c3e89 WD | 628 | mmflags = MMCF_AARCH32; |
08cd8f41 WD | 629 | |
08cd8f41 WD | 630 | /* |
08cd8f41 WD | 631 | * Restrict the CPU affinity mask for a 32-bit task so that |
08cd8f41 WD | 632 | * it contains only 32-bit-capable CPUs. |
08cd8f41 WD | 633 | * |
08cd8f41 WD | 634 | * From the perspective of the task, this looks similar to |
08cd8f41 WD | 635 | * what would happen if the 64-bit-only CPUs were hot-unplugged |
08cd8f41 WD | 636 | * at the point of execve(), although we try a bit harder to |
08cd8f41 WD | 637 | * honour the cpuset hierarchy. |
08cd8f41 WD | 638 | */ |
873c3e89 | 639 | if (static_branch_unlikely(&arm64_mismatched_32bit_el0)) |
08cd8f41 | 640 | force_compatible_cpus_allowed_ptr(current); |
08cd8f41 WD | 641 | } else if (static_branch_unlikely(&arm64_mismatched_32bit_el0)) { |
08cd8f41 WD | 642 | relax_compatible_cpus_allowed_ptr(current); |
873c3e89 | 643 | } |
75031975 | 644 | |
873c3e89 | 645 | current->mm->context.flags = mmflags; |
20169862 PC | 646 | ptrauth_thread_init_user(); |
20169862 PC | 647 | mte_thread_init_user(); |
38e0257e | 648 | erratum_1418040_new_exec(); |
780c083a WD | 649 | |
780c083a WD | 650 | if (task_spec_ssb_noexec(current)) { |
780c083a WD | 651 | arch_prctl_spec_ctrl_set(current, PR_SPEC_STORE_BYPASS, |
780c083a WD | 652 | PR_SPEC_ENABLE); |
780c083a WD | 653 | } |
d1be5c99 | 654 | } |
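The `task_spec_ssb_noexec()` branch above re-enables speculative store bypass at execve() for tasks that had requested `PR_SPEC_DISABLE_NOEXEC`. A hedged userspace sketch of the corresponding prctl() interface (constants copied from &lt;linux/prctl.h&gt; in case older libc headers lack them; whether the noexec mode is supported depends on the kernel and mitigation state):

```c
#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_SET_SPECULATION_CTRL
#define PR_GET_SPECULATION_CTRL	52
#define PR_SET_SPECULATION_CTRL	53
#define PR_SPEC_STORE_BYPASS	0
#define PR_SPEC_PRCTL		(1UL << 0)
#define PR_SPEC_DISABLE_NOEXEC	(1UL << 4)
#endif

int main(void)
{
	long state;

	/* Disable speculative store bypass for this task only until the
	 * next execve(); the kernel undoes it in the task_spec_ssb_noexec()
	 * branch of arch_setup_new_exec() above. */
	if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
		  PR_SPEC_DISABLE_NOEXEC, 0, 0))
		perror("PR_SET_SPECULATION_CTRL");

	state = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);
	printf("SSB state: %#lx (per-task control: %s)\n", state,
	       (state & PR_SPEC_PRCTL) ? "available" : "unavailable");
	return 0;
}
```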
63f0c603 CM | 655 | |
63f0c603 CM | 656 | #ifdef CONFIG_ARM64_TAGGED_ADDR_ABI |
63f0c603 CM | 657 | /* |
63f0c603 CM | 658 | * Control the relaxed ABI allowing tagged user addresses into the kernel. |
63f0c603 CM | 659 | */ |
413235fc | 660 | static unsigned int tagged_addr_disabled; |
63f0c603 | 661 | |
93f067f6 | 662 | long set_tagged_addr_ctrl(struct task_struct *task, unsigned long arg) |
63f0c603 | 663 | { |
1c101da8 | 664 | unsigned long valid_mask = PR_TAGGED_ADDR_ENABLE; |
93f067f6 | 665 | struct thread_info *ti = task_thread_info(task); |
1c101da8 | 666 | |
93f067f6 | 667 | if (is_compat_thread(ti)) |
63f0c603 | 668 | return -EINVAL; |
1c101da8 CM | 669 | |
1c101da8 CM | 670 | if (system_supports_mte()) |
766121ba MB | 671 | valid_mask |= PR_MTE_TCF_SYNC | PR_MTE_TCF_ASYNC \ |
766121ba MB | 672 | | PR_MTE_TAG_MASK; |
1c101da8 CM | 673 | |
1c101da8 CM | 674 | if (arg & ~valid_mask) |
63f0c603 CM | 675 | return -EINVAL; |
63f0c603 CM | 676 | |
413235fc CM | 677 | /* |
413235fc CM | 678 | * Do not allow the enabling of the tagged address ABI if globally |
413235fc CM | 679 | * disabled via sysctl abi.tagged_addr_disabled. |
413235fc CM | 680 | */ |
413235fc CM | 681 | if (arg & PR_TAGGED_ADDR_ENABLE && tagged_addr_disabled) |
413235fc CM | 682 | return -EINVAL; |
413235fc CM | 683 | |
93f067f6 | 684 | if (set_mte_ctrl(task, arg) != 0) |
1c101da8 CM | 685 | return -EINVAL; |
1c101da8 CM | 686 | |
93f067f6 | 687 | update_ti_thread_flag(ti, TIF_TAGGED_ADDR, arg & PR_TAGGED_ADDR_ENABLE); |
63f0c603 CM | 688 | |
63f0c603 CM | 689 | return 0; |
63f0c603 CM | 690 | } |
63f0c603 CM | 691 | |
93f067f6 | 692 | long get_tagged_addr_ctrl(struct task_struct *task) |
63f0c603 | 693 | { |
1c101da8 | 694 | long ret = 0; |
93f067f6 | 695 | struct thread_info *ti = task_thread_info(task); |
1c101da8 | 696 | |
93f067f6 | 697 | if (is_compat_thread(ti)) |
63f0c603 CM | 698 | return -EINVAL; |
63f0c603 CM | 699 | |
93f067f6 | 700 | if (test_ti_thread_flag(ti, TIF_TAGGED_ADDR)) |
1c101da8 | 701 | ret = PR_TAGGED_ADDR_ENABLE; |
63f0c603 | 702 | |
93f067f6 | 703 | ret |= get_mte_ctrl(task); |
1c101da8 CM | 704 | |
1c101da8 CM | 705 | return ret; |
63f0c603 CM | 706 | } |
63f0c603 CM | 707 | |
63f0c603 CM | 708 | /* |
63f0c603 CM | 709 | * Global sysctl to disable the tagged user addresses support. This control |
63f0c603 CM | 710 | * only prevents the tagged address ABI enabling via prctl() and does not |
63f0c603 CM | 711 | * disable it for tasks that already opted in to the relaxed ABI. |
63f0c603 CM | 712 | */ |
63f0c603 CM | 713 | |
63f0c603 CM | 714 | static struct ctl_table tagged_addr_sysctl_table[] = { |
63f0c603 CM | 715 | { |
413235fc | 716 | .procname = "tagged_addr_disabled", |
63f0c603 | 717 | .mode = 0644, |
413235fc | 718 | .data = &tagged_addr_disabled, |
63f0c603 CM | 719 | .maxlen = sizeof(int), |
63f0c603 CM | 720 | .proc_handler = proc_dointvec_minmax, |
2c614c11 MC | 721 | .extra1 = SYSCTL_ZERO, |
2c614c11 MC | 722 | .extra2 = SYSCTL_ONE, |
63f0c603 | 723 | }, |
63f0c603 CM | 724 | }; |
63f0c603 CM | 725 | |
63f0c603 CM | 726 | static int __init tagged_addr_init(void) |
63f0c603 CM | 727 | { |
63f0c603 CM | 728 | if (!register_sysctl("abi", tagged_addr_sysctl_table)) |
63f0c603 CM | 729 | return -EINVAL; |
63f0c603 CM | 730 | return 0; |
63f0c603 CM | 731 | } |
63f0c603 CM | 732 | |
63f0c603 CM | 733 | core_initcall(tagged_addr_init); |
63f0c603 CM | 734 | #endif /* CONFIG_ARM64_TAGGED_ADDR_ABI */ |
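Seen from userspace, set_tagged_addr_ctrl()/get_tagged_addr_ctrl() back the PR_SET_TAGGED_ADDR_CTRL/PR_GET_TAGGED_ADDR_CTRL prctls. A minimal sketch, assuming a kernel built with CONFIG_ARM64_TAGGED_ADDR_ABI and the `abi.tagged_addr_disabled` sysctl left at 0:

```c
#include <stdio.h>
#include <stdlib.h>
#include <sys/prctl.h>

#ifndef PR_SET_TAGGED_ADDR_CTRL
#define PR_SET_TAGGED_ADDR_CTRL	55
#define PR_GET_TAGGED_ADDR_CTRL	56
#define PR_TAGGED_ADDR_ENABLE	(1UL << 0)
#endif

int main(void)
{
	int *p, *tagged;

	/* Opt in to the relaxed ABI; lands in set_tagged_addr_ctrl() above. */
	if (prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE, 0, 0, 0))
		return perror("PR_SET_TAGGED_ADDR_CTRL"), EXIT_FAILURE;

	p = malloc(sizeof(*p));
	if (!p)
		return EXIT_FAILURE;
	/* Loads and stores through a tagged pointer work regardless (TBI);
	 * the prctl is what makes syscalls accept tagged addresses too. */
	tagged = (int *)((unsigned long)p | (0x2aUL << 56));
	*tagged = 42;
	printf("ctrl=%#lx, *tagged=%d\n",
	       (unsigned long)prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0),
	       *tagged);
	free(p);
	return EXIT_SUCCESS;
}
```

With MTE present, the same prctl also accepts the PR_MTE_TCF_* and PR_MTE_TAG_MASK bits validated in set_tagged_addr_ctrl() above.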
19c95f26 | 735 | |
ab7876a9 DM | 736 | #ifdef CONFIG_BINFMT_ELF |
ab7876a9 DM | 737 | int arch_elf_adjust_prot(int prot, const struct arch_elf_state *state, |
ab7876a9 DM | 738 | bool has_interp, bool is_interp) |
ab7876a9 DM | 739 | { |
5d1b631c MB | 740 | /* |
5d1b631c MB | 741 | * For dynamically linked executables the interpreter is |
5d1b631c MB | 742 | * responsible for setting PROT_BTI on everything except |
5d1b631c MB | 743 | * itself. |
5d1b631c MB | 744 | */ |
ab7876a9 DM | 745 | if (is_interp != has_interp) |
ab7876a9 DM | 746 | return prot; |
ab7876a9 DM | 747 | |
ab7876a9 DM | 748 | if (!(state->flags & ARM64_ELF_BTI)) |
ab7876a9 DM | 749 | return prot; |
ab7876a9 DM | 750 | |
ab7876a9 DM | 751 | if (prot & PROT_EXEC) |
ab7876a9 DM | 752 | prot |= PROT_BTI; |
ab7876a9 DM | 753 | |
ab7876a9 DM | 754 | return prot; |
ab7876a9 DM | 755 | } |
ab7876a9 DM | 756 | #endif |
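arch_elf_adjust_prot() adds PROT_BTI on behalf of the ELF loader; a JIT or test program can request a guarded page directly with the same flag. A sketch, defining PROT_BTI locally in case the libc headers predate it:

```c
#include <stdio.h>
#include <sys/mman.h>

#ifndef PROT_BTI
#define PROT_BTI	0x10	/* arm64 <asm/mman.h> */
#endif

int main(void)
{
	/* Map a page the way the loader does for a BTI-annotated executable
	 * segment: PROT_EXEC plus PROT_BTI, mirroring arch_elf_adjust_prot()
	 * above. Indirect branches into this page must then land on BTI
	 * landing pads. */
	void *p = mmap(NULL, 4096, PROT_READ | PROT_EXEC | PROT_BTI,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return perror("mmap"), 1;
	printf("guarded executable page at %p\n", p);
	munmap(p, 4096);
	return 0;
}
```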