// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/kernel/process.c
 *
 * Original Copyright (C) 1995 Linus Torvalds
 * Copyright (C) 1996-2000 Russell King - Converted to ARM.
 * Copyright (C) 2012 ARM Ltd.
 */

#include <stdarg.h>

#include <linux/compat.h>
#include <linux/efi.h>
#include <linux/elf.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/kernel.h>
#include <linux/lockdep.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/sysctl.h>
#include <linux/unistd.h>
#include <linux/user.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/elfcore.h>
#include <linux/pm.h>
#include <linux/tick.h>
#include <linux/utsname.h>
#include <linux/uaccess.h>
#include <linux/random.h>
#include <linux/hw_breakpoint.h>
#include <linux/personality.h>
#include <linux/notifier.h>
#include <trace/events/power.h>
#include <linux/percpu.h>
#include <linux/thread_info.h>
#include <linux/prctl.h>

#include <asm/alternative.h>
#include <asm/arch_gicv3.h>
#include <asm/compat.h>
#include <asm/cpufeature.h>
#include <asm/cacheflush.h>
#include <asm/exec.h>
#include <asm/fpsimd.h>
#include <asm/mmu_context.h>
#include <asm/processor.h>
#include <asm/pointer_auth.h>
#include <asm/stacktrace.h>

#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK)
#include <linux/stackprotector.h>
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif
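
/*
 * With -fstack-protector, instrumented functions load their canary from
 * this global __stack_chk_guard; the per-task variant derives the guard
 * from the current task instead, so the global is only needed when
 * CONFIG_STACKPROTECTOR_PER_TASK is off. A rough sketch of the emitted
 * epilogue check:
 *
 *	if (canary_on_stack != __stack_chk_guard)
 *		__stack_chk_fail();
 */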

/*
 * Function pointers to optional machine specific functions
 */
void (*pm_power_off)(void);
EXPORT_SYMBOL_GPL(pm_power_off);

void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);

static void __cpu_do_idle(void)
{
	dsb(sy);
	wfi();
}

static void __cpu_do_idle_irqprio(void)
{
	unsigned long pmr;
	unsigned long daif_bits;

	daif_bits = read_sysreg(daif);
	write_sysreg(daif_bits | PSR_I_BIT, daif);

	/*
	 * Unmask PMR before going idle to make sure interrupts can
	 * be raised.
	 */
	pmr = gic_read_pmr();
	gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);

	__cpu_do_idle();

	gic_write_pmr(pmr);
	write_sysreg(daif_bits, daif);
}

/*
 * cpu_do_idle()
 *
 * Idle the processor (wait for interrupt).
 *
 * If the CPU supports priority masking we must do additional work to
 * ensure that interrupts are not masked at the PMR (because the core will
 * not wake up if we block the wake-up signal in the interrupt controller).
 */
void cpu_do_idle(void)
{
	if (system_uses_irq_prio_masking())
		__cpu_do_idle_irqprio();
	else
		__cpu_do_idle();
}

/*
 * This is our default idle handler.
 */
void arch_cpu_idle(void)
{
	/*
	 * This should do all the clock switching and wait for interrupt
	 * tricks.
	 */
	trace_cpu_idle_rcuidle(1, smp_processor_id());
	cpu_do_idle();
	local_irq_enable();
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
}
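
/*
 * The generic idle loop (do_idle()) calls arch_cpu_idle() with interrupts
 * disabled and expects it to return with them re-enabled, which is why
 * cpu_do_idle() is followed by local_irq_enable() above.
 */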

#ifdef CONFIG_HOTPLUG_CPU
void arch_cpu_idle_dead(void)
{
	cpu_die();
}
#endif

/*
 * Called by kexec, immediately prior to machine_kexec().
 *
 * This must completely disable all secondary CPUs; simply causing those CPUs
 * to execute e.g. a RAM-based pin loop is not sufficient. This allows the
 * kexec'd kernel to use any and all RAM as it sees fit, without having to
 * avoid any code or data used by any SW CPU pin loop. The CPU hotplug
 * functionality embodied in smp_shutdown_nonboot_cpus() is used to achieve
 * this.
 */
void machine_shutdown(void)
{
	smp_shutdown_nonboot_cpus(reboot_cpu);
}

/*
 * Halting simply requires that the secondary CPUs stop performing any
 * activity (executing tasks, handling interrupts). smp_send_stop()
 * achieves this.
 */
void machine_halt(void)
{
	local_irq_disable();
	smp_send_stop();
	while (1);
}

/*
 * Power-off simply requires that the secondary CPUs stop performing any
 * activity (executing tasks, handling interrupts). smp_send_stop()
 * achieves this. When the system power is turned off, it will take all CPUs
 * with it.
 */
void machine_power_off(void)
{
	local_irq_disable();
	smp_send_stop();
	if (pm_power_off)
		pm_power_off();
}

/*
 * Restart requires that the secondary CPUs stop performing any activity
 * while the primary CPU resets the system. Systems with multiple CPUs must
 * provide a HW restart implementation, to ensure that all CPUs reset at once.
 * This is required so that any code running after reset on the primary CPU
 * doesn't have to co-ordinate with other CPUs to ensure they aren't still
 * executing pre-reset code, and using RAM that the primary CPU's code wishes
 * to use. Implementing such co-ordination would be essentially impossible.
 */
void machine_restart(char *cmd)
{
	/* Disable interrupts first */
	local_irq_disable();
	smp_send_stop();

	/*
	 * UpdateCapsule() depends on the system being reset via
	 * ResetSystem().
	 */
	if (efi_enabled(EFI_RUNTIME_SERVICES))
		efi_reboot(reboot_mode, NULL);

	/* Now call the architecture specific reboot code. */
	if (arm_pm_restart)
		arm_pm_restart(reboot_mode, cmd);
	else
		do_kernel_restart(cmd);

	/*
	 * Whoops - the architecture was unable to reboot.
	 */
	printk("Reboot failed -- System halted\n");
	while (1);
}

#define bstr(suffix, str) [PSR_BTYPE_ ## suffix >> PSR_BTYPE_SHIFT] = str
static const char *const btypes[] = {
	bstr(NONE, "--"),
	bstr(  JC, "jc"),
	bstr(   C, "-c"),
	bstr(  J , "j-")
};
#undef bstr

static void print_pstate(struct pt_regs *regs)
{
	u64 pstate = regs->pstate;

	if (compat_user_mode(regs)) {
		printk("pstate: %08llx (%c%c%c%c %c %s %s %c%c%c)\n",
			pstate,
			pstate & PSR_AA32_N_BIT ? 'N' : 'n',
			pstate & PSR_AA32_Z_BIT ? 'Z' : 'z',
			pstate & PSR_AA32_C_BIT ? 'C' : 'c',
			pstate & PSR_AA32_V_BIT ? 'V' : 'v',
			pstate & PSR_AA32_Q_BIT ? 'Q' : 'q',
			pstate & PSR_AA32_T_BIT ? "T32" : "A32",
			pstate & PSR_AA32_E_BIT ? "BE" : "LE",
			pstate & PSR_AA32_A_BIT ? 'A' : 'a',
			pstate & PSR_AA32_I_BIT ? 'I' : 'i',
			pstate & PSR_AA32_F_BIT ? 'F' : 'f');
	} else {
		const char *btype_str = btypes[(pstate & PSR_BTYPE_MASK) >>
					       PSR_BTYPE_SHIFT];

		printk("pstate: %08llx (%c%c%c%c %c%c%c%c %cPAN %cUAO BTYPE=%s)\n",
			pstate,
			pstate & PSR_N_BIT ? 'N' : 'n',
			pstate & PSR_Z_BIT ? 'Z' : 'z',
			pstate & PSR_C_BIT ? 'C' : 'c',
			pstate & PSR_V_BIT ? 'V' : 'v',
			pstate & PSR_D_BIT ? 'D' : 'd',
			pstate & PSR_A_BIT ? 'A' : 'a',
			pstate & PSR_I_BIT ? 'I' : 'i',
			pstate & PSR_F_BIT ? 'F' : 'f',
			pstate & PSR_PAN_BIT ? '+' : '-',
			pstate & PSR_UAO_BIT ? '+' : '-',
			btype_str);
	}
}

void __show_regs(struct pt_regs *regs)
{
	int i, top_reg;
	u64 lr, sp;

	if (compat_user_mode(regs)) {
		lr = regs->compat_lr;
		sp = regs->compat_sp;
		top_reg = 12;
	} else {
		lr = regs->regs[30];
		sp = regs->sp;
		top_reg = 29;
	}

	show_regs_print_info(KERN_DEFAULT);
	print_pstate(regs);

	if (!user_mode(regs)) {
		printk("pc : %pS\n", (void *)regs->pc);
		printk("lr : %pS\n", (void *)ptrauth_strip_insn_pac(lr));
	} else {
		printk("pc : %016llx\n", regs->pc);
		printk("lr : %016llx\n", lr);
	}

	printk("sp : %016llx\n", sp);

	if (system_uses_irq_prio_masking())
		printk("pmr_save: %08llx\n", regs->pmr_save);

	i = top_reg;

	while (i >= 0) {
		printk("x%-2d: %016llx ", i, regs->regs[i]);
		i--;

		if (i % 2 == 0) {
			pr_cont("x%-2d: %016llx ", i, regs->regs[i]);
			i--;
		}

		pr_cont("\n");
	}
}

void show_regs(struct pt_regs *regs)
{
	__show_regs(regs);
	dump_backtrace(regs, NULL);
}

static void tls_thread_flush(void)
{
	write_sysreg(0, tpidr_el0);

	if (is_compat_task()) {
		current->thread.uw.tp_value = 0;

		/*
		 * We need to ensure ordering between the shadow state and the
		 * hardware state, so that we don't corrupt the hardware state
		 * with a stale shadow state during context switch.
		 */
		barrier();
		write_sysreg(0, tpidrro_el0);
	}
}

static void flush_tagged_addr_state(void)
{
	if (IS_ENABLED(CONFIG_ARM64_TAGGED_ADDR_ABI))
		clear_thread_flag(TIF_TAGGED_ADDR);
}

void flush_thread(void)
{
	fpsimd_flush_thread();
	tls_thread_flush();
	flush_ptrace_hw_breakpoint(current);
	flush_tagged_addr_state();
}

void release_thread(struct task_struct *dead_task)
{
}

void arch_release_task_struct(struct task_struct *tsk)
{
	fpsimd_release_task(tsk);
}

int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	if (current->mm)
		fpsimd_preserve_current_state();
	*dst = *src;

	/* We rely on the above assignment to initialize dst's thread_flags: */
	BUILD_BUG_ON(!IS_ENABLED(CONFIG_THREAD_INFO_IN_TASK));

	/*
	 * Detach src's sve_state (if any) from dst so that it does not
	 * get erroneously used or freed prematurely. dst's sve_state
	 * will be allocated on demand later on if dst uses SVE.
	 * For consistency, also clear TIF_SVE here: this could be done
	 * later in copy_process(), but to avoid tripping up future
	 * maintainers it is best not to leave TIF_SVE and sve_state in
	 * an inconsistent state, even temporarily.
	 */
	dst->thread.sve_state = NULL;
	clear_tsk_thread_flag(dst, TIF_SVE);

	return 0;
}

asmlinkage void ret_from_fork(void) asm("ret_from_fork");

int copy_thread_tls(unsigned long clone_flags, unsigned long stack_start,
		unsigned long stk_sz, struct task_struct *p, unsigned long tls)
{
	struct pt_regs *childregs = task_pt_regs(p);

	memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context));

	/*
	 * In case p was allocated the same task_struct pointer as some
	 * other recently-exited task, make sure p is disassociated from
	 * any cpu that may have run that now-exited task recently.
	 * Otherwise we could erroneously skip reloading the FPSIMD
	 * registers for p.
	 */
	fpsimd_flush_task_state(p);

	ptrauth_thread_init_kernel(p);

	if (likely(!(p->flags & PF_KTHREAD))) {
		*childregs = *current_pt_regs();
		childregs->regs[0] = 0;

		/*
		 * Read the current TLS pointer from tpidr_el0 as it may be
		 * out-of-sync with the saved value.
		 */
		*task_user_tls(p) = read_sysreg(tpidr_el0);

		if (stack_start) {
			if (is_compat_thread(task_thread_info(p)))
				childregs->compat_sp = stack_start;
			else
				childregs->sp = stack_start;
		}

		/*
		 * If a TLS pointer was passed to clone, use it for the new
		 * thread.
		 */
		if (clone_flags & CLONE_SETTLS)
			p->thread.uw.tp_value = tls;
	} else {
		memset(childregs, 0, sizeof(struct pt_regs));
		childregs->pstate = PSR_MODE_EL1h;
		if (IS_ENABLED(CONFIG_ARM64_UAO) &&
		    cpus_have_const_cap(ARM64_HAS_UAO))
			childregs->pstate |= PSR_UAO_BIT;

		if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE)
			set_ssbs_bit(childregs);

		if (system_uses_irq_prio_masking())
			childregs->pmr_save = GIC_PRIO_IRQON;

		p->thread.cpu_context.x19 = stack_start;
		p->thread.cpu_context.x20 = stk_sz;
	}
	p->thread.cpu_context.pc = (unsigned long)ret_from_fork;
	p->thread.cpu_context.sp = (unsigned long)childregs;

	ptrace_hw_copy_thread(p);

	return 0;
}
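
/*
 * For kernel threads, the x19/x20 values set up above are consumed by
 * ret_from_fork, which roughly does:
 *
 *	bl	schedule_tail
 *	cbz	x19, 1f		// user thread: x19 is 0
 *	mov	x0, x20		// kernel thread: call x19(x20)
 *	blr	x19
 *	1:	b	ret_to_user
 *
 * (sketch only; see arch/arm64/kernel/entry.S for the real code)
 */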

void tls_preserve_current_state(void)
{
	*task_user_tls(current) = read_sysreg(tpidr_el0);
}

static void tls_thread_switch(struct task_struct *next)
{
	tls_preserve_current_state();

	if (is_compat_thread(task_thread_info(next)))
		write_sysreg(next->thread.uw.tp_value, tpidrro_el0);
	else if (!arm64_kernel_unmapped_at_el0())
		write_sysreg(0, tpidrro_el0);

	write_sysreg(*task_user_tls(next), tpidr_el0);
}
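
/*
 * Userspace reads its TLS pointer directly, with no syscall: a 64-bit
 * task uses "mrs xN, tpidr_el0", while a compat (AArch32) task reads the
 * read-only TPIDRURO, which is backed by the tpidrro_el0 value switched
 * above.
 */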

/* Restore the UAO state depending on next's addr_limit */
void uao_thread_switch(struct task_struct *next)
{
	if (IS_ENABLED(CONFIG_ARM64_UAO)) {
		if (task_thread_info(next)->addr_limit == KERNEL_DS)
			asm(ALTERNATIVE("nop", SET_PSTATE_UAO(1), ARM64_HAS_UAO));
		else
			asm(ALTERNATIVE("nop", SET_PSTATE_UAO(0), ARM64_HAS_UAO));
	}
}

/*
 * Force SSBS state on context-switch, since it may be lost after migrating
 * from a CPU which treats the bit as RES0 in a heterogeneous system.
 */
static void ssbs_thread_switch(struct task_struct *next)
{
	struct pt_regs *regs = task_pt_regs(next);

	/*
	 * Nothing to do for kernel threads, but 'regs' may be junk
	 * (e.g. idle task) so check the flags and bail early.
	 */
	if (unlikely(next->flags & PF_KTHREAD))
		return;

	/*
	 * If all CPUs implement the SSBS extension, then we just need to
	 * context-switch the PSTATE field.
	 */
	if (cpu_have_feature(cpu_feature(SSBS)))
		return;

	/* If the mitigation is enabled, then we leave SSBS clear. */
	if ((arm64_get_ssbd_state() == ARM64_SSBD_FORCE_ENABLE) ||
	    test_tsk_thread_flag(next, TIF_SSBD))
		return;

	if (compat_user_mode(regs))
		set_compat_ssbs_bit(regs);
	else if (user_mode(regs))
		set_ssbs_bit(regs);
}

/*
 * We store our current task in sp_el0, which is clobbered by userspace. Keep a
 * shadow copy so that we can restore this upon entry from userspace.
 *
 * This is *only* for exception entry from EL0, and is not valid until we
 * __switch_to() a user task.
 */
DEFINE_PER_CPU(struct task_struct *, __entry_task);

static void entry_task_switch(struct task_struct *next)
{
	__this_cpu_write(__entry_task, next);
}

/*
 * Thread switching.
 */
__notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
				struct task_struct *next)
{
	struct task_struct *last;

	fpsimd_thread_switch(next);
	tls_thread_switch(next);
	hw_breakpoint_thread_switch(next);
	contextidr_thread_switch(next);
	entry_task_switch(next);
	uao_thread_switch(next);
	ssbs_thread_switch(next);

	/*
	 * Complete any pending TLB or cache maintenance on this CPU in case
	 * the thread migrates to a different CPU.
	 * This full barrier is also required by the membarrier system
	 * call.
	 */
	dsb(ish);

	/* the actual thread switch */
	last = cpu_switch_to(prev, next);

	return last;
}

unsigned long get_wchan(struct task_struct *p)
{
	struct stackframe frame;
	unsigned long stack_page, ret = 0;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	stack_page = (unsigned long)try_get_task_stack(p);
	if (!stack_page)
		return 0;

	start_backtrace(&frame, thread_saved_fp(p), thread_saved_pc(p));

	do {
		if (unwind_frame(p, &frame))
			goto out;
		if (!in_sched_functions(frame.pc)) {
			ret = frame.pc;
			goto out;
		}
	} while (count++ < 16);

out:
	put_task_stack(p);
	return ret;
}
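
/*
 * Example: get_wchan() backs /proc/<pid>/wchan, so a task blocked in
 * sleep(3) typically reports a symbol such as hrtimer_nanosleep rather
 * than the scheduler internals skipped by in_sched_functions().
 */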

unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() & ~PAGE_MASK;
	return sp & ~0xf;
}
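
/*
 * Worked example (4K pages): "get_random_int() & ~PAGE_MASK" yields a
 * random offset in [0, 4095], so an sp of 0xffff_f000 may become e.g.
 * 0xffff_e2a4, which "& ~0xf" then rounds down to 0xffff_e2a0, keeping
 * the stack 16-byte aligned as the AAPCS64 requires.
 */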

/*
 * Called from setup_new_exec() after (COMPAT_)SET_PERSONALITY.
 */
void arch_setup_new_exec(void)
{
	current->mm->context.flags = is_compat_task() ? MMCF_AARCH32 : 0;

	ptrauth_thread_init_user(current);
}

#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
/*
 * Control the relaxed ABI allowing tagged user addresses into the kernel.
 */
static unsigned int tagged_addr_disabled;

long set_tagged_addr_ctrl(unsigned long arg)
{
	if (is_compat_task())
		return -EINVAL;
	if (arg & ~PR_TAGGED_ADDR_ENABLE)
		return -EINVAL;

	/*
	 * Do not allow the enabling of the tagged address ABI if globally
	 * disabled via sysctl abi.tagged_addr_disabled.
	 */
	if (arg & PR_TAGGED_ADDR_ENABLE && tagged_addr_disabled)
		return -EINVAL;

	update_thread_flag(TIF_TAGGED_ADDR, arg & PR_TAGGED_ADDR_ENABLE);

	return 0;
}

long get_tagged_addr_ctrl(void)
{
	if (is_compat_task())
		return -EINVAL;

	if (test_thread_flag(TIF_TAGGED_ADDR))
		return PR_TAGGED_ADDR_ENABLE;

	return 0;
}
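
/*
 * Illustrative userspace usage (assuming the <sys/prctl.h> definitions):
 *
 *	prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE, 0, 0, 0);
 *
 * after which syscalls accept user pointers carrying a non-zero top byte;
 * prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0) reports the current setting.
 */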

/*
 * Global sysctl to disable tagged user address support. This control only
 * prevents enabling the tagged address ABI via prctl() and does not disable
 * it for tasks that have already opted in to the relaxed ABI.
 */
static struct ctl_table tagged_addr_sysctl_table[] = {
	{
		.procname	= "tagged_addr_disabled",
		.mode		= 0644,
		.data		= &tagged_addr_disabled,
		.maxlen		= sizeof(int),
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
	{ }
};

static int __init tagged_addr_init(void)
{
	if (!register_sysctl("abi", tagged_addr_sysctl_table))
		return -EINVAL;
	return 0;
}

core_initcall(tagged_addr_init);
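
/*
 * Illustrative usage: the opt-in can be blocked system-wide with
 *
 *	# echo 1 > /proc/sys/abi/tagged_addr_disabled
 *
 * after which PR_SET_TAGGED_ADDR_CTRL enables fail with -EINVAL, while
 * tasks that already enabled the ABI are unaffected.
 */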
#endif /* CONFIG_ARM64_TAGGED_ADDR_ABI */

asmlinkage void __sched arm64_preempt_schedule_irq(void)
{
	lockdep_assert_irqs_disabled();

	/*
	 * Preempting a task from an IRQ means we leave copies of PSTATE
	 * on the stack. cpufeature's enable calls may modify PSTATE, but
	 * resuming one of these preempted tasks would undo those changes.
	 *
	 * Only allow a task to be preempted once cpufeatures have been
	 * enabled.
	 */
	if (system_capabilities_finalized())
		preempt_schedule_irq();
}

#ifdef CONFIG_BINFMT_ELF
int arch_elf_adjust_prot(int prot, const struct arch_elf_state *state,
			 bool has_interp, bool is_interp)
{
	/*
	 * For dynamically linked executables the interpreter is
	 * responsible for setting PROT_BTI on everything except
	 * itself.
	 */
	if (is_interp != has_interp)
		return prot;

	if (!(state->flags & ARM64_ELF_BTI))
		return prot;

	if (prot & PROT_EXEC)
		prot |= PROT_BTI;

	return prot;
}
#endif
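
/*
 * ARM64_ELF_BTI is expected to be set while the ELF loader parses the
 * binary's GNU property note (GNU_PROPERTY_AARCH64_FEATURE_1_BTI), so a
 * BTI-enabled executable gets PROT_BTI added to its PROT_EXEC segments
 * here, enabling branch-target checks for those mappings.
 */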