// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/kernel/process.c
 *
 * Original Copyright (C) 1995 Linus Torvalds
 * Copyright (C) 1996-2000 Russell King - Converted to ARM.
 * Copyright (C) 2012 ARM Ltd.
 */

#include <stdarg.h>

#include <linux/compat.h>
#include <linux/efi.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/sysctl.h>
#include <linux/unistd.h>
#include <linux/user.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/elfcore.h>
#include <linux/pm.h>
#include <linux/tick.h>
#include <linux/utsname.h>
#include <linux/uaccess.h>
#include <linux/random.h>
#include <linux/hw_breakpoint.h>
#include <linux/personality.h>
#include <linux/notifier.h>
#include <trace/events/power.h>
#include <linux/percpu.h>
#include <linux/thread_info.h>
#include <linux/prctl.h>

#include <asm/alternative.h>
#include <asm/arch_gicv3.h>
#include <asm/compat.h>
#include <asm/cacheflush.h>
#include <asm/exec.h>
#include <asm/fpsimd.h>
#include <asm/mmu_context.h>
#include <asm/processor.h>
#include <asm/pointer_auth.h>
#include <asm/stacktrace.h>

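/*
 * Global stack-protector canary; only needed when the canary is not kept
 * per task (i.e. CONFIG_STACKPROTECTOR_PER_TASK is not set).
 */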
#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK)
#include <linux/stackprotector.h>
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif

/*
 * Function pointers to optional machine specific functions
 */
void (*pm_power_off)(void);
EXPORT_SYMBOL_GPL(pm_power_off);

void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);

static void __cpu_do_idle(void)
{
	dsb(sy);
	wfi();
}

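/*
 * Idle with GIC priority masking (pseudo-NMI) in use: interrupts are masked
 * at PSTATE.I first and the PMR is then opened up, so that a pending
 * interrupt can still wake the WFI below without being taken before the
 * previous masking state is restored.
 */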
static void __cpu_do_idle_irqprio(void)
{
	unsigned long pmr;
	unsigned long daif_bits;

	daif_bits = read_sysreg(daif);
	write_sysreg(daif_bits | PSR_I_BIT, daif);

	/*
	 * Unmask PMR before going idle to make sure interrupts can
	 * be raised.
	 */
	pmr = gic_read_pmr();
	gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);

	__cpu_do_idle();

	gic_write_pmr(pmr);
	write_sysreg(daif_bits, daif);
}

/*
 * cpu_do_idle()
 *
 *	Idle the processor (wait for interrupt).
 *
 * If the CPU supports priority masking we must do additional work to
 * ensure that interrupts are not masked at the PMR (because the core will
 * not wake up if we block the wake up signal in the interrupt controller).
 */
void cpu_do_idle(void)
{
	if (system_uses_irq_prio_masking())
		__cpu_do_idle_irqprio();
	else
		__cpu_do_idle();
}

/*
 * This is our default idle handler.
 */
void arch_cpu_idle(void)
{
	/*
	 * This should do all the clock switching and wait for interrupt
	 * tricks
	 */
	trace_cpu_idle_rcuidle(1, smp_processor_id());
	cpu_do_idle();
	local_irq_enable();
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
}

#ifdef CONFIG_HOTPLUG_CPU
void arch_cpu_idle_dead(void)
{
	cpu_die();
}
#endif

/*
 * Called by kexec, immediately prior to machine_kexec().
 *
 * This must completely disable all secondary CPUs; simply causing those CPUs
 * to execute e.g. a RAM-based pin loop is not sufficient. This allows the
 * kexec'd kernel to use any and all RAM as it sees fit, without having to
 * avoid any code or data used by any SW CPU pin loop. The CPU hotplug
 * functionality embodied in disable_nonboot_cpus() is used to achieve this.
 */
void machine_shutdown(void)
{
	disable_nonboot_cpus();
}

/*
 * Halting simply requires that the secondary CPUs stop performing any
 * activity (executing tasks, handling interrupts). smp_send_stop()
 * achieves this.
 */
void machine_halt(void)
{
	local_irq_disable();
	smp_send_stop();
	while (1);
}

/*
 * Power-off simply requires that the secondary CPUs stop performing any
 * activity (executing tasks, handling interrupts). smp_send_stop()
 * achieves this. When the system power is turned off, it will take all CPUs
 * with it.
 */
void machine_power_off(void)
{
	local_irq_disable();
	smp_send_stop();
	if (pm_power_off)
		pm_power_off();
}

/*
 * Restart requires that the secondary CPUs stop performing any activity
 * while the primary CPU resets the system. Systems with multiple CPUs must
 * provide a HW restart implementation, to ensure that all CPUs reset at once.
 * This is required so that any code running after reset on the primary CPU
 * doesn't have to co-ordinate with other CPUs to ensure they aren't still
 * executing pre-reset code, and using RAM that the primary CPU's code wishes
 * to use. Implementing such co-ordination would be essentially impossible.
 */
void machine_restart(char *cmd)
{
	/* Disable interrupts first */
	local_irq_disable();
	smp_send_stop();

	/*
	 * UpdateCapsule() depends on the system being reset via
	 * ResetSystem().
	 */
	if (efi_enabled(EFI_RUNTIME_SERVICES))
		efi_reboot(reboot_mode, NULL);

	/* Now call the architecture specific reboot code. */
	if (arm_pm_restart)
		arm_pm_restart(reboot_mode, cmd);
	else
		do_kernel_restart(cmd);

	/*
	 * Whoops - the architecture was unable to reboot.
	 */
	printk("Reboot failed -- System halted\n");
	while (1);
}

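/*
 * Print the saved PSTATE value in human-readable form, decoding the
 * condition flags and the exception mask/status bits for either an AArch32
 * or an AArch64 user context.
 */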
static void print_pstate(struct pt_regs *regs)
{
	u64 pstate = regs->pstate;

	if (compat_user_mode(regs)) {
		printk("pstate: %08llx (%c%c%c%c %c %s %s %c%c%c)\n",
			pstate,
			pstate & PSR_AA32_N_BIT ? 'N' : 'n',
			pstate & PSR_AA32_Z_BIT ? 'Z' : 'z',
			pstate & PSR_AA32_C_BIT ? 'C' : 'c',
			pstate & PSR_AA32_V_BIT ? 'V' : 'v',
			pstate & PSR_AA32_Q_BIT ? 'Q' : 'q',
			pstate & PSR_AA32_T_BIT ? "T32" : "A32",
			pstate & PSR_AA32_E_BIT ? "BE" : "LE",
			pstate & PSR_AA32_A_BIT ? 'A' : 'a',
			pstate & PSR_AA32_I_BIT ? 'I' : 'i',
			pstate & PSR_AA32_F_BIT ? 'F' : 'f');
	} else {
		printk("pstate: %08llx (%c%c%c%c %c%c%c%c %cPAN %cUAO)\n",
			pstate,
			pstate & PSR_N_BIT ? 'N' : 'n',
			pstate & PSR_Z_BIT ? 'Z' : 'z',
			pstate & PSR_C_BIT ? 'C' : 'c',
			pstate & PSR_V_BIT ? 'V' : 'v',
			pstate & PSR_D_BIT ? 'D' : 'd',
			pstate & PSR_A_BIT ? 'A' : 'a',
			pstate & PSR_I_BIT ? 'I' : 'i',
			pstate & PSR_F_BIT ? 'F' : 'f',
			pstate & PSR_PAN_BIT ? '+' : '-',
			pstate & PSR_UAO_BIT ? '+' : '-');
	}
}

void __show_regs(struct pt_regs *regs)
{
	int i, top_reg;
	u64 lr, sp;

	if (compat_user_mode(regs)) {
		lr = regs->compat_lr;
		sp = regs->compat_sp;
		top_reg = 12;
	} else {
		lr = regs->regs[30];
		sp = regs->sp;
		top_reg = 29;
	}

	show_regs_print_info(KERN_DEFAULT);
	print_pstate(regs);

	if (!user_mode(regs)) {
		printk("pc : %pS\n", (void *)regs->pc);
		printk("lr : %pS\n", (void *)lr);
	} else {
		printk("pc : %016llx\n", regs->pc);
		printk("lr : %016llx\n", lr);
	}

	printk("sp : %016llx\n", sp);

	if (system_uses_irq_prio_masking())
		printk("pmr_save: %08llx\n", regs->pmr_save);

	i = top_reg;

	while (i >= 0) {
		printk("x%-2d: %016llx ", i, regs->regs[i]);
		i--;

		if (i % 2 == 0) {
			pr_cont("x%-2d: %016llx ", i, regs->regs[i]);
			i--;
		}

		pr_cont("\n");
	}
}

void show_regs(struct pt_regs * regs)
{
	__show_regs(regs);
	dump_backtrace(regs, NULL);
}

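/*
 * Clear the TLS registers when flushing thread state (e.g. on exec()).
 */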
static void tls_thread_flush(void)
{
	write_sysreg(0, tpidr_el0);

	if (is_compat_task()) {
		current->thread.uw.tp_value = 0;

		/*
		 * We need to ensure ordering between the shadow state and the
		 * hardware state, so that we don't corrupt the hardware state
		 * with a stale shadow state during context switch.
		 */
		barrier();
		write_sysreg(0, tpidrro_el0);
	}
}

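/*
 * Drop the tagged address ABI opt-in so that a freshly exec'd image starts
 * out with the default, untagged user address checking.
 */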
static void flush_tagged_addr_state(void)
{
	if (IS_ENABLED(CONFIG_ARM64_TAGGED_ADDR_ABI))
		clear_thread_flag(TIF_TAGGED_ADDR);
}

void flush_thread(void)
{
	fpsimd_flush_thread();
	tls_thread_flush();
	flush_ptrace_hw_breakpoint(current);
	flush_tagged_addr_state();
}

void release_thread(struct task_struct *dead_task)
{
}

void arch_release_task_struct(struct task_struct *tsk)
{
	fpsimd_release_task(tsk);
}

/*
 * src and dst may temporarily have aliased sve_state after task_struct
 * is copied. We cannot fix this properly here, because src may have
 * live SVE state and dst's thread_info may not exist yet, so tweaking
 * either src's or dst's TIF_SVE is not safe.
 *
 * The unaliasing is done in copy_thread() instead. This works because
 * dst is not schedulable or traceable until both of these functions
 * have been called.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	if (current->mm)
		fpsimd_preserve_current_state();
	*dst = *src;

	return 0;
}

asmlinkage void ret_from_fork(void) asm("ret_from_fork");

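/*
 * Set up the register state for a new task. User threads get a copy of the
 * parent's pt_regs with x0 cleared (the child's return value), while kernel
 * threads start from ret_from_fork with the thread function and its argument
 * carried in the x19/x20 slots of the saved cpu_context.
 */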
int copy_thread(unsigned long clone_flags, unsigned long stack_start,
		unsigned long stk_sz, struct task_struct *p)
{
	struct pt_regs *childregs = task_pt_regs(p);

	memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context));

	/*
	 * Unalias p->thread.sve_state (if any) from the parent task
	 * and disable SVE for p, discarding its SVE state:
	 */
	clear_tsk_thread_flag(p, TIF_SVE);
	p->thread.sve_state = NULL;

	/*
	 * In case p was allocated the same task_struct pointer as some
	 * other recently-exited task, make sure p is disassociated from
	 * any cpu that may have run that now-exited task recently.
	 * Otherwise we could erroneously skip reloading the FPSIMD
	 * registers for p.
	 */
	fpsimd_flush_task_state(p);

	if (likely(!(p->flags & PF_KTHREAD))) {
		*childregs = *current_pt_regs();
		childregs->regs[0] = 0;

		/*
		 * Read the current TLS pointer from tpidr_el0 as it may be
		 * out-of-sync with the saved value.
		 */
		*task_user_tls(p) = read_sysreg(tpidr_el0);

		if (stack_start) {
			if (is_compat_thread(task_thread_info(p)))
				childregs->compat_sp = stack_start;
			else
				childregs->sp = stack_start;
		}

		/*
		 * If a TLS pointer was passed to clone (4th argument), use it
		 * for the new thread.
		 */
		if (clone_flags & CLONE_SETTLS)
			p->thread.uw.tp_value = childregs->regs[3];
	} else {
		memset(childregs, 0, sizeof(struct pt_regs));
		childregs->pstate = PSR_MODE_EL1h;
		if (IS_ENABLED(CONFIG_ARM64_UAO) &&
		    cpus_have_const_cap(ARM64_HAS_UAO))
			childregs->pstate |= PSR_UAO_BIT;

		if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE)
			set_ssbs_bit(childregs);

		if (system_uses_irq_prio_masking())
			childregs->pmr_save = GIC_PRIO_IRQON;

		p->thread.cpu_context.x19 = stack_start;
		p->thread.cpu_context.x20 = stk_sz;
	}
	p->thread.cpu_context.pc = (unsigned long)ret_from_fork;
	p->thread.cpu_context.sp = (unsigned long)childregs;

	ptrace_hw_copy_thread(p);

	return 0;
}

void tls_preserve_current_state(void)
{
	*task_user_tls(current) = read_sysreg(tpidr_el0);
}

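/*
 * Switch the TLS registers: save the outgoing task's tpidr_el0 and install
 * the incoming task's value. tpidrro_el0 carries the TLS pointer for compat
 * tasks; for native tasks it only needs clearing when it is not already
 * overwritten by the KPTI entry trampoline.
 */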
static void tls_thread_switch(struct task_struct *next)
{
	tls_preserve_current_state();

	if (is_compat_thread(task_thread_info(next)))
		write_sysreg(next->thread.uw.tp_value, tpidrro_el0);
	else if (!arm64_kernel_unmapped_at_el0())
		write_sysreg(0, tpidrro_el0);

	write_sysreg(*task_user_tls(next), tpidr_el0);
}

/* Restore the UAO state depending on next's addr_limit */
void uao_thread_switch(struct task_struct *next)
{
	if (IS_ENABLED(CONFIG_ARM64_UAO)) {
		if (task_thread_info(next)->addr_limit == KERNEL_DS)
			asm(ALTERNATIVE("nop", SET_PSTATE_UAO(1), ARM64_HAS_UAO));
		else
			asm(ALTERNATIVE("nop", SET_PSTATE_UAO(0), ARM64_HAS_UAO));
	}
}

/*
 * Force SSBS state on context-switch, since it may be lost after migrating
 * from a CPU which treats the bit as RES0 in a heterogeneous system.
 */
static void ssbs_thread_switch(struct task_struct *next)
{
	struct pt_regs *regs = task_pt_regs(next);

	/*
	 * Nothing to do for kernel threads, but 'regs' may be junk
	 * (e.g. idle task) so check the flags and bail early.
	 */
	if (unlikely(next->flags & PF_KTHREAD))
		return;

	/* If the mitigation is enabled, then we leave SSBS clear. */
	if ((arm64_get_ssbd_state() == ARM64_SSBD_FORCE_ENABLE) ||
	    test_tsk_thread_flag(next, TIF_SSBD))
		return;

	if (compat_user_mode(regs))
		set_compat_ssbs_bit(regs);
	else if (user_mode(regs))
		set_ssbs_bit(regs);
}

/*
 * We store our current task in sp_el0, which is clobbered by userspace. Keep a
 * shadow copy so that we can restore this upon entry from userspace.
 *
 * This is *only* for exception entry from EL0, and is not valid until we
 * __switch_to() a user task.
 */
DEFINE_PER_CPU(struct task_struct *, __entry_task);

static void entry_task_switch(struct task_struct *next)
{
	__this_cpu_write(__entry_task, next);
}

/*
 * Thread switching.
 */
__notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
				struct task_struct *next)
{
	struct task_struct *last;

	fpsimd_thread_switch(next);
	tls_thread_switch(next);
	hw_breakpoint_thread_switch(next);
	contextidr_thread_switch(next);
	entry_task_switch(next);
	uao_thread_switch(next);
	ptrauth_thread_switch(next);
	ssbs_thread_switch(next);

	/*
	 * Complete any pending TLB or cache maintenance on this CPU in case
	 * the thread migrates to a different CPU.
	 * This full barrier is also required by the membarrier system
	 * call.
	 */
	dsb(ish);

	/* the actual thread switch */
	last = cpu_switch_to(prev, next);

	return last;
}

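/*
 * Walk the saved call stack of a blocked task and return the first PC that
 * lies outside the scheduler, i.e. the place where the task is waiting.
 * Used for /proc/<pid>/wchan; the walk is bounded to 16 frames.
 */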
unsigned long get_wchan(struct task_struct *p)
{
	struct stackframe frame;
	unsigned long stack_page, ret = 0;
	int count = 0;
	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	stack_page = (unsigned long)try_get_task_stack(p);
	if (!stack_page)
		return 0;

	start_backtrace(&frame, thread_saved_fp(p), thread_saved_pc(p));

	do {
		if (unwind_frame(p, &frame))
			goto out;
		if (!in_sched_functions(frame.pc)) {
			ret = frame.pc;
			goto out;
		}
	} while (count ++ < 16);

out:
	put_task_stack(p);
	return ret;
}

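/*
 * Randomize the initial user stack pointer within a page (unless address
 * space randomization is disabled) while preserving 16-byte alignment.
 */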
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() & ~PAGE_MASK;
	return sp & ~0xf;
}

/*
 * Called from setup_new_exec() after (COMPAT_)SET_PERSONALITY.
 */
void arch_setup_new_exec(void)
{
	current->mm->context.flags = is_compat_task() ? MMCF_AARCH32 : 0;

	ptrauth_thread_init_user(current);
}

#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
/*
 * Control the relaxed ABI allowing tagged user addresses into the kernel.
 */
static unsigned int tagged_addr_disabled;

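/*
 * Backend for prctl(PR_SET_TAGGED_ADDR_CTRL). A 64-bit task opts in to the
 * tagged address ABI with, for example:
 *
 *	prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE, 0, 0, 0);
 *
 * Only the PR_TAGGED_ADDR_ENABLE bit is accepted, compat tasks are rejected,
 * and enabling is refused while the abi.tagged_addr_disabled sysctl is set.
 */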
long set_tagged_addr_ctrl(unsigned long arg)
{
	if (is_compat_task())
		return -EINVAL;
	if (arg & ~PR_TAGGED_ADDR_ENABLE)
		return -EINVAL;

	/*
	 * Do not allow the enabling of the tagged address ABI if globally
	 * disabled via sysctl abi.tagged_addr_disabled.
	 */
	if (arg & PR_TAGGED_ADDR_ENABLE && tagged_addr_disabled)
		return -EINVAL;

	update_thread_flag(TIF_TAGGED_ADDR, arg & PR_TAGGED_ADDR_ENABLE);

	return 0;
}

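/* Backend for prctl(PR_GET_TAGGED_ADDR_CTRL): report the current opt-in. */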
long get_tagged_addr_ctrl(void)
{
	if (is_compat_task())
		return -EINVAL;

	if (test_thread_flag(TIF_TAGGED_ADDR))
		return PR_TAGGED_ADDR_ENABLE;

	return 0;
}

/*
 * Global sysctl to disable the tagged user addresses support. This control
 * only prevents the tagged address ABI enabling via prctl() and does not
 * disable it for tasks that already opted in to the relaxed ABI.
 */
static int zero;
static int one = 1;

static struct ctl_table tagged_addr_sysctl_table[] = {
	{
		.procname	= "tagged_addr_disabled",
		.mode		= 0644,
		.data		= &tagged_addr_disabled,
		.maxlen		= sizeof(int),
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &zero,
		.extra2		= &one,
	},
	{ }
};

static int __init tagged_addr_init(void)
{
	if (!register_sysctl("abi", tagged_addr_sysctl_table))
		return -EINVAL;
	return 0;
}

core_initcall(tagged_addr_init);
#endif /* CONFIG_ARM64_TAGGED_ADDR_ABI */