// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/kernel/process.c
 *
 * Original Copyright (C) 1995  Linus Torvalds
 * Copyright (C) 1996-2000 Russell King - Converted to ARM.
 * Copyright (C) 2012 ARM Ltd.
 */

#include <stdarg.h>

#include <linux/compat.h>
#include <linux/efi.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/user.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/elfcore.h>
#include <linux/pm.h>
#include <linux/tick.h>
#include <linux/utsname.h>
#include <linux/uaccess.h>
#include <linux/random.h>
#include <linux/hw_breakpoint.h>
#include <linux/personality.h>
#include <linux/notifier.h>
#include <trace/events/power.h>
#include <linux/percpu.h>
#include <linux/thread_info.h>

#include <asm/alternative.h>
#include <asm/arch_gicv3.h>
#include <asm/compat.h>
#include <asm/cacheflush.h>
#include <asm/exec.h>
#include <asm/fpsimd.h>
#include <asm/mmu_context.h>
#include <asm/processor.h>
#include <asm/pointer_auth.h>
#include <asm/stacktrace.h>

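/*
 * With CONFIG_STACKPROTECTOR the compiler emits references to
 * __stack_chk_guard from every instrumented function, so a single global
 * canary must be provided here unless per-task canaries are in use.
 */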
#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK)
#include <linux/stackprotector.h>
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif

/*
 * Function pointers to optional machine specific functions
 */
void (*pm_power_off)(void);
EXPORT_SYMBOL_GPL(pm_power_off);

void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);

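/*
 * Wait for an interrupt: the DSB ensures that any outstanding memory
 * accesses have completed before the core is allowed to suspend in WFI.
 */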
static void __cpu_do_idle(void)
{
        dsb(sy);
        wfi();
}

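/*
 * With GIC priority masking in use, interrupts are normally left enabled
 * at the PSTATE level and masked via the PMR instead. Before idling, mask
 * at PSTATE.I and open up the PMR, so that a pending interrupt can still
 * act as a wake event for WFI without actually being taken.
 */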
static void __cpu_do_idle_irqprio(void)
{
        unsigned long pmr;
        unsigned long daif_bits;

        daif_bits = read_sysreg(daif);
        write_sysreg(daif_bits | PSR_I_BIT, daif);

        /*
         * Unmask PMR before going idle to make sure interrupts can
         * be raised.
         */
        pmr = gic_read_pmr();
        gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);

        __cpu_do_idle();

        gic_write_pmr(pmr);
        write_sysreg(daif_bits, daif);
}

/*
 * cpu_do_idle()
 *
 * Idle the processor (wait for interrupt).
 *
 * If the CPU supports priority masking we must do additional work to
 * ensure that interrupts are not masked at the PMR (because the core will
 * not wake up if we block the wake up signal in the interrupt controller).
 */
void cpu_do_idle(void)
{
        if (system_uses_irq_prio_masking())
                __cpu_do_idle_irqprio();
        else
                __cpu_do_idle();
}

/*
 * This is our default idle handler.
 */
void arch_cpu_idle(void)
{
        /*
         * This should do all the clock switching and wait for interrupt
         * tricks
         */
        trace_cpu_idle_rcuidle(1, smp_processor_id());
        cpu_do_idle();
        local_irq_enable();
        trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
}

#ifdef CONFIG_HOTPLUG_CPU
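/*
 * Called from the idle loop once this CPU has been marked offline, to
 * perform the architectural part of CPU hot-unplug.
 */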
void arch_cpu_idle_dead(void)
{
        cpu_die();
}
#endif

/*
 * Called by kexec, immediately prior to machine_kexec().
 *
 * This must completely disable all secondary CPUs; simply causing those CPUs
 * to execute e.g. a RAM-based pin loop is not sufficient. This allows the
 * kexec'd kernel to use any and all RAM as it sees fit, without having to
 * avoid any code or data used by any SW CPU pin loop. We use the CPU hotplug
 * functionality embodied in disable_nonboot_cpus() to achieve this.
 */
void machine_shutdown(void)
{
        disable_nonboot_cpus();
}

/*
 * Halting simply requires that the secondary CPUs stop performing any
 * activity (executing tasks, handling interrupts). smp_send_stop()
 * achieves this.
 */
void machine_halt(void)
{
        local_irq_disable();
        smp_send_stop();
        while (1);
}

/*
 * Power-off simply requires that the secondary CPUs stop performing any
 * activity (executing tasks, handling interrupts). smp_send_stop()
 * achieves this. When the system power is turned off, it will take all CPUs
 * with it.
 */
void machine_power_off(void)
{
        local_irq_disable();
        smp_send_stop();
        if (pm_power_off)
                pm_power_off();
}

/*
 * Restart requires that the secondary CPUs stop performing any activity
 * while the primary CPU resets the system. Systems with multiple CPUs must
 * provide a HW restart implementation, to ensure that all CPUs reset at once.
 * This is required so that any code running after reset on the primary CPU
 * doesn't have to co-ordinate with other CPUs to ensure they aren't still
 * executing pre-reset code, and using RAM that the primary CPU's code wishes
 * to use. Implementing such co-ordination would be essentially impossible.
 */
void machine_restart(char *cmd)
{
        /* Disable interrupts first */
        local_irq_disable();
        smp_send_stop();

        /*
         * UpdateCapsule() depends on the system being reset via
         * ResetSystem().
         */
        if (efi_enabled(EFI_RUNTIME_SERVICES))
                efi_reboot(reboot_mode, NULL);

        /* Now call the architecture specific reboot code. */
        if (arm_pm_restart)
                arm_pm_restart(reboot_mode, cmd);
        else
                do_kernel_restart(cmd);

        /*
         * Whoops - the architecture was unable to reboot.
         */
        printk("Reboot failed -- System halted\n");
        while (1);
}

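/*
 * Print a decoded PSTATE value, using the AArch32 SPSR layout for compat
 * (32-bit) tasks and the native AArch64 layout otherwise.
 */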
static void print_pstate(struct pt_regs *regs)
{
        u64 pstate = regs->pstate;

        if (compat_user_mode(regs)) {
                printk("pstate: %08llx (%c%c%c%c %c %s %s %c%c%c)\n",
                        pstate,
                        pstate & PSR_AA32_N_BIT ? 'N' : 'n',
                        pstate & PSR_AA32_Z_BIT ? 'Z' : 'z',
                        pstate & PSR_AA32_C_BIT ? 'C' : 'c',
                        pstate & PSR_AA32_V_BIT ? 'V' : 'v',
                        pstate & PSR_AA32_Q_BIT ? 'Q' : 'q',
                        pstate & PSR_AA32_T_BIT ? "T32" : "A32",
                        pstate & PSR_AA32_E_BIT ? "BE" : "LE",
                        pstate & PSR_AA32_A_BIT ? 'A' : 'a',
                        pstate & PSR_AA32_I_BIT ? 'I' : 'i',
                        pstate & PSR_AA32_F_BIT ? 'F' : 'f');
        } else {
                printk("pstate: %08llx (%c%c%c%c %c%c%c%c %cPAN %cUAO)\n",
                        pstate,
                        pstate & PSR_N_BIT ? 'N' : 'n',
                        pstate & PSR_Z_BIT ? 'Z' : 'z',
                        pstate & PSR_C_BIT ? 'C' : 'c',
                        pstate & PSR_V_BIT ? 'V' : 'v',
                        pstate & PSR_D_BIT ? 'D' : 'd',
                        pstate & PSR_A_BIT ? 'A' : 'a',
                        pstate & PSR_I_BIT ? 'I' : 'i',
                        pstate & PSR_F_BIT ? 'F' : 'f',
                        pstate & PSR_PAN_BIT ? '+' : '-',
                        pstate & PSR_UAO_BIT ? '+' : '-');
        }
}

void __show_regs(struct pt_regs *regs)
{
        int i, top_reg;
        u64 lr, sp;

        if (compat_user_mode(regs)) {
                lr = regs->compat_lr;
                sp = regs->compat_sp;
                top_reg = 12;
        } else {
                lr = regs->regs[30];
                sp = regs->sp;
                top_reg = 29;
        }

        show_regs_print_info(KERN_DEFAULT);
        print_pstate(regs);

        if (!user_mode(regs)) {
                printk("pc : %pS\n", (void *)regs->pc);
                printk("lr : %pS\n", (void *)lr);
        } else {
                printk("pc : %016llx\n", regs->pc);
                printk("lr : %016llx\n", lr);
        }

        printk("sp : %016llx\n", sp);

        if (system_uses_irq_prio_masking())
                printk("pmr_save: %08llx\n", regs->pmr_save);

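        /* Dump the general purpose registers, two per output line. */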
        i = top_reg;

        while (i >= 0) {
                printk("x%-2d: %016llx ", i, regs->regs[i]);
                i--;

                if (i % 2 == 0) {
                        pr_cont("x%-2d: %016llx ", i, regs->regs[i]);
                        i--;
                }

                pr_cont("\n");
        }
}

void show_regs(struct pt_regs *regs)
{
        __show_regs(regs);
        dump_backtrace(regs, NULL);
}

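/*
 * Reset the TLS registers on exec: the native TLS pointer in tpidr_el0
 * always, plus the shadow tp_value and tpidrro_el0 for compat tasks.
 */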
static void tls_thread_flush(void)
{
        write_sysreg(0, tpidr_el0);

        if (is_compat_task()) {
                current->thread.uw.tp_value = 0;

                /*
                 * We need to ensure ordering between the shadow state and the
                 * hardware state, so that we don't corrupt the hardware state
                 * with a stale shadow state during context switch.
                 */
                barrier();
                write_sysreg(0, tpidrro_el0);
        }
}

void flush_thread(void)
{
        fpsimd_flush_thread();
        tls_thread_flush();
        flush_ptrace_hw_breakpoint(current);
}

void release_thread(struct task_struct *dead_task)
{
}

void arch_release_task_struct(struct task_struct *tsk)
{
        fpsimd_release_task(tsk);
}

/*
 * src and dst may temporarily have aliased sve_state after task_struct
 * is copied. We cannot fix this properly here, because src may have
 * live SVE state and dst's thread_info may not exist yet, so tweaking
 * either src's or dst's TIF_SVE is not safe.
 *
 * The unaliasing is done in copy_thread() instead. This works because
 * dst is not schedulable or traceable until both of these functions
 * have been called.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
        if (current->mm)
                fpsimd_preserve_current_state();
        *dst = *src;

        return 0;
}

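/*
 * All new tasks start life in ret_from_fork (see entry.S), which returns
 * to userspace via the saved childregs or, for kernel threads, first calls
 * the function stashed in cpu_context.x19 with the argument from x20.
 */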
asmlinkage void ret_from_fork(void) asm("ret_from_fork");

int copy_thread(unsigned long clone_flags, unsigned long stack_start,
                unsigned long stk_sz, struct task_struct *p)
{
        struct pt_regs *childregs = task_pt_regs(p);

        memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context));

        /*
         * Unalias p->thread.sve_state (if any) from the parent task,
         * disable SVE for p, and discard its stale SVE state:
         */
        clear_tsk_thread_flag(p, TIF_SVE);
        p->thread.sve_state = NULL;

        /*
         * In case p was allocated the same task_struct pointer as some
         * other recently-exited task, make sure p is disassociated from
         * any cpu that may have run that now-exited task recently.
         * Otherwise we could erroneously skip reloading the FPSIMD
         * registers for p.
         */
        fpsimd_flush_task_state(p);

        if (likely(!(p->flags & PF_KTHREAD))) {
                *childregs = *current_pt_regs();
                childregs->regs[0] = 0;

                /*
                 * Read the current TLS pointer from tpidr_el0 as it may be
                 * out-of-sync with the saved value.
                 */
                *task_user_tls(p) = read_sysreg(tpidr_el0);

                if (stack_start) {
                        if (is_compat_thread(task_thread_info(p)))
                                childregs->compat_sp = stack_start;
                        else
                                childregs->sp = stack_start;
                }

                /*
                 * If a TLS pointer was passed to clone (4th argument), use it
                 * for the new thread.
                 */
                if (clone_flags & CLONE_SETTLS)
                        p->thread.uw.tp_value = childregs->regs[3];
        } else {
                memset(childregs, 0, sizeof(struct pt_regs));
                childregs->pstate = PSR_MODE_EL1h;
                if (IS_ENABLED(CONFIG_ARM64_UAO) &&
                    cpus_have_const_cap(ARM64_HAS_UAO))
                        childregs->pstate |= PSR_UAO_BIT;

                if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE)
                        set_ssbs_bit(childregs);

                if (system_uses_irq_prio_masking())
                        childregs->pmr_save = GIC_PRIO_IRQON;

                p->thread.cpu_context.x19 = stack_start;
                p->thread.cpu_context.x20 = stk_sz;
        }
        p->thread.cpu_context.pc = (unsigned long)ret_from_fork;
        p->thread.cpu_context.sp = (unsigned long)childregs;

        ptrace_hw_copy_thread(p);

        return 0;
}

void tls_preserve_current_state(void)
{
        *task_user_tls(current) = read_sysreg(tpidr_el0);
}

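/*
 * Install the incoming task's TLS registers: tpidrro_el0 holds the compat
 * TLS pointer for 32-bit tasks and is zeroed for native ones, except when
 * the kernel is unmapped at EL0 (KPTI), in which case the exception entry
 * trampoline manages tpidrro_el0 itself and zeroing it here would be
 * redundant.
 */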
static void tls_thread_switch(struct task_struct *next)
{
        tls_preserve_current_state();

        if (is_compat_thread(task_thread_info(next)))
                write_sysreg(next->thread.uw.tp_value, tpidrro_el0);
        else if (!arm64_kernel_unmapped_at_el0())
                write_sysreg(0, tpidrro_el0);

        write_sysreg(*task_user_tls(next), tpidr_el0);
}

/* Restore the UAO state depending on next's addr_limit */
void uao_thread_switch(struct task_struct *next)
{
        if (IS_ENABLED(CONFIG_ARM64_UAO)) {
                if (task_thread_info(next)->addr_limit == KERNEL_DS)
                        asm(ALTERNATIVE("nop", SET_PSTATE_UAO(1), ARM64_HAS_UAO));
                else
                        asm(ALTERNATIVE("nop", SET_PSTATE_UAO(0), ARM64_HAS_UAO));
        }
}

/*
 * Force SSBS state on context-switch, since it may be lost after migrating
 * from a CPU which treats the bit as RES0 in a heterogeneous system.
 */
static void ssbs_thread_switch(struct task_struct *next)
{
        struct pt_regs *regs = task_pt_regs(next);

        /*
         * Nothing to do for kernel threads, but 'regs' may be junk
         * (e.g. idle task) so check the flags and bail early.
         */
        if (unlikely(next->flags & PF_KTHREAD))
                return;

        /* If the mitigation is enabled, then we leave SSBS clear. */
        if ((arm64_get_ssbd_state() == ARM64_SSBD_FORCE_ENABLE) ||
            test_tsk_thread_flag(next, TIF_SSBD))
                return;

        if (compat_user_mode(regs))
                set_compat_ssbs_bit(regs);
        else if (user_mode(regs))
                set_ssbs_bit(regs);
}

/*
 * We store our current task in sp_el0, which is clobbered by userspace. Keep a
 * shadow copy so that we can restore this upon entry from userspace.
 *
 * This is *only* for exception entry from EL0, and is not valid until we
 * __switch_to() a user task.
 */
DEFINE_PER_CPU(struct task_struct *, __entry_task);

static void entry_task_switch(struct task_struct *next)
{
        __this_cpu_write(__entry_task, next);
}

/*
 * Thread switching.
 */
__notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
                                struct task_struct *next)
{
        struct task_struct *last;

        fpsimd_thread_switch(next);
        tls_thread_switch(next);
        hw_breakpoint_thread_switch(next);
        contextidr_thread_switch(next);
        entry_task_switch(next);
        uao_thread_switch(next);
        ptrauth_thread_switch(next);
        ssbs_thread_switch(next);

        /*
         * Complete any pending TLB or cache maintenance on this CPU in case
         * the thread migrates to a different CPU.
         * This full barrier is also required by the membarrier system
         * call.
         */
        dsb(ish);

        /* the actual thread switch */
        last = cpu_switch_to(prev, next);

        return last;
}

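/*
 * Report where a blocked task is sleeping: unwind its saved call stack and
 * return the first PC found outside the scheduler, for /proc/<pid>/wchan.
 */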
unsigned long get_wchan(struct task_struct *p)
{
        struct stackframe frame;
        unsigned long stack_page, ret = 0;
        int count = 0;
        if (!p || p == current || p->state == TASK_RUNNING)
                return 0;

        stack_page = (unsigned long)try_get_task_stack(p);
        if (!stack_page)
                return 0;

        start_backtrace(&frame, thread_saved_fp(p), thread_saved_pc(p));

        do {
                if (unwind_frame(p, &frame))
                        goto out;
                if (!in_sched_functions(frame.pc)) {
                        ret = frame.pc;
                        goto out;
                }
        } while (count++ < 16);

out:
        put_task_stack(p);
        return ret;
}

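/*
 * Randomize the stack pointer at exec time by subtracting a random
 * sub-page offset, then re-align to 16 bytes as the AAPCS64 requires.
 */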
unsigned long arch_align_stack(unsigned long sp)
{
        if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
                sp -= get_random_int() & ~PAGE_MASK;
        return sp & ~0xf;
}

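/*
 * Randomize the heap base above the initial brk: within 32MB of headroom
 * for compat tasks and 1GB for native 64-bit tasks.
 */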
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
        if (is_compat_task())
                return randomize_page(mm->brk, SZ_32M);
        else
                return randomize_page(mm->brk, SZ_1G);
}

/*
 * Called from setup_new_exec() after (COMPAT_)SET_PERSONALITY.
 */
void arch_setup_new_exec(void)
{
        current->mm->context.flags = is_compat_task() ? MMCF_AARCH32 : 0;

        ptrauth_thread_init_user(current);
}