/*
 * Based on arch/arm/kernel/process.c
 *
 * Original Copyright (C) 1995  Linus Torvalds
 * Copyright (C) 1996-2000 Russell King - Converted to ARM.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <stdarg.h>

#include <linux/compat.h>
#include <linux/efi.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/user.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/elfcore.h>
#include <linux/pm.h>
#include <linux/tick.h>
#include <linux/utsname.h>
#include <linux/uaccess.h>
#include <linux/random.h>
#include <linux/hw_breakpoint.h>
#include <linux/personality.h>
#include <linux/notifier.h>
#include <trace/events/power.h>
#include <linux/percpu.h>
#include <linux/thread_info.h>

#include <asm/alternative.h>
#include <asm/compat.h>
#include <asm/cacheflush.h>
#include <asm/exec.h>
#include <asm/fpsimd.h>
#include <asm/mmu_context.h>
#include <asm/processor.h>
#include <asm/stacktrace.h>

#ifdef CONFIG_CC_STACKPROTECTOR
#include <linux/stackprotector.h>
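/*
 * Stack protector canary: functions instrumented by the compiler compare
 * their stack guard slot against this global value on return and call
 * __stack_chk_fail() on a mismatch.
 */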
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif

/*
 * Function pointers to optional machine specific functions
 */
void (*pm_power_off)(void);
EXPORT_SYMBOL_GPL(pm_power_off);

void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);

/*
 * This is our default idle handler.
 */
void arch_cpu_idle(void)
{
	/*
	 * This should do all the clock switching and wait-for-interrupt
	 * tricks.
	 */
	trace_cpu_idle_rcuidle(1, smp_processor_id());
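	/* cpu_do_idle() issues a DSB then WFI; we wake with IRQs still masked. */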
	cpu_do_idle();
	local_irq_enable();
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
}

#ifdef CONFIG_HOTPLUG_CPU
void arch_cpu_idle_dead(void)
{
	cpu_die();
}
#endif

/*
 * Called by kexec, immediately prior to machine_kexec().
 *
 * This must completely disable all secondary CPUs; simply causing those CPUs
 * to execute e.g. a RAM-based pin loop is not sufficient. This allows the
 * kexec'd kernel to use any and all RAM as it sees fit, without having to
 * avoid any code or data used by any SW CPU pin loop. The CPU hotplug
 * functionality embodied in disable_nonboot_cpus() achieves this.
 */
void machine_shutdown(void)
{
	disable_nonboot_cpus();
}

/*
 * Halting simply requires that the secondary CPUs stop performing any
 * activity (executing tasks, handling interrupts). smp_send_stop()
 * achieves this.
 */
void machine_halt(void)
{
	local_irq_disable();
	smp_send_stop();
	while (1);
}

/*
 * Power-off simply requires that the secondary CPUs stop performing any
 * activity (executing tasks, handling interrupts). smp_send_stop()
 * achieves this. When the system power is turned off, it will take all CPUs
 * with it.
 */
void machine_power_off(void)
{
	local_irq_disable();
	smp_send_stop();
	if (pm_power_off)
		pm_power_off();
}

/*
 * Restart requires that the secondary CPUs stop performing any activity
 * while the primary CPU resets the system. Systems with multiple CPUs must
 * provide a HW restart implementation, to ensure that all CPUs reset at once.
 * This is required so that any code running after reset on the primary CPU
 * doesn't have to co-ordinate with other CPUs to ensure they aren't still
 * executing pre-reset code, and using RAM that the primary CPU's code wishes
 * to use. Implementing such co-ordination would be essentially impossible.
 */
void machine_restart(char *cmd)
{
	/* Disable interrupts first */
	local_irq_disable();
	smp_send_stop();

	/*
	 * UpdateCapsule() depends on the system being reset via
	 * ResetSystem().
	 */
	if (efi_enabled(EFI_RUNTIME_SERVICES))
		efi_reboot(reboot_mode, NULL);

	/* Now call the architecture specific reboot code. */
	if (arm_pm_restart)
		arm_pm_restart(reboot_mode, cmd);
	else
		do_kernel_restart(cmd);

	/*
	 * Whoops - the architecture was unable to reboot.
	 */
	printk("Reboot failed -- System halted\n");
	while (1);
}

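/*
 * Decode the PSTATE (or, for compat tasks, the AArch32 CPSR) flag bits
 * for the register dump; an upper-case letter means the bit is set.
 */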
static void print_pstate(struct pt_regs *regs)
{
	u64 pstate = regs->pstate;

	if (compat_user_mode(regs)) {
		printk("pstate: %08llx (%c%c%c%c %c %s %s %c%c%c)\n",
		       pstate,
		       pstate & COMPAT_PSR_N_BIT ? 'N' : 'n',
		       pstate & COMPAT_PSR_Z_BIT ? 'Z' : 'z',
		       pstate & COMPAT_PSR_C_BIT ? 'C' : 'c',
		       pstate & COMPAT_PSR_V_BIT ? 'V' : 'v',
		       pstate & COMPAT_PSR_Q_BIT ? 'Q' : 'q',
		       pstate & COMPAT_PSR_T_BIT ? "T32" : "A32",
		       pstate & COMPAT_PSR_E_BIT ? "BE" : "LE",
		       pstate & COMPAT_PSR_A_BIT ? 'A' : 'a',
		       pstate & COMPAT_PSR_I_BIT ? 'I' : 'i',
		       pstate & COMPAT_PSR_F_BIT ? 'F' : 'f');
	} else {
		printk("pstate: %08llx (%c%c%c%c %c%c%c%c %cPAN %cUAO)\n",
		       pstate,
		       pstate & PSR_N_BIT ? 'N' : 'n',
		       pstate & PSR_Z_BIT ? 'Z' : 'z',
		       pstate & PSR_C_BIT ? 'C' : 'c',
		       pstate & PSR_V_BIT ? 'V' : 'v',
		       pstate & PSR_D_BIT ? 'D' : 'd',
		       pstate & PSR_A_BIT ? 'A' : 'a',
		       pstate & PSR_I_BIT ? 'I' : 'i',
		       pstate & PSR_F_BIT ? 'F' : 'f',
		       pstate & PSR_PAN_BIT ? '+' : '-',
		       pstate & PSR_UAO_BIT ? '+' : '-');
	}
}

void __show_regs(struct pt_regs *regs)
{
	int i, top_reg;
	u64 lr, sp;

	if (compat_user_mode(regs)) {
		lr = regs->compat_lr;
		sp = regs->compat_sp;
		top_reg = 12;
	} else {
		lr = regs->regs[30];
		sp = regs->sp;
		top_reg = 29;
	}

	show_regs_print_info(KERN_DEFAULT);
	print_pstate(regs);
	printk("pc : %pS\n", (void *)regs->pc);
	printk("lr : %pS\n", (void *)lr);
	printk("sp : %016llx\n", sp);

	i = top_reg;

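	/* Print the general-purpose registers pairwise, from x<top_reg> down to x0. */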
	while (i >= 0) {
		printk("x%-2d: %016llx ", i, regs->regs[i]);
		i--;

		if (i % 2 == 0) {
			pr_cont("x%-2d: %016llx ", i, regs->regs[i]);
			i--;
		}

		pr_cont("\n");
	}
}

void show_regs(struct pt_regs *regs)
{
	__show_regs(regs);
	dump_backtrace(regs, NULL);
}

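/*
 * Clear the TLS registers: tpidr_el0 is the user-writable TLS base, while
 * tpidrro_el0 (read-only from EL0) holds the TLS pointer for compat
 * (AArch32) tasks.
 */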
static void tls_thread_flush(void)
{
	write_sysreg(0, tpidr_el0);

	if (is_compat_task()) {
		current->thread.tp_value = 0;

		/*
		 * We need to ensure ordering between the shadow state and the
		 * hardware state, so that we don't corrupt the hardware state
		 * with a stale shadow state during context switch.
		 */
		barrier();
		write_sysreg(0, tpidrro_el0);
	}
}

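/*
 * Reset the FPSIMD/SVE, TLS and hardware breakpoint state when a task
 * execs a new image.
 */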
void flush_thread(void)
{
	fpsimd_flush_thread();
	tls_thread_flush();
	flush_ptrace_hw_breakpoint(current);
}

void release_thread(struct task_struct *dead_task)
{
}

void arch_release_task_struct(struct task_struct *tsk)
{
	fpsimd_release_task(tsk);
}

/*
 * src and dst may temporarily have aliased sve_state after task_struct
 * is copied. We cannot fix this properly here, because src may have
 * live SVE state and dst's thread_info may not exist yet, so tweaking
 * either src's or dst's TIF_SVE is not safe.
 *
 * The unaliasing is done in copy_thread() instead. This works because
 * dst is not schedulable or traceable until both of these functions
 * have been called.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	if (current->mm)
		fpsimd_preserve_current_state();
	*dst = *src;

	return 0;
}

asmlinkage void ret_from_fork(void) asm("ret_from_fork");

int copy_thread(unsigned long clone_flags, unsigned long stack_start,
		unsigned long stk_sz, struct task_struct *p)
{
	struct pt_regs *childregs = task_pt_regs(p);

	memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context));

	/*
	 * Unalias p->thread.sve_state (if any) from the parent task
	 * and discard SVE state for p:
	 */
	clear_tsk_thread_flag(p, TIF_SVE);
	p->thread.sve_state = NULL;

	/*
	 * In case p was allocated the same task_struct pointer as some
	 * other recently-exited task, make sure p is disassociated from
	 * any cpu that may have run that now-exited task recently.
	 * Otherwise we could erroneously skip reloading the FPSIMD
	 * registers for p.
	 */
	fpsimd_flush_task_state(p);

	if (likely(!(p->flags & PF_KTHREAD))) {
		*childregs = *current_pt_regs();
		childregs->regs[0] = 0;

		/*
		 * Read the current TLS pointer from tpidr_el0 as it may be
		 * out-of-sync with the saved value.
		 */
		*task_user_tls(p) = read_sysreg(tpidr_el0);

		if (stack_start) {
			if (is_compat_thread(task_thread_info(p)))
				childregs->compat_sp = stack_start;
			else
				childregs->sp = stack_start;
		}

		/*
		 * If a TLS pointer was passed to clone (4th argument), use it
		 * for the new thread.
		 */
		if (clone_flags & CLONE_SETTLS)
			p->thread.tp_value = childregs->regs[3];
	} else {
		memset(childregs, 0, sizeof(struct pt_regs));
		childregs->pstate = PSR_MODE_EL1h;
		if (IS_ENABLED(CONFIG_ARM64_UAO) &&
		    cpus_have_const_cap(ARM64_HAS_UAO))
			childregs->pstate |= PSR_UAO_BIT;
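		/*
		 * Kernel threads have no user register state; ret_from_fork
		 * (see entry.S) branches to the function in x19 with x20 as
		 * its sole argument.
		 */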
		p->thread.cpu_context.x19 = stack_start;
		p->thread.cpu_context.x20 = stk_sz;
	}
	p->thread.cpu_context.pc = (unsigned long)ret_from_fork;
	p->thread.cpu_context.sp = (unsigned long)childregs;

	ptrace_hw_copy_thread(p);

	return 0;
}

void tls_preserve_current_state(void)
{
	*task_user_tls(current) = read_sysreg(tpidr_el0);
}

static void tls_thread_switch(struct task_struct *next)
{
	tls_preserve_current_state();

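	/*
	 * tpidrro_el0 only needs to be zeroed for native tasks when the
	 * kernel is mapped at EL0: with KPTI the entry trampoline uses
	 * tpidrro_el0 as a scratch register and overwrites it on every
	 * exception entry anyway.
	 */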
	if (is_compat_thread(task_thread_info(next)))
		write_sysreg(next->thread.tp_value, tpidrro_el0);
	else if (!arm64_kernel_unmapped_at_el0())
		write_sysreg(0, tpidrro_el0);

	write_sysreg(*task_user_tls(next), tpidr_el0);
}

/* Restore the UAO state depending on next's addr_limit */
void uao_thread_switch(struct task_struct *next)
{
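	/*
	 * PSTATE.UAO (ARMv8.2) makes the "unprivileged" LDTR/STTR
	 * instructions used by the uaccess helpers behave as privileged
	 * accesses, which is what is needed while a task runs with
	 * addr_limit == KERNEL_DS.
	 */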
	if (IS_ENABLED(CONFIG_ARM64_UAO)) {
		if (task_thread_info(next)->addr_limit == KERNEL_DS)
			asm(ALTERNATIVE("nop", SET_PSTATE_UAO(1), ARM64_HAS_UAO));
		else
			asm(ALTERNATIVE("nop", SET_PSTATE_UAO(0), ARM64_HAS_UAO));
	}
}

/*
 * We store our current task in sp_el0, which is clobbered by userspace. Keep a
 * shadow copy so that we can restore this upon entry from userspace.
 *
 * This is *only* for exception entry from EL0, and is not valid until we
 * __switch_to() a user task.
 */
DEFINE_PER_CPU(struct task_struct *, __entry_task);

static void entry_task_switch(struct task_struct *next)
{
	__this_cpu_write(__entry_task, next);
}

/*
 * Thread switching.
 */
__notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
				struct task_struct *next)
{
	struct task_struct *last;

	fpsimd_thread_switch(next);
	tls_thread_switch(next);
	hw_breakpoint_thread_switch(next);
	contextidr_thread_switch(next);
	entry_task_switch(next);
	uao_thread_switch(next);

	/*
	 * Complete any pending TLB or cache maintenance on this CPU in case
	 * the thread migrates to a different CPU.
	 * This full barrier is also required by the membarrier system
	 * call.
	 */
	dsb(ish);

	/* the actual thread switch */
	last = cpu_switch_to(prev, next);

	return last;
}

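/*
 * Walk p's saved call stack to find the first function outside the
 * scheduler, i.e. the "wait channel" reported via /proc/<pid>/wchan.
 */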
unsigned long get_wchan(struct task_struct *p)
{
	struct stackframe frame;
	unsigned long stack_page, ret = 0;
	int count = 0;
	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	stack_page = (unsigned long)try_get_task_stack(p);
	if (!stack_page)
		return 0;

	frame.fp = thread_saved_fp(p);
	frame.pc = thread_saved_pc(p);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	frame.graph = p->curr_ret_stack;
#endif
	do {
		if (unwind_frame(p, &frame))
			goto out;
		if (!in_sched_functions(frame.pc)) {
			ret = frame.pc;
			goto out;
		}
	} while (count++ < 16);

out:
	put_task_stack(p);
	return ret;
}

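/*
 * Randomize the initial stack pointer downwards by up to one page, while
 * preserving the 16-byte sp alignment that AAPCS64 requires.
 */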
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() & ~PAGE_MASK;
	return sp & ~0xf;
}

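/*
 * Randomize the heap base above the current brk: within 32MB for compat
 * (32-bit) tasks and within 1GB for native 64-bit tasks.
 */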
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	if (is_compat_task())
		return randomize_page(mm->brk, SZ_32M);
	else
		return randomize_page(mm->brk, SZ_1G);
}

/*
 * Called from setup_new_exec() after (COMPAT_)SET_PERSONALITY.
 */
void arch_setup_new_exec(void)
{
	current->mm->context.flags = is_compat_task() ? MMCF_AARCH32 : 0;
}