/*
 * Based on arch/arm/kernel/process.c
 *
 * Original Copyright (C) 1995  Linus Torvalds
 * Copyright (C) 1996-2000 Russell King - Converted to ARM.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <stdarg.h>

#include <linux/compat.h>
#include <linux/efi.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/user.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/elfcore.h>
#include <linux/pm.h>
#include <linux/tick.h>
#include <linux/utsname.h>
#include <linux/uaccess.h>
#include <linux/random.h>
#include <linux/hw_breakpoint.h>
#include <linux/personality.h>
#include <linux/notifier.h>
#include <trace/events/power.h>
#include <linux/percpu.h>
#include <linux/thread_info.h>

#include <asm/alternative.h>
#include <asm/compat.h>
#include <asm/cacheflush.h>
#include <asm/exec.h>
#include <asm/fpsimd.h>
#include <asm/mmu_context.h>
#include <asm/processor.h>
#include <asm/stacktrace.h>
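/*
 * Stack protector: with -fstack-protector the compiler checks each
 * protected function's canary against __stack_chk_guard on return. A
 * single global guard value is used here (no per-task canary).
 */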
#ifdef CONFIG_CC_STACKPROTECTOR
#include <linux/stackprotector.h>
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif
/*
 * Function pointers to optional machine specific functions
 */
void (*pm_power_off)(void);
EXPORT_SYMBOL_GPL(pm_power_off);

void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
/*
 * This is our default idle handler.
 */
void arch_cpu_idle(void)
{
	/*
	 * This should do all the clock switching and wait for interrupt
	 * tricks
	 */
	trace_cpu_idle_rcuidle(1, smp_processor_id());
	cpu_do_idle();
	local_irq_enable();
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
}
#ifdef CONFIG_HOTPLUG_CPU
void arch_cpu_idle_dead(void)
{
	cpu_die();
}
#endif
/*
 * Called by kexec, immediately prior to machine_kexec().
 *
 * This must completely disable all secondary CPUs; simply causing those CPUs
 * to execute e.g. a RAM-based pin loop is not sufficient. This allows the
 * kexec'd kernel to use any and all RAM as it sees fit, without having to
 * avoid any code or data used by any SW CPU pin loop. The CPU hotplug
 * functionality embodied in disable_nonboot_cpus() is used to achieve this.
 */
void machine_shutdown(void)
{
	disable_nonboot_cpus();
}
/*
 * Halting simply requires that the secondary CPUs stop performing any
 * activity (executing tasks, handling interrupts). smp_send_stop()
 * achieves this.
 */
void machine_halt(void)
{
	local_irq_disable();
	smp_send_stop();
	while (1);
}
/*
 * Power-off simply requires that the secondary CPUs stop performing any
 * activity (executing tasks, handling interrupts). smp_send_stop()
 * achieves this. When the system power is turned off, it will take all CPUs
 * with it.
 */
void machine_power_off(void)
{
	local_irq_disable();
	smp_send_stop();
	if (pm_power_off)
		pm_power_off();
}
/*
 * Restart requires that the secondary CPUs stop performing any activity
 * while the primary CPU resets the system. Systems with multiple CPUs must
 * provide a HW restart implementation, to ensure that all CPUs reset at once.
 * This is required so that any code running after reset on the primary CPU
 * doesn't have to co-ordinate with other CPUs to ensure they aren't still
 * executing pre-reset code, and using RAM that the primary CPU's code wishes
 * to use. Implementing such co-ordination would be essentially impossible.
 */
void machine_restart(char *cmd)
{
	/* Disable interrupts first */
	local_irq_disable();
	smp_send_stop();

	/*
	 * UpdateCapsule() depends on the system being reset via
	 * ResetSystem().
	 */
	if (efi_enabled(EFI_RUNTIME_SERVICES))
		efi_reboot(reboot_mode, NULL);

	/* Now call the architecture specific reboot code. */
	if (arm_pm_restart)
		arm_pm_restart(reboot_mode, cmd);
	else
		do_kernel_restart(cmd);

	/*
	 * Whoops - the architecture was unable to reboot.
	 */
	printk("Reboot failed -- System halted\n");
	while (1);
}
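/*
 * Print the saved PSTATE flags; an upper-case letter means the bit is
 * set. AArch32 (compat) tasks use the CPSR layout, so they are decoded
 * separately below.
 */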
static void print_pstate(struct pt_regs *regs)
{
	u64 pstate = regs->pstate;

	if (compat_user_mode(regs)) {
		printk("pstate: %08llx (%c%c%c%c %c %s %s %c%c%c)\n",
			pstate,
			pstate & COMPAT_PSR_N_BIT ? 'N' : 'n',
			pstate & COMPAT_PSR_Z_BIT ? 'Z' : 'z',
			pstate & COMPAT_PSR_C_BIT ? 'C' : 'c',
			pstate & COMPAT_PSR_V_BIT ? 'V' : 'v',
			pstate & COMPAT_PSR_Q_BIT ? 'Q' : 'q',
			pstate & COMPAT_PSR_T_BIT ? "T32" : "A32",
			pstate & COMPAT_PSR_E_BIT ? "BE" : "LE",
			pstate & COMPAT_PSR_A_BIT ? 'A' : 'a',
			pstate & COMPAT_PSR_I_BIT ? 'I' : 'i',
			pstate & COMPAT_PSR_F_BIT ? 'F' : 'f');
	} else {
		printk("pstate: %08llx (%c%c%c%c %c%c%c%c %cPAN %cUAO)\n",
			pstate,
			pstate & PSR_N_BIT ? 'N' : 'n',
			pstate & PSR_Z_BIT ? 'Z' : 'z',
			pstate & PSR_C_BIT ? 'C' : 'c',
			pstate & PSR_V_BIT ? 'V' : 'v',
			pstate & PSR_D_BIT ? 'D' : 'd',
			pstate & PSR_A_BIT ? 'A' : 'a',
			pstate & PSR_I_BIT ? 'I' : 'i',
			pstate & PSR_F_BIT ? 'F' : 'f',
			pstate & PSR_PAN_BIT ? '+' : '-',
			pstate & PSR_UAO_BIT ? '+' : '-');
	}
}
void __show_regs(struct pt_regs *regs)
{
	int i, top_reg;
	u64 lr, sp;

	if (compat_user_mode(regs)) {
		lr = regs->compat_lr;
		sp = regs->compat_sp;
		top_reg = 12;
	} else {
		lr = regs->regs[30];
		sp = regs->sp;
		top_reg = 29;
	}

	show_regs_print_info(KERN_DEFAULT);
	print_pstate(regs);
	printk("pc : %pS\n", (void *)regs->pc);
	printk("lr : %pS\n", (void *)lr);
	printk("sp : %016llx\n", sp);

	i = top_reg;

	while (i >= 0) {
		printk("x%-2d: %016llx ", i, regs->regs[i]);
		i--;

		if (i % 2 == 0) {
			pr_cont("x%-2d: %016llx ", i, regs->regs[i]);
			i--;
		}

		pr_cont("\n");
	}
}
void show_regs(struct pt_regs * regs)
{
	__show_regs(regs);
	dump_backtrace(regs, NULL);
}
static void tls_thread_flush(void)
{
	write_sysreg(0, tpidr_el0);

	if (is_compat_task()) {
		current->thread.tp_value = 0;

		/*
		 * We need to ensure ordering between the shadow state and the
		 * hardware state, so that we don't corrupt the hardware state
		 * with a stale shadow state during context switch.
		 */
		barrier();
		write_sysreg(0, tpidrro_el0);
	}
}
void flush_thread(void)
{
	fpsimd_flush_thread();
	tls_thread_flush();
	flush_ptrace_hw_breakpoint(current);
}
void release_thread(struct task_struct *dead_task)
{
}
void arch_release_task_struct(struct task_struct *tsk)
{
	fpsimd_release_task(tsk);
}
/*
 * src and dst may temporarily have aliased sve_state after task_struct
 * is copied.  We cannot fix this properly here, because src may have
 * live SVE state and dst's thread_info may not exist yet, so tweaking
 * either src's or dst's TIF_SVE is not safe.
 *
 * The unaliasing is done in copy_thread() instead.  This works because
 * dst is not schedulable or traceable until both of these functions
 * have been called.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	if (current->mm)
		fpsimd_preserve_current_state();
	*dst = *src;

	return 0;
}
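/*
 * Assembly return path for newly forked threads, defined in entry.S.
 */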
asmlinkage void ret_from_fork(void) asm("ret_from_fork");
int copy_thread(unsigned long clone_flags, unsigned long stack_start,
		unsigned long stk_sz, struct task_struct *p)
{
	struct pt_regs *childregs = task_pt_regs(p);

	memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context));

	/*
	 * Unalias p->thread.sve_state (if any) from the parent task
	 * and discard any SVE state for p:
	 */
	clear_tsk_thread_flag(p, TIF_SVE);
	p->thread.sve_state = NULL;

	/*
	 * In case p was allocated the same task_struct pointer as some
	 * other recently-exited task, make sure p is disassociated from
	 * any cpu that may have run that now-exited task recently.
	 * Otherwise we could erroneously skip reloading the FPSIMD
	 * registers for p.
	 */
	fpsimd_flush_task_state(p);
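	/*
	 * A user thread starts from a copy of the parent's user registers;
	 * a kernel thread has no user context, so a minimal EL1 frame is
	 * built in the else branch below.
	 */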
	if (likely(!(p->flags & PF_KTHREAD))) {
		*childregs = *current_pt_regs();
		childregs->regs[0] = 0;

		/*
		 * Read the current TLS pointer from tpidr_el0 as it may be
		 * out-of-sync with the saved value.
		 */
		*task_user_tls(p) = read_sysreg(tpidr_el0);

		if (stack_start) {
			if (is_compat_thread(task_thread_info(p)))
				childregs->compat_sp = stack_start;
			else
				childregs->sp = stack_start;
		}

		/*
		 * If a TLS pointer was passed to clone (4th argument), use it
		 * for the new thread.
		 */
		if (clone_flags & CLONE_SETTLS)
			p->thread.tp_value = childregs->regs[3];
	} else {
		memset(childregs, 0, sizeof(struct pt_regs));
		childregs->pstate = PSR_MODE_EL1h;
		if (IS_ENABLED(CONFIG_ARM64_UAO) &&
		    cpus_have_const_cap(ARM64_HAS_UAO))
			childregs->pstate |= PSR_UAO_BIT;
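		/*
		 * For a kernel thread, stack_start is the function to call
		 * and stk_sz its argument: ret_from_fork moves x20 into x0
		 * and branches to x19 (see entry.S).
		 */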
		p->thread.cpu_context.x19 = stack_start;
		p->thread.cpu_context.x20 = stk_sz;
	}
	p->thread.cpu_context.pc = (unsigned long)ret_from_fork;
	p->thread.cpu_context.sp = (unsigned long)childregs;

	ptrace_hw_copy_thread(p);

	return 0;
}
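/*
 * TLS registers: tpidr_el0 holds the native (AArch64) thread pointer,
 * while tpidrro_el0, read-only to userspace, carries the TLS value for
 * compat (AArch32) tasks.
 */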
void tls_preserve_current_state(void)
{
	*task_user_tls(current) = read_sysreg(tpidr_el0);
}
static void tls_thread_switch(struct task_struct *next)
{
	tls_preserve_current_state();

	if (is_compat_thread(task_thread_info(next)))
		write_sysreg(next->thread.tp_value, tpidrro_el0);
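	/*
	 * For native tasks tpidrro_el0 is normally cleared, but when the
	 * kernel is unmapped at EL0 (KPTI) the entry trampoline uses
	 * tpidrro_el0 as a scratch register and manages its value itself,
	 * so it is left alone here.
	 */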
	else if (!arm64_kernel_unmapped_at_el0())
		write_sysreg(0, tpidrro_el0);

	write_sysreg(*task_user_tls(next), tpidr_el0);
}
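/*
 * With UAO, the user accessors use the unprivileged LDTR/STTR
 * instructions; setting PSTATE.UAO makes those behave as privileged
 * accesses, which is needed while addr_limit is KERNEL_DS.
 */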
/* Restore the UAO state depending on next's addr_limit */
void uao_thread_switch(struct task_struct *next)
{
	if (IS_ENABLED(CONFIG_ARM64_UAO)) {
		if (task_thread_info(next)->addr_limit == KERNEL_DS)
			asm(ALTERNATIVE("nop", SET_PSTATE_UAO(1), ARM64_HAS_UAO));
		else
			asm(ALTERNATIVE("nop", SET_PSTATE_UAO(0), ARM64_HAS_UAO));
	}
}
/*
 * We store our current task in sp_el0, which is clobbered by userspace. Keep a
 * shadow copy so that we can restore this upon entry from userspace.
 *
 * This is *only* for exception entry from EL0, and is not valid until we
 * __switch_to() a user task.
 */
DEFINE_PER_CPU(struct task_struct *, __entry_task);

static void entry_task_switch(struct task_struct *next)
{
	__this_cpu_write(__entry_task, next);
}
/*
 * Thread switching.
 */
__notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
				struct task_struct *next)
{
	struct task_struct *last;

	fpsimd_thread_switch(next);
	tls_thread_switch(next);
	hw_breakpoint_thread_switch(next);
	contextidr_thread_switch(next);
	entry_task_switch(next);
	uao_thread_switch(next);

	/*
	 * Complete any pending TLB or cache maintenance on this CPU in case
	 * the thread migrates to a different CPU.
	 * This full barrier is also required by the membarrier system
	 * call.
	 */
	dsb(ish);

	/* the actual thread switch */
	last = cpu_switch_to(prev, next);

	return last;
}
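/*
 * get_wchan() walks the blocked task's stack and reports the first PC
 * outside the scheduler, i.e. the place where the task is waiting.
 */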
unsigned long get_wchan(struct task_struct *p)
{
	struct stackframe frame;
	unsigned long stack_page, ret = 0;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	stack_page = (unsigned long)try_get_task_stack(p);
	if (!stack_page)
		return 0;

	frame.fp = thread_saved_fp(p);
	frame.pc = thread_saved_pc(p);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	frame.graph = p->curr_ret_stack;
#endif
	do {
		if (unwind_frame(p, &frame))
			goto out;
		if (!in_sched_functions(frame.pc)) {
			ret = frame.pc;
			goto out;
		}
	} while (count++ < 16);

out:
	put_task_stack(p);
	return ret;
}
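/*
 * Randomize the stack pointer within one page, then keep it 16-byte
 * aligned as the AArch64 ABI requires.
 */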
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() & ~PAGE_MASK;
	return sp & ~0xf;
}
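/*
 * brk randomization: a 32M window for compat (32-bit) tasks, a 1G
 * window for native 64-bit tasks.
 */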
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	if (is_compat_task())
		return randomize_page(mm->brk, SZ_32M);
	else
		return randomize_page(mm->brk, SZ_1G);
}
/*
 * Called from setup_new_exec() after (COMPAT_)SET_PERSONALITY.
 */
void arch_setup_new_exec(void)
{
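	/* Record in the mm whether the new executable image is 32-bit. */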
	current->mm->context.flags = is_compat_task() ? MMCF_AARCH32 : 0;
}