/*
 * Based on arch/arm/kernel/process.c
 *
 * Original Copyright (C) 1995  Linus Torvalds
 * Copyright (C) 1996-2000 Russell King - Converted to ARM.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <stdarg.h>

#include <linux/compat.h>
#include <linux/efi.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/user.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/elfcore.h>
#include <linux/pm.h>
#include <linux/tick.h>
#include <linux/utsname.h>
#include <linux/uaccess.h>
#include <linux/random.h>
#include <linux/hw_breakpoint.h>
#include <linux/personality.h>
#include <linux/notifier.h>
#include <trace/events/power.h>

#include <asm/alternative.h>
#include <asm/compat.h>
#include <asm/cacheflush.h>
#include <asm/exec.h>
#include <asm/fpsimd.h>
#include <asm/mmu_context.h>
#include <asm/processor.h>
#include <asm/stacktrace.h>

#ifdef CONFIG_CC_STACKPROTECTOR
#include <linux/stackprotector.h>
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif

/*
 * Function pointers to optional machine specific functions
 */
void (*pm_power_off)(void);
EXPORT_SYMBOL_GPL(pm_power_off);

void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);

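/*
 * Illustrative example (not part of this file): a power-management or PMIC
 * driver typically installs its handler by assigning pm_power_off from its
 * probe path. A minimal sketch, using hypothetical foo_pmic names:
 *
 *	static void foo_pmic_power_off(void)
 *	{
 *		foo_pmic_write(FOO_PMIC_REG_CTRL, FOO_PMIC_SHUTDOWN);
 *	}
 *
 *	static int foo_pmic_probe(struct platform_device *pdev)
 *	{
 *		pm_power_off = foo_pmic_power_off;
 *		return 0;
 *	}
 *
 * machine_power_off() below calls pm_power_off if it is non-NULL; the same
 * pattern applies to arm_pm_restart for machine_restart().
 */
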
/*
 * This is our default idle handler.
 */
void arch_cpu_idle(void)
{
	/*
	 * This should do all the clock switching and wait for interrupt
	 * tricks
	 */
	trace_cpu_idle_rcuidle(1, smp_processor_id());
	cpu_do_idle();
	local_irq_enable();
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
}

#ifdef CONFIG_HOTPLUG_CPU
void arch_cpu_idle_dead(void)
{
	cpu_die();
}
#endif

/*
 * Called by kexec, immediately prior to machine_kexec().
 *
 * This must completely disable all secondary CPUs; simply causing those CPUs
 * to execute e.g. a RAM-based pin loop is not sufficient. This allows the
 * kexec'd kernel to use any and all RAM as it sees fit, without having to
 * avoid any code or data used by any SW CPU pin loop. The CPU hotplug
 * functionality embodied in disable_nonboot_cpus() is used to achieve this.
 */
void machine_shutdown(void)
{
	disable_nonboot_cpus();
}

/*
 * Halting simply requires that the secondary CPUs stop performing any
 * activity (executing tasks, handling interrupts). smp_send_stop()
 * achieves this.
 */
void machine_halt(void)
{
	local_irq_disable();
	smp_send_stop();
	while (1);
}

/*
 * Power-off simply requires that the secondary CPUs stop performing any
 * activity (executing tasks, handling interrupts). smp_send_stop()
 * achieves this. When the system power is turned off, it will take all CPUs
 * with it.
 */
void machine_power_off(void)
{
	local_irq_disable();
	smp_send_stop();
	if (pm_power_off)
		pm_power_off();
}

/*
 * Restart requires that the secondary CPUs stop performing any activity
 * while the primary CPU resets the system. Systems with multiple CPUs must
 * provide a HW restart implementation, to ensure that all CPUs reset at once.
 * This is required so that any code running after reset on the primary CPU
 * doesn't have to co-ordinate with other CPUs to ensure they aren't still
 * executing pre-reset code, and using RAM that the primary CPU's code wishes
 * to use. Implementing such co-ordination would be essentially impossible.
 */
void machine_restart(char *cmd)
{
	/* Disable interrupts first */
	local_irq_disable();
	smp_send_stop();

	/*
	 * UpdateCapsule() depends on the system being reset via
	 * ResetSystem().
	 */
	if (efi_enabled(EFI_RUNTIME_SERVICES))
		efi_reboot(reboot_mode, NULL);

	/* Now call the architecture specific reboot code. */
	if (arm_pm_restart)
		arm_pm_restart(reboot_mode, cmd);
	else
		do_kernel_restart(cmd);

	/*
	 * Whoops - the architecture was unable to reboot.
	 */
	printk("Reboot failed -- System halted\n");
	while (1);
}

void __show_regs(struct pt_regs *regs)
{
	int i, top_reg;
	u64 lr, sp;

	if (compat_user_mode(regs)) {
		lr = regs->compat_lr;
		sp = regs->compat_sp;
		top_reg = 12;
	} else {
		lr = regs->regs[30];
		sp = regs->sp;
		top_reg = 29;
	}

	show_regs_print_info(KERN_DEFAULT);
	print_symbol("PC is at %s\n", instruction_pointer(regs));
	print_symbol("LR is at %s\n", lr);
	printk("pc : [<%016llx>] lr : [<%016llx>] pstate: %08llx\n",
	       regs->pc, lr, regs->pstate);
	printk("sp : %016llx\n", sp);
	for (i = top_reg; i >= 0; i--) {
		printk("x%-2d: %016llx ", i, regs->regs[i]);
		if (i % 2 == 0)
			printk("\n");
	}
	printk("\n");
}

void show_regs(struct pt_regs * regs)
{
	__show_regs(regs);
}

static void tls_thread_flush(void)
{
	write_sysreg(0, tpidr_el0);

	if (is_compat_task()) {
		current->thread.tp_value = 0;

		/*
		 * We need to ensure ordering between the shadow state and the
		 * hardware state, so that we don't corrupt the hardware state
		 * with a stale shadow state during context switch.
		 */
		barrier();
		write_sysreg(0, tpidrro_el0);
	}
}

void flush_thread(void)
{
	fpsimd_flush_thread();
	tls_thread_flush();
	flush_ptrace_hw_breakpoint(current);
}

void release_thread(struct task_struct *dead_task)
{
}

int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	if (current->mm)
		fpsimd_preserve_current_state();
	*dst = *src;
	return 0;
}

asmlinkage void ret_from_fork(void) asm("ret_from_fork");

int copy_thread(unsigned long clone_flags, unsigned long stack_start,
		unsigned long stk_sz, struct task_struct *p)
{
	struct pt_regs *childregs = task_pt_regs(p);

	memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context));

	if (likely(!(p->flags & PF_KTHREAD))) {
		*childregs = *current_pt_regs();
		childregs->regs[0] = 0;

		/*
		 * Read the current TLS pointer from tpidr_el0 as it may be
		 * out-of-sync with the saved value.
		 */
		*task_user_tls(p) = read_sysreg(tpidr_el0);

		if (stack_start) {
			if (is_compat_thread(task_thread_info(p)))
				childregs->compat_sp = stack_start;
			else
				childregs->sp = stack_start;
		}

		/*
		 * If a TLS pointer was passed to clone (4th argument), use it
		 * for the new thread.
		 */
		if (clone_flags & CLONE_SETTLS)
			p->thread.tp_value = childregs->regs[3];
	} else {
		memset(childregs, 0, sizeof(struct pt_regs));
		childregs->pstate = PSR_MODE_EL1h;
		if (IS_ENABLED(CONFIG_ARM64_UAO) &&
		    cpus_have_cap(ARM64_HAS_UAO))
			childregs->pstate |= PSR_UAO_BIT;
		p->thread.cpu_context.x19 = stack_start;
		p->thread.cpu_context.x20 = stk_sz;
	}
	p->thread.cpu_context.pc = (unsigned long)ret_from_fork;
	p->thread.cpu_context.sp = (unsigned long)childregs;

	ptrace_hw_copy_thread(p);

	return 0;
}

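/*
 * Illustrative note (not code from this file): the TLS pointer is the 4th
 * argument of the raw clone() syscall on arm64, so it arrives in x3 and is
 * read above as childregs->regs[3]. A minimal sketch of a userspace caller,
 * assuming the CLONE_BACKWARDS argument order and a hypothetical
 * child_stack_top / tls_block pair:
 *
 *	syscall(__NR_clone,
 *		CLONE_VM | CLONE_SETTLS | SIGCHLD,	// flags
 *		child_stack_top,			// new sp
 *		NULL,					// parent_tidptr
 *		tls_block,				// tls, lands in regs[3]
 *		NULL);					// child_tidptr
 */
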
static void tls_thread_switch(struct task_struct *next)
{
	unsigned long tpidr, tpidrro;

	tpidr = read_sysreg(tpidr_el0);
	*task_user_tls(current) = tpidr;

	tpidr = *task_user_tls(next);
	tpidrro = is_compat_thread(task_thread_info(next)) ?
		  next->thread.tp_value : 0;

	write_sysreg(tpidr, tpidr_el0);
	write_sysreg(tpidrro, tpidrro_el0);
}

/* Restore the UAO state depending on next's addr_limit */
void uao_thread_switch(struct task_struct *next)
{
	if (IS_ENABLED(CONFIG_ARM64_UAO)) {
		if (task_thread_info(next)->addr_limit == KERNEL_DS)
			asm(ALTERNATIVE("nop", SET_PSTATE_UAO(1), ARM64_HAS_UAO));
		else
			asm(ALTERNATIVE("nop", SET_PSTATE_UAO(0), ARM64_HAS_UAO));
	}
}

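/*
 * Illustrative example (not code from this file, assumes the classic
 * set_fs() pattern): a task that has done
 *
 *	mm_segment_t old_fs = get_fs();
 *	set_fs(KERNEL_DS);
 *	ret = copy_from_user(&val, kernel_ptr, sizeof(val));
 *	set_fs(old_fs);
 *
 * and is preempted between the two set_fs() calls still has
 * addr_limit == KERNEL_DS. On the way back in, the switch above sets
 * PSTATE.UAO so that the unprivileged load/store instructions used for
 * uaccess behave as privileged accesses and can reach kernel addresses.
 */
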
/*
 * Thread switching.
 */
struct task_struct *__switch_to(struct task_struct *prev,
				struct task_struct *next)
{
	struct task_struct *last;

	fpsimd_thread_switch(next);
	tls_thread_switch(next);
	hw_breakpoint_thread_switch(next);
	contextidr_thread_switch(next);
	uao_thread_switch(next);

	/*
	 * Complete any pending TLB or cache maintenance on this CPU in case
	 * the thread migrates to a different CPU.
	 */
	dsb(ish);

	/* the actual thread switch */
	last = cpu_switch_to(prev, next);

	return last;
}

unsigned long get_wchan(struct task_struct *p)
{
	struct stackframe frame;
	unsigned long stack_page;
	int count = 0;
	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	frame.fp = thread_saved_fp(p);
	frame.sp = thread_saved_sp(p);
	frame.pc = thread_saved_pc(p);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	frame.graph = p->curr_ret_stack;
#endif
	stack_page = (unsigned long)task_stack_page(p);
	do {
		if (frame.sp < stack_page ||
		    frame.sp >= stack_page + THREAD_SIZE ||
		    unwind_frame(p, &frame))
			return 0;
		if (!in_sched_functions(frame.pc))
			return frame.pc;
	} while (count++ < 16);
	return 0;
}

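/*
 * Illustrative usage (not part of this file): get_wchan() backs the "wchan"
 * entries in procfs, reporting the function in which a sleeping task is
 * blocked, so one might see, for example:
 *
 *	$ cat /proc/1/wchan
 *	ep_poll
 */
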
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() & ~PAGE_MASK;
	return sp & ~0xf;
}

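/*
 * Worked example of the arithmetic above (illustrative): with 4K pages,
 * ~PAGE_MASK == 0xfff, so "get_random_int() & ~PAGE_MASK" yields a random
 * offset of 0..4095 bytes. sp is moved down by that sub-page amount, and
 * "sp & ~0xf" then rounds the result down to the 16-byte alignment that the
 * AArch64 procedure call standard requires of the stack pointer.
 */
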
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	if (is_compat_task())
		return randomize_page(mm->brk, 0x02000000);
	else
		return randomize_page(mm->brk, 0x40000000);
}
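
/*
 * Illustrative numbers (not from this file): randomize_page(start, range)
 * returns start plus a random, page-aligned offset below range, so the heap
 * break is placed within a 32MB (0x02000000) window for compat (AArch32)
 * tasks and within a 1GB (0x40000000) window for native 64-bit tasks.
 */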