// SPDX-License-Identifier: GPL-2.0
/*
 * Author: Huacai Chen <chenhuacai@loongson.cn>
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 *
 * Derived from MIPS:
 * Copyright (C) 1994 - 1999, 2000 by Ralf Baechle and others.
 * Copyright (C) 2005, 2006 by Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2004 Thiemo Seufer
 * Copyright (C) 2013 Imagination Technologies Ltd.
 */
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/personality.h>
#include <linux/sys.h>
#include <linux/completion.h>
#include <linux/kallsyms.h>
#include <linux/random.h>
#include <linux/prctl.h>
#include <linux/nmi.h>

#include <asm/asm.h>
#include <asm/bootinfo.h>
#include <asm/cpu.h>
#include <asm/elf.h>
#include <asm/fpu.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/loongarch.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/reg.h>
#include <asm/unwind.h>
#include <asm/vdso.h>
/*
 * Idle related variables and functions
 */

unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
EXPORT_SYMBOL(boot_option_idle_override);
#ifdef CONFIG_HOTPLUG_CPU
void arch_cpu_idle_dead(void)
{
        play_dead();
}
#endif
asmlinkage void ret_from_fork(void);
asmlinkage void ret_from_kernel_thread(void);
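/*
 * start_thread() sets up the register state for a freshly exec'ed user
 * task: CRMD/PRMD drop from kernel to user privilege (PLV_USER), and the
 * FPU is disabled in EUEN so that the first FP instruction traps and the
 * FPU context is only allocated when it is actually used.
 */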
void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp)
{
        unsigned long crmd;
        unsigned long prmd;
        unsigned long euen;

        /* New thread loses kernel privileges. */
        crmd = regs->csr_crmd & ~(PLV_MASK);
        crmd |= PLV_USER;
        regs->csr_crmd = crmd;

        prmd = regs->csr_prmd & ~(PLV_MASK);
        prmd |= PLV_USER;
        regs->csr_prmd = prmd;

        euen = regs->csr_euen & ~(CSR_EUEN_FPEN);
        regs->csr_euen = euen;
        lose_fpu(0);

        clear_thread_flag(TIF_LSX_CTX_LIVE);
        clear_thread_flag(TIF_LASX_CTX_LIVE);
        clear_used_math();
        regs->csr_era = pc;
        regs->regs[3] = sp;
}
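/* Nothing architecture-specific to release when a thread exits. */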
void exit_thread(struct task_struct *tsk)
{
}
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
        /*
         * Save any process state which is live in hardware registers to the
         * parent context prior to duplication. This prevents the new child
         * state becoming stale if the parent is preempted before copy_thread()
         * gets a chance to save the parent's live hardware registers to the
         * child context.
         */
        preempt_disable();
        if (is_fpu_owner())
                save_fp(current);
        preempt_enable();

        if (used_math())
                memcpy(dst, src, sizeof(struct task_struct));
        else
                memcpy(dst, src, offsetof(struct task_struct, thread.fpu.fpr));

        return 0;
}
/*
 * Copy architecture-specific thread state
 */
int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
        unsigned long childksp;
        unsigned long tls = args->tls;
        unsigned long usp = args->stack;
        unsigned long clone_flags = args->flags;
        struct pt_regs *childregs, *regs = current_pt_regs();

        childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32;

        /* Set up new TSS. */
        childregs = (struct pt_regs *) childksp - 1;
        /* Put the stack after the struct pt_regs. */
        childksp = (unsigned long) childregs;
        p->thread.sched_cfa = 0;
        p->thread.csr_euen = 0;
        p->thread.csr_crmd = csr_read32(LOONGARCH_CSR_CRMD);
        p->thread.csr_prmd = csr_read32(LOONGARCH_CSR_PRMD);
        p->thread.csr_ecfg = csr_read32(LOONGARCH_CSR_ECFG);
        if (unlikely(args->fn)) {
                /* Kernel thread */
                p->thread.reg03 = childksp;
                p->thread.reg23 = (unsigned long)args->fn;
                p->thread.reg24 = (unsigned long)args->fn_arg;
                p->thread.reg01 = (unsigned long)ret_from_kernel_thread;
                p->thread.sched_ra = (unsigned long)ret_from_kernel_thread;
                memset(childregs, 0, sizeof(struct pt_regs));
                childregs->csr_euen = p->thread.csr_euen;
                childregs->csr_crmd = p->thread.csr_crmd;
                childregs->csr_prmd = p->thread.csr_prmd;
                childregs->csr_ecfg = p->thread.csr_ecfg;
                goto out;
        }

        /* User thread */
        *childregs = *regs;
        childregs->regs[4] = 0; /* Child gets zero as return value */
        if (usp)
                childregs->regs[3] = usp;

        p->thread.reg03 = (unsigned long) childregs;
        p->thread.reg01 = (unsigned long) ret_from_fork;
        p->thread.sched_ra = (unsigned long) ret_from_fork;

        /*
         * New tasks lose permission to use the FPU. This accelerates context
         * switching for most programs since they don't use the FPU.
         */
        childregs->csr_euen = 0;

        clear_tsk_thread_flag(p, TIF_USEDFPU);
        clear_tsk_thread_flag(p, TIF_USEDSIMD);
        clear_tsk_thread_flag(p, TIF_LSX_CTX_LIVE);
        clear_tsk_thread_flag(p, TIF_LASX_CTX_LIVE);

        if (clone_flags & CLONE_SETTLS)
                childregs->regs[2] = tls;

out:
        return 0;
}
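/*
 * __get_wchan() - return the "wait channel": the text address at which a
 * blocked task is sleeping. The saved frame is unwound until a PC outside
 * the scheduler (see in_sched_functions()) is found.
 */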
unsigned long __get_wchan(struct task_struct *task)
{
        unsigned long pc = 0;
        struct unwind_state state;

        if (!try_get_task_stack(task))
                return 0;

        unwind_start(&state, task, NULL);
        state.sp = thread_saved_fp(task);
        get_stack_info(state.sp, state.task, &state.stack_info);
        state.pc = thread_saved_ra(task);
#ifdef CONFIG_UNWINDER_PROLOGUE
        state.type = UNWINDER_PROLOGUE;
#endif
        for (; !unwind_done(&state); unwind_next_frame(&state)) {
                pc = unwind_get_return_address(&state);
                if (!pc)
                        break;
                if (in_sched_functions(pc))
                        continue;
                break;
        }

        put_task_stack(task);

        return pc;
}
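/*
 * The per-CPU IRQ stack keeps the interrupted context's stack pointer in
 * the slot just above it (read back below as *(unsigned long *)end), so a
 * backtrace can chain from the IRQ stack to the task stack via next_sp.
 */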
bool in_irq_stack(unsigned long stack, struct stack_info *info)
{
        unsigned long nextsp;
        unsigned long begin = (unsigned long)this_cpu_read(irq_stack);
        unsigned long end = begin + IRQ_STACK_START;

        if (stack < begin || stack >= end)
                return false;

        nextsp = *(unsigned long *)end;
        if (nextsp & (SZREG - 1))
                return false;

        info->begin = begin;
        info->end = end;
        info->next_sp = nextsp;
        info->type = STACK_TYPE_IRQ;

        return true;
}
bool in_task_stack(unsigned long stack, struct task_struct *task,
                   struct stack_info *info)
{
        unsigned long begin = (unsigned long)task_stack_page(task);
        unsigned long end = begin + THREAD_SIZE - 32;

        if (stack < begin || stack >= end)
                return false;

        info->begin = begin;
        info->end = end;
        info->next_sp = 0;
        info->type = STACK_TYPE_TASK;

        return true;
}
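/*
 * get_stack_info() - classify a stack address as belonging to the task
 * stack or to the current CPU's IRQ stack. Fills *info on success and
 * returns -EINVAL (STACK_TYPE_UNKNOWN) for unaligned or foreign addresses.
 */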
int get_stack_info(unsigned long stack, struct task_struct *task,
                   struct stack_info *info)
{
        task = task ? : current;

        if (!stack || stack & (SZREG - 1))
                goto unknown;

        if (in_task_stack(stack, task, info))
                return 0;

        if (task != current)
                goto unknown;

        if (in_irq_stack(stack, info))
                return 0;

unknown:
        info->type = STACK_TYPE_UNKNOWN;
        return -EINVAL;
}
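/*
 * stack_top() - highest usable address for the user stack, leaving room
 * for the VDSO and, when ASLR is in effect, for randomizing the VDSO base.
 */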
unsigned long stack_top(void)
{
        unsigned long top = TASK_SIZE & PAGE_MASK;

        /* Space for the VDSO & data page */
        top -= PAGE_ALIGN(current->thread.vdso->size);
        top -= PAGE_SIZE;

        /* Space to randomize the VDSO base */
        if (current->flags & PF_RANDOMIZE)
                top -= VDSO_RANDOMIZE_SIZE;

        return top;
}
/*
 * Don't forget that the stack pointer must be aligned on an 8-byte
 * boundary for the 32-bit ABI and on a 16-byte boundary for the 64-bit ABI.
 */
unsigned long arch_align_stack(unsigned long sp)
{
        if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
                sp -= get_random_int() & ~PAGE_MASK;

        return sp & STACK_ALIGN;
}
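/*
 * Remote backtrace support: raise_backtrace() sends an asynchronous
 * smp_call_function IPI to each CPU in the mask, and handle_backtrace()
 * dumps that CPU's stack via nmi_cpu_backtrace(). The backtrace_csd_busy
 * mask guards against reusing a call_single_data_t that a hung CPU has
 * not yet consumed.
 */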
static DEFINE_PER_CPU(call_single_data_t, backtrace_csd);
static struct cpumask backtrace_csd_busy;

static void handle_backtrace(void *info)
{
        nmi_cpu_backtrace(get_irq_regs());
        cpumask_clear_cpu(smp_processor_id(), &backtrace_csd_busy);
}
static void raise_backtrace(cpumask_t *mask)
{
        call_single_data_t *csd;
        unsigned int cpu;

        for_each_cpu(cpu, mask) {
                /*
                 * If we previously sent an IPI to the target CPU & it hasn't
                 * cleared its bit in the busy cpumask then it didn't handle
                 * our previous IPI & it's not safe for us to reuse the
                 * call_single_data_t.
                 */
                if (cpumask_test_and_set_cpu(cpu, &backtrace_csd_busy)) {
                        pr_warn("Unable to send backtrace IPI to CPU%u - perhaps it hung?\n",
                                cpu);
                        continue;
                }

                csd = &per_cpu(backtrace_csd, cpu);
                csd->func = handle_backtrace;
                smp_call_function_single_async(cpu, csd);
        }
}
void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
{
        nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_backtrace);
}
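/*
 * Flatten a pt_regs into the LOONGARCH_EF_*-indexed layout consumed by
 * the ELF core dump and ptrace regset code.
 */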
#ifdef CONFIG_64BIT
void loongarch_dump_regs64(u64 *uregs, const struct pt_regs *regs)
{
        unsigned int i;

        for (i = LOONGARCH_EF_R1; i <= LOONGARCH_EF_R31; i++) {
                uregs[i] = regs->regs[i - LOONGARCH_EF_R0];
        }

        uregs[LOONGARCH_EF_ORIG_A0] = regs->orig_a0;
        uregs[LOONGARCH_EF_CSR_ERA] = regs->csr_era;
        uregs[LOONGARCH_EF_CSR_BADV] = regs->csr_badvaddr;
        uregs[LOONGARCH_EF_CSR_CRMD] = regs->csr_crmd;
        uregs[LOONGARCH_EF_CSR_PRMD] = regs->csr_prmd;
        uregs[LOONGARCH_EF_CSR_EUEN] = regs->csr_euen;
        uregs[LOONGARCH_EF_CSR_ECFG] = regs->csr_ecfg;
        uregs[LOONGARCH_EF_CSR_ESTAT] = regs->csr_estat;
}
#endif /* CONFIG_64BIT */