/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 */

#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/asm.h>
#include <asm/csr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>

        .text
        .altmacro

/*
 * Prepares to enter a system call or exception by saving all registers to the
 * stack.
 */
        .macro SAVE_ALL
        LOCAL _restore_kernel_tpsp
        LOCAL _save_context

        /*
         * If coming from userspace, preserve the user thread pointer and load
         * the kernel thread pointer. If we came from the kernel, sscratch
         * will contain 0, and we should continue on the current TP.
         */
        csrrw tp, CSR_SSCRATCH, tp
        bnez tp, _save_context

_restore_kernel_tpsp:
        csrr tp, CSR_SSCRATCH
        REG_S sp, TASK_TI_KERNEL_SP(tp)
_save_context:
        REG_S sp, TASK_TI_USER_SP(tp)
        REG_L sp, TASK_TI_KERNEL_SP(tp)
        addi sp, sp, -(PT_SIZE_ON_STACK)
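        /*
         * sp now points at a freshly reserved struct pt_regs frame on the
         * kernel stack; the PT_* offsets used below come from asm-offsets
         * and select the pt_regs slot for each general-purpose register.
         */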
        REG_S x1,  PT_RA(sp)
        REG_S x3,  PT_GP(sp)
        REG_S x5,  PT_T0(sp)
        REG_S x6,  PT_T1(sp)
        REG_S x7,  PT_T2(sp)
        REG_S x8,  PT_S0(sp)
        REG_S x9,  PT_S1(sp)
        REG_S x10, PT_A0(sp)
        REG_S x11, PT_A1(sp)
        REG_S x12, PT_A2(sp)
        REG_S x13, PT_A3(sp)
        REG_S x14, PT_A4(sp)
        REG_S x15, PT_A5(sp)
        REG_S x16, PT_A6(sp)
        REG_S x17, PT_A7(sp)
        REG_S x18, PT_S2(sp)
        REG_S x19, PT_S3(sp)
        REG_S x20, PT_S4(sp)
        REG_S x21, PT_S5(sp)
        REG_S x22, PT_S6(sp)
        REG_S x23, PT_S7(sp)
        REG_S x24, PT_S8(sp)
        REG_S x25, PT_S9(sp)
        REG_S x26, PT_S10(sp)
        REG_S x27, PT_S11(sp)
        REG_S x28, PT_T3(sp)
        REG_S x29, PT_T4(sp)
        REG_S x30, PT_T5(sp)
        REG_S x31, PT_T6(sp)

        /*
         * Disable user-mode memory access as it should only be set in the
         * actual user copy routines.
         *
         * Disable the FPU to detect illegal usage of floating point in kernel
         * space.
         */
        li t0, SR_SUM | SR_FS

        REG_L s0, TASK_TI_USER_SP(tp)
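        /*
         * csrrc atomically clears SR_SUM and SR_FS in sstatus and returns
         * the previous value in s1; that old value is the sstatus image
         * saved into pt_regs below.
         */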
        csrrc s1, CSR_SSTATUS, t0
        csrr s2, CSR_SEPC
        csrr s3, CSR_STVAL
        csrr s4, CSR_SCAUSE
        csrr s5, CSR_SSCRATCH
        REG_S s0, PT_SP(sp)
        REG_S s1, PT_SSTATUS(sp)
        REG_S s2, PT_SEPC(sp)
        REG_S s3, PT_SBADADDR(sp)
        REG_S s4, PT_SCAUSE(sp)
        REG_S s5, PT_TP(sp)
        .endm

/*
 * Prepares to return from a system call or exception by restoring all
 * registers from the stack.
 */
        .macro RESTORE_ALL
        REG_L a0, PT_SSTATUS(sp)
        /*
         * The current load reservation is effectively part of the processor's
         * state, in the sense that load reservations cannot be shared between
         * different hart contexts. We can't actually save and restore a load
         * reservation, so instead here we clear any existing reservation --
         * it's always legal for implementations to clear load reservations at
         * any point (as long as the forward progress guarantee is kept, but
         * we'll ignore that here).
         *
         * Dangling load reservations can be the result of taking a trap in the
         * middle of an LR/SC sequence, but can also be the result of a taken
         * forward branch around an SC -- which is how we implement CAS. As a
         * result we need to clear reservations between the last CAS and the
         * jump back to the new context. While it is unlikely the store
         * completes, implementations are allowed to expand reservations to be
         * arbitrarily large.
         */
        REG_L a2, PT_SEPC(sp)
        REG_SC x0, a2, PT_SEPC(sp)
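        /*
         * For illustration only (this sequence lives in the atomics code,
         * not here): a compare-and-swap built from LR/SC takes a forward
         * branch around the SC when the comparison fails, leaving the
         * reservation acquired by the LR dangling:
         *
         * 0:   lr.w   t0, (a0)        # acquire a reservation on (a0)
         *      bne    t0, a1, 1f      # mismatch: branch around the SC
         *      sc.w   t1, a2, (a0)    # only executed on a match
         *      bnez   t1, 0b
         * 1:                          # reservation may still be held here
         *
         * The dummy store-conditional above (REG_SC into PT_SEPC) kills any
         * such leftover reservation before we switch contexts.
         */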

        csrw CSR_SSTATUS, a0
        csrw CSR_SEPC, a2

        REG_L x1,  PT_RA(sp)
        REG_L x3,  PT_GP(sp)
        REG_L x4,  PT_TP(sp)
        REG_L x5,  PT_T0(sp)
        REG_L x6,  PT_T1(sp)
        REG_L x7,  PT_T2(sp)
        REG_L x8,  PT_S0(sp)
        REG_L x9,  PT_S1(sp)
        REG_L x10, PT_A0(sp)
        REG_L x11, PT_A1(sp)
        REG_L x12, PT_A2(sp)
        REG_L x13, PT_A3(sp)
        REG_L x14, PT_A4(sp)
        REG_L x15, PT_A5(sp)
        REG_L x16, PT_A6(sp)
        REG_L x17, PT_A7(sp)
        REG_L x18, PT_S2(sp)
        REG_L x19, PT_S3(sp)
        REG_L x20, PT_S4(sp)
        REG_L x21, PT_S5(sp)
        REG_L x22, PT_S6(sp)
        REG_L x23, PT_S7(sp)
        REG_L x24, PT_S8(sp)
        REG_L x25, PT_S9(sp)
        REG_L x26, PT_S10(sp)
        REG_L x27, PT_S11(sp)
        REG_L x28, PT_T3(sp)
        REG_L x29, PT_T4(sp)
        REG_L x30, PT_T5(sp)
        REG_L x31, PT_T6(sp)

        REG_L x2,  PT_SP(sp)
        .endm

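/*
 * Without kernel preemption there is nothing special to do when returning
 * to kernel mode, so resume_kernel simply collapses into restore_all.
 */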
#if !IS_ENABLED(CONFIG_PREEMPT)
.set resume_kernel, restore_all
#endif

ENTRY(handle_exception)
        SAVE_ALL

        /*
         * Set the sscratch register to 0, so that if a recursive exception
         * occurs, the exception vector knows it came from the kernel.
         */
        csrw CSR_SSCRATCH, x0

        /* Load the global pointer */
.option push
.option norelax
        la gp, __global_pointer$
.option pop

        la ra, ret_from_exception
        /*
         * MSB of cause differentiates between
         * interrupts and exceptions
         */
        bge s4, zero, 1f

        /* Handle interrupts */
        move a0, sp /* pt_regs */
        tail do_IRQ
1:
        /*
         * Exceptions run with interrupts enabled or disabled depending on
         * the state of sstatus.SR_SPIE.
         */
        andi t0, s1, SR_SPIE
        beqz t0, 1f
        csrs CSR_SSTATUS, SR_SIE

1:
        /* Handle syscalls */
        li t0, EXC_SYSCALL
        beq s4, t0, handle_syscall

        /* Handle other exceptions */
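        /*
         * Dispatch through the exception vector table: scause (in s4) is
         * scaled by the pointer size (RISCV_LGPTR is its log2) to form an
         * offset into excp_vect_table; anything at or past the end of the
         * table falls through to do_trap_unknown.
         */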
        slli t0, s4, RISCV_LGPTR
        la t1, excp_vect_table
        la t2, excp_vect_table_end
        move a0, sp /* pt_regs */
        add t0, t1, t0
        /* Check if exception code lies within bounds */
        bgeu t0, t2, 1f
        REG_L t0, 0(t0)
        jr t0
1:
        tail do_trap_unknown

handle_syscall:
        /* save the initial A0 value (needed in signal handlers) */
        REG_S a0, PT_ORIG_A0(sp)
        /*
         * Advance SEPC to avoid executing the original
         * scall instruction on sret
         */
        addi s2, s2, 0x4
        REG_S s2, PT_SEPC(sp)
        /* Trace syscalls, but only if requested by the user. */
        REG_L t0, TASK_TI_FLAGS(tp)
        andi t0, t0, _TIF_SYSCALL_WORK
        bnez t0, handle_syscall_trace_enter
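        /*
         * Rough C equivalent of the dispatch below (illustrative only):
         *
         *      fn = (a7 < __NR_syscalls) ? sys_call_table[a7]
         *                                : sys_ni_syscall;
         *      a0 = fn(a0, a1, a2, a3, a4, a5);
         */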
check_syscall_nr:
        /* Check to make sure we don't jump to a bogus syscall number. */
        li t0, __NR_syscalls
        la s0, sys_ni_syscall
        /* Syscall number held in a7 */
        bgeu a7, t0, 1f
        la s0, sys_call_table
        slli t0, a7, RISCV_LGPTR
        add s0, s0, t0
        REG_L s0, 0(s0)
1:
        jalr s0

ret_from_syscall:
        /* Set user a0 to kernel a0 */
        REG_S a0, PT_A0(sp)
        /* Trace syscalls, but only if requested by the user. */
        REG_L t0, TASK_TI_FLAGS(tp)
        andi t0, t0, _TIF_SYSCALL_WORK
        bnez t0, handle_syscall_trace_exit

ret_from_exception:
        REG_L s0, PT_SSTATUS(sp)
        csrc CSR_SSTATUS, SR_SIE
        andi s0, s0, SR_SPP
        bnez s0, resume_kernel

resume_userspace:
        /* Interrupts must be disabled here so flags are checked atomically */
        REG_L s0, TASK_TI_FLAGS(tp) /* current_thread_info->flags */
        andi s1, s0, _TIF_WORK_MASK
        bnez s1, work_pending

        /* Save unwound kernel stack pointer in thread_info */
        addi s0, sp, PT_SIZE_ON_STACK
        REG_S s0, TASK_TI_KERNEL_SP(tp)

        /*
         * Save TP into sscratch, so we can find the kernel data structures
         * again.
         */
        csrw CSR_SSCRATCH, tp

restore_all:
        RESTORE_ALL
        sret

#if IS_ENABLED(CONFIG_PREEMPT)
resume_kernel:
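        /*
         * Trapped from kernel mode: preempt only when the preempt count is
         * zero and a reschedule has been requested. preempt_schedule_irq()
         * is called with interrupts still disabled (SR_SIE was cleared at
         * ret_from_exception).
         */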
        REG_L s0, TASK_TI_PREEMPT_COUNT(tp)
        bnez s0, restore_all
need_resched:
        REG_L s0, TASK_TI_FLAGS(tp)
        andi s0, s0, _TIF_NEED_RESCHED
        beqz s0, restore_all
        call preempt_schedule_irq
        j need_resched
#endif

work_pending:
        /* Enter slow path for supplementary processing */
        la ra, ret_from_exception
        andi s1, s0, _TIF_NEED_RESCHED
        bnez s1, work_resched
work_notifysig:
        /* Handle pending signals and notify-resume requests */
        csrs CSR_SSTATUS, SR_SIE /* Enable interrupts for do_notify_resume() */
        move a0, sp /* pt_regs */
        move a1, s0 /* current_thread_info->flags */
        tail do_notify_resume
work_resched:
        tail schedule

        /* Slow paths for ptrace. */
handle_syscall_trace_enter:
        move a0, sp
        call do_syscall_trace_enter
        REG_L a0, PT_A0(sp)
        REG_L a1, PT_A1(sp)
        REG_L a2, PT_A2(sp)
        REG_L a3, PT_A3(sp)
        REG_L a4, PT_A4(sp)
        REG_L a5, PT_A5(sp)
        REG_L a6, PT_A6(sp)
        REG_L a7, PT_A7(sp)
        j check_syscall_nr
handle_syscall_trace_exit:
        move a0, sp
        call do_syscall_trace_exit
        j ret_from_exception

END(handle_exception)

ENTRY(ret_from_fork)
        la ra, ret_from_exception
        tail schedule_tail
ENDPROC(ret_from_fork)

ENTRY(ret_from_kernel_thread)
        call schedule_tail
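        /*
         * s0 holds the thread function and s1 its argument; they were
         * stashed in the new task's saved callee-saved registers by
         * copy_thread() and restored by __switch_to.
         */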
        /* Call fn(arg) */
        la ra, ret_from_exception
        move a0, s1
        jr s0
ENDPROC(ret_from_kernel_thread)


/*
 * Integer register context switch
 * The callee-saved registers must be saved and restored.
 *
 * a0: previous task_struct (must be preserved across the switch)
 * a1: next task_struct
 *
 * The value of a0 and a1 must be preserved by this function, as that's how
 * arguments are passed to schedule_tail.
 */
ENTRY(__switch_to)
        /* Save context into prev->thread */
        li a4, TASK_THREAD_RA
        add a3, a0, a4
        add a4, a1, a4
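        /*
         * a3 and a4 now point at prev->thread.ra and next->thread.ra; the
         * TASK_THREAD_*_RA offsets below are generated relative to
         * thread.ra, hence the _RA suffix.
         */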
        REG_S ra, TASK_THREAD_RA_RA(a3)
        REG_S sp, TASK_THREAD_SP_RA(a3)
        REG_S s0, TASK_THREAD_S0_RA(a3)
        REG_S s1, TASK_THREAD_S1_RA(a3)
        REG_S s2, TASK_THREAD_S2_RA(a3)
        REG_S s3, TASK_THREAD_S3_RA(a3)
        REG_S s4, TASK_THREAD_S4_RA(a3)
        REG_S s5, TASK_THREAD_S5_RA(a3)
        REG_S s6, TASK_THREAD_S6_RA(a3)
        REG_S s7, TASK_THREAD_S7_RA(a3)
        REG_S s8, TASK_THREAD_S8_RA(a3)
        REG_S s9, TASK_THREAD_S9_RA(a3)
        REG_S s10, TASK_THREAD_S10_RA(a3)
        REG_S s11, TASK_THREAD_S11_RA(a3)
        /* Restore context from next->thread */
        REG_L ra, TASK_THREAD_RA_RA(a4)
        REG_L sp, TASK_THREAD_SP_RA(a4)
        REG_L s0, TASK_THREAD_S0_RA(a4)
        REG_L s1, TASK_THREAD_S1_RA(a4)
        REG_L s2, TASK_THREAD_S2_RA(a4)
        REG_L s3, TASK_THREAD_S3_RA(a4)
        REG_L s4, TASK_THREAD_S4_RA(a4)
        REG_L s5, TASK_THREAD_S5_RA(a4)
        REG_L s6, TASK_THREAD_S6_RA(a4)
        REG_L s7, TASK_THREAD_S7_RA(a4)
        REG_L s8, TASK_THREAD_S8_RA(a4)
        REG_L s9, TASK_THREAD_S9_RA(a4)
        REG_L s10, TASK_THREAD_S10_RA(a4)
        REG_L s11, TASK_THREAD_S11_RA(a4)
        /* Swap the CPU entry around. */
        lw a3, TASK_TI_CPU(a0)
        lw a4, TASK_TI_CPU(a1)
        sw a3, TASK_TI_CPU(a1)
        sw a4, TASK_TI_CPU(a0)
#if TASK_TI != 0
#error "TASK_TI != 0: tp will contain a 'struct thread_info', not a 'struct task_struct' so get_current() won't work."
        addi tp, a1, TASK_TI
#else
        move tp, a1
#endif
        ret
ENDPROC(__switch_to)

        .section ".rodata"
        /* Exception vector table */
ENTRY(excp_vect_table)
        RISCV_PTR do_trap_insn_misaligned
        RISCV_PTR do_trap_insn_fault
        RISCV_PTR do_trap_insn_illegal
        RISCV_PTR do_trap_break
        RISCV_PTR do_trap_load_misaligned
        RISCV_PTR do_trap_load_fault
        RISCV_PTR do_trap_store_misaligned
        RISCV_PTR do_trap_store_fault
        RISCV_PTR do_trap_ecall_u /* system call, gets intercepted */
        RISCV_PTR do_trap_ecall_s
        RISCV_PTR do_trap_unknown
        RISCV_PTR do_trap_ecall_m
        RISCV_PTR do_page_fault   /* instruction page fault */
        RISCV_PTR do_page_fault   /* load page fault */
        RISCV_PTR do_trap_unknown
        RISCV_PTR do_page_fault   /* store page fault */
excp_vect_table_end:
END(excp_vect_table)