// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2005-2017 Andes Technology Corporation

#include <linux/linkage.h>
#include <asm/unistd.h>
#include <asm/assembler.h>
#include <asm/nds32.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/current.h>
#include <asm/fpu.h>

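/*
 * With hardware zero-overhead loops (HWZOL), the loop begin ($LB),
 * loop end ($LE) and loop count ($LC) user registers are part of the
 * user context; pop_zol writes the values reloaded into $r14-$r16
 * back into those registers on the way out to user mode.
 */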
#ifdef CONFIG_HWZOL
	.macro pop_zol
	mtusr	$r14, $LB
	mtusr	$r15, $LE
	mtusr	$r16, $LC
	.endm
#endif

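/*
 * restore_user_regs_first: disable interrupts (setgie.d plus isb to
 * serialize), then reload the system registers ($SP_USR, $IPC, the
 * PSW family and, with CONFIG_FPU, $FUCOP_CTL) from the pt_regs
 * frame on the kernel stack.
 */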
	.macro restore_user_regs_first
	setgie.d
	isb
#if defined(CONFIG_FPU)
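	/*
	 * $FUCOP_CTL is only written back when an FPU is actually
	 * present: has_fpu is a byte flag set when FPU hardware is
	 * detected, so FPU-less CPUs skip the mtsr.
	 */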
	addi	$sp, $sp, OSP_OFFSET
	lmw.adm	$r12, [$sp], $r25, #0x0
	sethi	$p0, hi20(has_fpu)
	lbsi	$p0, [$p0+lo12(has_fpu)]
	beqz	$p0, 2f
	mtsr	$r25, $FUCOP_CTL
2:
#else
	addi	$sp, $sp, FUCOP_CTL_OFFSET
	lmw.adm	$r12, [$sp], $r24, #0x0
#endif
	mtsr	$r12, $SP_USR
	mtsr	$r13, $IPC
#ifdef CONFIG_HWZOL
	pop_zol
#endif
	mtsr	$r19, $PSW
	mtsr	$r20, $IPSW
	mtsr	$r21, $P_IPSW
	mtsr	$r22, $P_IPC
	mtsr	$r23, $P_P0
	mtsr	$r24, $P_P1
	lmw.adm	$sp, [$sp], $sp, #0xe
	.endm

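/*
 * restore_user_regs_last: pop the saved stack pointer into $p0;
 * cmovn copies it into $sp when it is non-zero, and iret then
 * returns to the interrupted context.
 */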
	.macro restore_user_regs_last
	pop	$p0
	cmovn	$sp, $p0, $p0

	iret
	nop

	.endm

	.macro restore_user_regs
	restore_user_regs_first
	lmw.adm	$r0, [$sp], $r25, #0x0
	addi	$sp, $sp, OSP_OFFSET
	restore_user_regs_last
	.endm

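/*
 * fast_restore_user_regs differs from restore_user_regs only in that
 * it does not reload $r0, which still carries the syscall return
 * value; the OSP_OFFSET-4 adjustment accounts for the one word it
 * skipped.
 */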
	.macro fast_restore_user_regs
	restore_user_regs_first
	lmw.adm	$r1, [$sp], $r25, #0x0
	addi	$sp, $sp, OSP_OFFSET-4
	restore_user_regs_last
	.endm

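/*
 * On a non-preemptible kernel, preempt_stop disables interrupts
 * before the return-path checks and resume_kernel is simply an
 * alias for no_work_pending; a preemptible kernel provides a real
 * resume_kernel below.
 */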
#ifdef CONFIG_PREEMPT
	.macro	preempt_stop
	.endm
#else
	.macro	preempt_stop
	setgie.d
	isb
	.endm
#define	resume_kernel	no_work_pending
#endif

ENTRY(ret_from_exception)
	preempt_stop
ENTRY(ret_from_intr)

/*
 * Decide whether we are returning to kernel or user mode.
 */
	lwi	$p0, [$sp+(#IPSW_OFFSET)]	! check for nested interrupt
	andi	$p0, $p0, #PSW_mskINTL
	bnez	$p0, resume_kernel		! returning to kernel: done with iret
	j	resume_userspace

/*
 * This is the fast syscall return path.  We do as little as
 * possible here, and this includes saving $r0 back into the SVC
 * stack.
 * Fixed registers: tsk - $r25, syscall # - $r7, syscall table pointer - $r8.
 */
ENTRY(ret_fast_syscall)
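	/*
	 * Interrupts stay disabled from here on so that no wakeup can
	 * set a work flag between the TSK_TI_FLAGS test and iret.
	 */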
	gie_disable
	lwi	$r1, [tsk+#TSK_TI_FLAGS]
	andi	$p1, $r1, #_TIF_WORK_MASK
	bnez	$p1, fast_work_pending
	fast_restore_user_regs			! iret

/*
 * We need to do extra processing: take the slow path out of the
 * syscall while work is pending.
 */
fast_work_pending:
	swi	$r0, [$sp+(#R0_OFFSET)]		! unlike ret_from_exception, $r0 must be saved here
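/*
 * work_pending handles both reschedule and signal/notify-resume
 * requests; after do_notify_resume the flags are rechecked through
 * ret_slow_syscall, since new work may have arrived in the meantime.
 */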
work_pending:
	andi	$p1, $r1, #_TIF_NEED_RESCHED
	bnez	$p1, work_resched

	andi	$p1, $r1, #_TIF_SIGPENDING|#_TIF_NOTIFY_RESUME
	beqz	$p1, no_work_pending

	move	$r0, $sp			! 'regs'
	gie_enable
	bal	do_notify_resume
	b	ret_slow_syscall
work_resched:
	bal	schedule			! reschedule, then fall into the slow return path

/*
 * "slow" syscall return path.
 */
ENTRY(resume_userspace)
ENTRY(ret_slow_syscall)
	gie_disable
	lwi	$p0, [$sp+(#IPSW_OFFSET)]	! check for nested interrupt
	andi	$p0, $p0, #PSW_mskINTL
	bnez	$p0, no_work_pending		! done with iret
	lwi	$r1, [tsk+#TSK_TI_FLAGS]
	andi	$p1, $r1, #_TIF_WORK_MASK
	bnez	$p1, work_pending		! handle work_resched, sig_pend

no_work_pending:
#ifdef CONFIG_TRACE_IRQFLAGS
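	/*
	 * Report the final IRQ state to lockdep: bit 0 of the saved
	 * $IPSW is the GIE bit, so cmovz selects __trace_hardirqs_off
	 * when it is clear and leaves __trace_hardirqs_on when it is
	 * set before the indirect call.
	 */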
	lwi	$p0, [$sp+(#IPSW_OFFSET)]
	andi	$p0, $p0, #0x1
	la	$r10, __trace_hardirqs_off
	la	$r9, __trace_hardirqs_on
	cmovz	$r9, $p0, $r10
	jral	$r9
#endif
	restore_user_regs			! return via iret

/*
 * Preemptible kernel.
 */
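/*
 * resume_kernel: preempt only when the preempt count is zero,
 * TIF_NEED_RESCHED is set and the interrupted context had interrupts
 * enabled; otherwise branch to no_work_pending and iret back into
 * the kernel.
 */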
#ifdef CONFIG_PREEMPT
resume_kernel:
	gie_disable
	lwi	$t0, [tsk+#TSK_TI_PREEMPT]
	bnez	$t0, no_work_pending

	lwi	$t0, [tsk+#TSK_TI_FLAGS]
	andi	$p1, $t0, #_TIF_NEED_RESCHED
	beqz	$p1, no_work_pending

	lwi	$t0, [$sp+(#IPSW_OFFSET)]	! interrupts off?
	andi	$t0, $t0, #1
	beqz	$t0, no_work_pending

	jal	preempt_schedule_irq
	b	no_work_pending
#endif

/*
 * This is how we return from a fork.
 */
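/*
 * schedule_tail finishes the context switch started in the parent;
 * for a kernel thread $r6 holds the thread function and $r7 its
 * argument, while a user fork arrives with $r6 == 0 and goes through
 * the normal syscall-exit checks.
 */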
ENTRY(ret_from_fork)
	bal	schedule_tail
	beqz	$r6, 1f				! $r6 stores fn for kernel thread
	move	$r0, $r7			! prepare kernel thread arg
	jral	$r6
1:
	lwi	$r1, [tsk+#TSK_TI_FLAGS]	! check for syscall tracing
	andi	$p1, $r1, #_TIF_WORK_SYSCALL_LEAVE	! are we tracing syscalls?
	beqz	$p1, ret_slow_syscall
	move	$r0, $sp
	bal	syscall_trace_leave
	b	ret_slow_syscall