csky: entry: Remove unneeded need_resched() loop
arch/csky/kernel/entry.S
/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/linkage.h>
#include <abi/entry.h>
#include <abi/pgtable-bits.h>
#include <asm/errno.h>
#include <asm/setup.h>
#include <asm/unistd.h>
#include <asm/asm-offsets.h>
#include <linux/threads.h>
#include <asm/setup.h>
#include <asm/page.h>
#include <asm/thread_info.h>

#define PTE_INDX_MSK	0xffc
#define PTE_INDX_SHIFT	10
#define _PGDIR_SHIFT	22

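/*
 * Page-table geometry assumed by the TLB exception handlers below (a
 * two-level table with 4 KiB pages on 32-bit C-SKY):
 *
 *   va[31:22] indexes the PGD (_PGDIR_SHIFT = 22)
 *   va[21:12] indexes the PTE table
 *
 * (va >> PTE_INDX_SHIFT) & PTE_INDX_MSK yields the PTE index already
 * scaled by 4, i.e. the byte offset of the PTE within its table.
 */
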
.macro zero_fp
#ifdef CONFIG_STACKTRACE
	movi r8, 0
#endif
.endm

.macro tlbop_begin name, val0, val1, val2
ENTRY(csky_\name)
	mtcr a3, ss2
	mtcr r6, ss3
	mtcr a2, ss4

	RD_PGDR r6
	RD_MEH a3
#ifdef CONFIG_CPU_HAS_TLBI
	tlbi.vaas a3
	sync.is

	btsti a3, 31
	bf 1f
	RD_PGDR_K r6
1:
#else
	bgeni a2, 31
	WR_MCIR a2
	bgeni a2, 25
	WR_MCIR a2
#endif
	bclri r6, 0
	lrw a2, va_pa_offset
	ld.w a2, (a2, 0)
	subu r6, a2
	bseti r6, 31

	mov a2, a3
	lsri a2, _PGDIR_SHIFT
	lsli a2, 2
	addu r6, a2
	ldw r6, (r6)

	lrw a2, va_pa_offset
	ld.w a2, (a2, 0)
	subu r6, a2
	bseti r6, 31

	lsri a3, PTE_INDX_SHIFT
	lrw a2, PTE_INDX_MSK
	and a3, a2
	addu r6, a3
	ldw a3, (r6)

	movi a2, (_PAGE_PRESENT | \val0)
	and a3, a2
	cmpne a3, a2
	bt \name

	/* The PTE is present and permits this access: first read/write of the page, just update its flags */
	ldw a3, (r6)
	bgeni a2, PAGE_VALID_BIT
	bseti a2, PAGE_ACCESSED_BIT
	bseti a2, \val1
	bseti a2, \val2
	or a3, a2
	stw a3, (r6)

	/* Some CPUs' hardware TLB refill bypasses the D-cache; write the PTE's cache line back so the refill sees the update */
#ifdef CONFIG_CPU_NEED_TLBSYNC
	movi a2, 0x22
	bseti a2, 6
	mtcr r6, cr22
	mtcr a2, cr17
	sync
#endif

	mfcr a3, ss2
	mfcr r6, ss3
	mfcr a2, ss4
	rte
\name:
	mfcr a3, ss2
	mfcr r6, ss3
	mfcr a2, ss4
	SAVE_ALL 0
.endm
.macro tlbop_end is_write
	zero_fp
	RD_MEH a2
	psrset ee, ie
	mov a0, sp
	movi a1, \is_write
	jbsr do_page_fault
	jmpi ret_from_exception
.endm
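
/*
 * Illustrative C-level sketch (names simplified, not literal kernel code)
 * of the tlbop_begin fast path above:
 *
 *	pgd  = phys_to_virt(RD_PGDR & ~1);	// kernel PGD if va bit 31 set
 *	ptab = phys_to_virt(pgd[va >> _PGDIR_SHIFT]);
 *	pte  = ptab[(va >> 12) & 0x3ff];
 *	if ((pte & (_PAGE_PRESENT | val0)) != (_PAGE_PRESENT | val0))
 *		goto slow_path;			// SAVE_ALL + do_page_fault()
 *	pte |= (1 << PAGE_VALID_BIT) | (1 << PAGE_ACCESSED_BIT) |
 *	       (1 << val1) | (1 << val2);
 *	write back pte, then rte;
 */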

.text

tlbop_begin tlbinvalidl, _PAGE_READ, PAGE_VALID_BIT, PAGE_ACCESSED_BIT
tlbop_end 0

tlbop_begin tlbinvalids, _PAGE_WRITE, PAGE_DIRTY_BIT, PAGE_MODIFIED_BIT
tlbop_end 1

tlbop_begin tlbmodified, _PAGE_WRITE, PAGE_DIRTY_BIT, PAGE_MODIFIED_BIT
#ifndef CONFIG_CPU_HAS_LDSTEX
jbsr csky_cmpxchg_fixup
#endif
tlbop_end 1

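/*
 * System-call entry point (trap 0): the syscall number arrives in the
 * syscallid register (see abi/regdef.h) and is used to index sys_call_table.
 */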
ENTRY(csky_systemcall)
	SAVE_ALL TRAP0_SIZE
	zero_fp

	psrset ee, ie

	lrw r11, __NR_syscalls
	cmphs syscallid, r11	/* Check that the syscall nr is in range */
	bt ret_from_exception

	lrw r13, sys_call_table
	ixw r13, syscallid
	ldw r11, (r13)
	cmpnei r11, 0
	bf ret_from_exception

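	/* thread_info lives at the base of the kernel stack: r9 = sp & ~(THREAD_SIZE - 1) */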
	mov r9, sp
	bmaski r10, THREAD_SHIFT
	andn r9, r10
	ldw r12, (r9, TINFO_FLAGS)
	ANDI_R3 r12, (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_TRACEPOINT | _TIF_SYSCALL_AUDIT)
	cmpnei r12, 0
	bt csky_syscall_trace
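	/*
	 * Under the ABIv2 C calling convention arguments beyond the fourth
	 * are passed on the stack, so syscall args 5/6 (r4/r5) are spilled
	 * below the handler's frame before the call.
	 */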
#if defined(__CSKYABIV2__)
	subi sp, 8
	stw r5, (sp, 0x4)
	stw r4, (sp, 0x0)
	jsr r11			/* Do system call */
	addi sp, 8
#else
	jsr r11
#endif
	stw a0, (sp, LSAVE_A0)	/* Save return value */
	jmpi ret_from_exception

csky_syscall_trace:
	mov a0, sp		/* sp = pt_regs pointer */
	jbsr syscall_trace_enter
	/* Reload the syscall args from pt_regs (the tracer may have changed them) */
	ldw a0, (sp, LSAVE_A0)
	ldw a1, (sp, LSAVE_A1)
	ldw a2, (sp, LSAVE_A2)
	ldw a3, (sp, LSAVE_A3)
#if defined(__CSKYABIV2__)
	subi sp, 8
	stw r5, (sp, 0x4)
	stw r4, (sp, 0x0)
#else
	ldw r6, (sp, LSAVE_A4)
	ldw r7, (sp, LSAVE_A5)
#endif
	jsr r11			/* Do system call */
#if defined(__CSKYABIV2__)
	addi sp, 8
#endif
	stw a0, (sp, LSAVE_A0)	/* Save return value */

	mov a0, sp		/* right now, sp --> pt_regs */
	jbsr syscall_trace_exit
	br ret_from_exception

ENTRY(ret_from_kernel_thread)
	jbsr schedule_tail
	mov a0, r10
	jsr r9
	jbsr ret_from_exception

ENTRY(ret_from_fork)
	jbsr schedule_tail
	mov r9, sp
	bmaski r10, THREAD_SHIFT
	andn r9, r10
	ldw r12, (r9, TINFO_FLAGS)
	ANDI_R3 r12, (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_TRACEPOINT | _TIF_SYSCALL_AUDIT)
	cmpnei r12, 0
	bf ret_from_exception
	mov a0, sp		/* sp = pt_regs pointer */
	jbsr syscall_trace_exit

ret_from_exception:
	ld syscallid, (sp, LSAVE_PSR)
	btsti syscallid, 31
	bt 1f

	/*
	 * The saved PSR has the S bit clear, so we are returning to user
	 * space: locate current's thread_info from the kernel stack pointer
	 * and check its flags for pending work (signal delivery,
	 * notify_resume, reschedule).
	 */
	mov r9, sp
	bmaski r10, THREAD_SHIFT
	andn r9, r10

	ldw r12, (r9, TINFO_FLAGS)
	andi r12, (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED)
	cmpnei r12, 0
	bt exit_work
1:
	RESTORE_ALL

exit_work:
	lrw syscallid, ret_from_exception
	mov lr, syscallid

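	/*
	 * lr now points at ret_from_exception, so the tail jumps below
	 * return there and the work flags are checked again.
	 */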
	btsti r12, TIF_NEED_RESCHED
	bt work_resched

	mov a0, sp
	mov a1, r12
	jmpi do_notify_resume

work_resched:
	jmpi schedule

ENTRY(csky_trap)
	SAVE_ALL 0
	zero_fp
	psrset ee
	mov a0, sp		/* Push Stack pointer arg */
	jbsr trap_c		/* Call C-level trap handler */
	jmpi ret_from_exception

/*
 * Prototype from libc for abiv1:
 * register unsigned int __result asm("a0");
 * asm( "trap 3" :"=r"(__result)::);
 */
ENTRY(csky_get_tls)
	USPTOKSP

	/* Advance epc past the trap instruction so execution resumes after it */
	mfcr a0, epc
	addi a0, TRAP0_SIZE
	mtcr a0, epc

	/*
	 * Get current's thread_info from the kernel stack pointer (sp is
	 * biased down by one first so a stack pointer sitting exactly at
	 * the top of the stack still masks into this task's stack area).
	 */
	bmaski a0, THREAD_SHIFT
	not a0
	subi sp, 1
	and a0, sp
	addi sp, 1

	/* get tls */
	ldw a0, (a0, TINFO_TP_VALUE)

	KSPTOUSP
	rte

ENTRY(csky_irq)
	SAVE_ALL 0
	zero_fp
	psrset ee

#ifdef CONFIG_PREEMPT
	mov r9, sp		/* Get current stack pointer */
	bmaski r10, THREAD_SHIFT
	andn r9, r10		/* Get thread_info */

	/*
	 * Fetch current's preempt_count from thread_info and increment it.
	 */
	ldw r12, (r9, TINFO_PREEMPT)
	addi r12, 1
	stw r12, (r9, TINFO_PREEMPT)
#endif

	mov a0, sp
	jbsr csky_do_IRQ

#ifdef CONFIG_PREEMPT
	subi r12, 1
	stw r12, (r9, TINFO_PREEMPT)
	cmpnei r12, 0
	bt 2f
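	/* preempt_count dropped back to zero: reschedule now if one is pending */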
	ldw r12, (r9, TINFO_FLAGS)
	btsti r12, TIF_NEED_RESCHED
	bf 2f
	jbsr preempt_schedule_irq	/* irq en/disable is done inside */
#endif
2:
	jmpi ret_from_exception

/*
 * a0 = prev task_struct *
 * a1 = next task_struct *
 * a0 = return next
 */
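/*
 * Context switch: callee-saved registers are spilled via SAVE_SWITCH_STACK,
 * then PSR and the kernel stack pointer are saved into prev's thread struct
 * and reloaded from next's before RESTORE_SWITCH_STACK and return.
 */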
ENTRY(__switch_to)
	lrw a3, TASK_THREAD
	addu a3, a0

	mfcr a2, psr		/* Save PSR value */
	stw a2, (a3, THREAD_SR)	/* Save PSR in task struct */
	bclri a2, 6		/* Disable interrupts */
	mtcr a2, psr

	SAVE_SWITCH_STACK

	stw sp, (a3, THREAD_KSP)

	/* Set up next process to run */
	lrw a3, TASK_THREAD
	addu a3, a1

	ldw sp, (a3, THREAD_KSP)	/* Set next kernel sp */

	ldw a2, (a3, THREAD_SR)	/* Set next PSR */
	mtcr a2, psr

#if defined(__CSKYABIV2__)
	addi r7, a1, TASK_THREAD_INFO
	ldw tls, (r7, TINFO_TP_VALUE)
#endif

	RESTORE_SWITCH_STACK

	rts
ENDPROC(__switch_to)