Commit | Line | Data |
---|---|---|
ca54502b MS |
1 | /* |
2 | * Low-level system-call handling, trap handlers and context-switching | |
3 | * | |
4 | * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu> | |
5 | * Copyright (C) 2008-2009 PetaLogix | |
6 | * Copyright (C) 2003 John Williams <jwilliams@itee.uq.edu.au> | |
7 | * Copyright (C) 2001,2002 NEC Corporation | |
8 | * Copyright (C) 2001,2002 Miles Bader <miles@gnu.org> | |
9 | * | |
10 | * This file is subject to the terms and conditions of the GNU General | |
11 | * Public License. See the file COPYING in the main directory of this | |
12 | * archive for more details. | |
13 | * | |
14 | * Written by Miles Bader <miles@gnu.org> | |
15 | * Heavily modified by John Williams for Microblaze | |
16 | */ | |
17 | ||
18 | #include <linux/sys.h> | |
19 | #include <linux/linkage.h> | |
20 | ||
21 | #include <asm/entry.h> | |
22 | #include <asm/current.h> | |
23 | #include <asm/processor.h> | |
24 | #include <asm/exceptions.h> | |
25 | #include <asm/asm-offsets.h> | |
26 | #include <asm/thread_info.h> | |
27 | ||
28 | #include <asm/page.h> | |
29 | #include <asm/unistd.h> | |
30 | ||
31 | #include <linux/errno.h> | |
32 | #include <asm/signal.h> | |
33 | ||
11d51360 MS |
34 | #undef DEBUG |
35 | ||
d8748e73 MS |
36 | #ifdef DEBUG |
37 | /* Create space for syscalls counting. */ | |
38 | .section .data | |
39 | .global syscall_debug_table | |
40 | .align 4 | |
41 | syscall_debug_table: | |
42 | .space (__NR_syscalls * 4) | |
43 | #endif /* DEBUG */ | |
44 | ||
ca54502b MS |
45 | #define C_ENTRY(name) .globl name; .align 4; name |
46 | ||
47 | /* | |
48 | * Various ways of setting and clearing BIP in flags reg. | |
49 | * This is mucky, but necessary using microblaze version that | |
50 | * allows msr ops to write to BIP | |
51 | */ | |
52 | #if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR | |
53 | .macro clear_bip | |
66f7de86 | 54 | msrclr r0, MSR_BIP |
ca54502b MS |
55 | .endm |
56 | ||
57 | .macro set_bip | |
66f7de86 | 58 | msrset r0, MSR_BIP |
ca54502b MS |
59 | .endm |
60 | ||
61 | .macro clear_eip | |
66f7de86 | 62 | msrclr r0, MSR_EIP |
ca54502b MS |
63 | .endm |
64 | ||
65 | .macro set_ee | |
66f7de86 | 66 | msrset r0, MSR_EE |
ca54502b MS |
67 | .endm |
68 | ||
69 | .macro disable_irq | |
66f7de86 | 70 | msrclr r0, MSR_IE |
ca54502b MS |
71 | .endm |
72 | ||
73 | .macro enable_irq | |
66f7de86 | 74 | msrset r0, MSR_IE |
ca54502b MS |
75 | .endm |
76 | ||
77 | .macro set_ums | |
66f7de86 | 78 | msrset r0, MSR_UMS |
66f7de86 | 79 | msrclr r0, MSR_VMS |
ca54502b MS |
80 | .endm |
81 | ||
82 | .macro set_vms | |
66f7de86 | 83 | msrclr r0, MSR_UMS |
66f7de86 | 84 | msrset r0, MSR_VMS |
ca54502b MS |
85 | .endm |
86 | ||
b318067e | 87 | .macro clear_ums |
66f7de86 | 88 | msrclr r0, MSR_UMS |
b318067e MS |
89 | .endm |
90 | ||
ca54502b | 91 | .macro clear_vms_ums |
66f7de86 | 92 | msrclr r0, MSR_VMS | MSR_UMS |
ca54502b MS |
93 | .endm |
94 | #else | |
95 | .macro clear_bip | |
96 | mfs r11, rmsr | |
ca54502b MS |
97 | andi r11, r11, ~MSR_BIP |
98 | mts rmsr, r11 | |
ca54502b MS |
99 | .endm |
100 | ||
101 | .macro set_bip | |
102 | mfs r11, rmsr | |
ca54502b MS |
103 | ori r11, r11, MSR_BIP |
104 | mts rmsr, r11 | |
ca54502b MS |
105 | .endm |
106 | ||
107 | .macro clear_eip | |
108 | mfs r11, rmsr | |
ca54502b MS |
109 | andi r11, r11, ~MSR_EIP |
110 | mts rmsr, r11 | |
ca54502b MS |
111 | .endm |
112 | ||
113 | .macro set_ee | |
114 | mfs r11, rmsr | |
ca54502b MS |
115 | ori r11, r11, MSR_EE |
116 | mts rmsr, r11 | |
ca54502b MS |
117 | .endm |
118 | ||
119 | .macro disable_irq | |
120 | mfs r11, rmsr | |
ca54502b MS |
121 | andi r11, r11, ~MSR_IE |
122 | mts rmsr, r11 | |
ca54502b MS |
123 | .endm |
124 | ||
125 | .macro enable_irq | |
126 | mfs r11, rmsr | |
ca54502b MS |
127 | ori r11, r11, MSR_IE |
128 | mts rmsr, r11 | |
ca54502b MS |
129 | .endm |
130 | ||
131 | .macro set_ums | |
132 | mfs r11, rmsr | |
ca54502b MS |
133 | ori r11, r11, MSR_VMS |
134 | andni r11, r11, MSR_UMS | |
135 | mts rmsr, r11 | |
ca54502b MS |
136 | .endm |
137 | ||
138 | .macro set_vms | |
139 | mfs r11, rmsr | |
ca54502b MS |
140 | ori r11, r11, MSR_VMS |
141 | andni r11, r11, MSR_UMS | |
142 | mts rmsr, r11 | |
ca54502b MS |
143 | .endm |
144 | ||
b318067e MS |
145 | .macro clear_ums |
146 | mfs r11, rmsr | |
b318067e MS |
147 | andni r11, r11, MSR_UMS |
148 | mts rmsr,r11 | |
b318067e MS |
149 | .endm |
150 | ||
ca54502b MS |
151 | .macro clear_vms_ums |
152 | mfs r11, rmsr | |
ca54502b MS |
153 | andni r11, r11, (MSR_VMS|MSR_UMS) |
154 | mts rmsr,r11 | |
ca54502b MS |
155 | .endm |
156 | #endif | |
157 | ||
158 | /* Define how to call high-level functions. With MMU, virtual mode must be | |
159 | * enabled when calling the high-level function. Clobbers R11. | |
160 | * VM_ON, VM_OFF, DO_JUMP_BIPCLR, DO_CALL | |
161 | */ | |
162 | ||
163 | /* turn on virtual protected mode save */ | |
164 | #define VM_ON \ | |
a4a94dbf | 165 | set_ums; \ |
ca54502b | 166 | rted r0, 2f; \ |
a4a94dbf MS |
167 | nop; \ |
168 | 2: | |
ca54502b MS |
169 | |
170 | /* turn off virtual protected mode save and user mode save*/ | |
171 | #define VM_OFF \ | |
a4a94dbf | 172 | clear_vms_ums; \ |
ca54502b | 173 | rted r0, TOPHYS(1f); \ |
a4a94dbf MS |
174 | nop; \ |
175 | 1: | |
ca54502b MS |
176 | |
177 | #define SAVE_REGS \ | |
6e83557c MS |
178 | swi r2, r1, PT_R2; /* Save SDA */ \ |
179 | swi r3, r1, PT_R3; \ | |
180 | swi r4, r1, PT_R4; \ | |
181 | swi r5, r1, PT_R5; \ | |
182 | swi r6, r1, PT_R6; \ | |
183 | swi r7, r1, PT_R7; \ | |
184 | swi r8, r1, PT_R8; \ | |
185 | swi r9, r1, PT_R9; \ | |
186 | swi r10, r1, PT_R10; \ | |
187 | swi r11, r1, PT_R11; /* save clobbered regs after rval */\ | |
188 | swi r12, r1, PT_R12; \ | |
189 | swi r13, r1, PT_R13; /* Save SDA2 */ \ | |
190 | swi r14, r1, PT_PC; /* PC, before IRQ/trap */ \ | |
191 | swi r15, r1, PT_R15; /* Save LP */ \ | |
192 | swi r16, r1, PT_R16; \ | |
193 | swi r17, r1, PT_R17; \ | |
194 | swi r18, r1, PT_R18; /* Save asm scratch reg */ \ | |
195 | swi r19, r1, PT_R19; \ | |
196 | swi r20, r1, PT_R20; \ | |
197 | swi r21, r1, PT_R21; \ | |
198 | swi r22, r1, PT_R22; \ | |
199 | swi r23, r1, PT_R23; \ | |
200 | swi r24, r1, PT_R24; \ | |
201 | swi r25, r1, PT_R25; \ | |
202 | swi r26, r1, PT_R26; \ | |
203 | swi r27, r1, PT_R27; \ | |
204 | swi r28, r1, PT_R28; \ | |
205 | swi r29, r1, PT_R29; \ | |
206 | swi r30, r1, PT_R30; \ | |
207 | swi r31, r1, PT_R31; /* Save current task reg */ \ | |
ca54502b | 208 | mfs r11, rmsr; /* save MSR */ \ |
6e83557c | 209 | swi r11, r1, PT_MSR; |
ca54502b MS |
210 | |
211 | #define RESTORE_REGS \ | |
6e83557c | 212 | lwi r11, r1, PT_MSR; \ |
ca54502b | 213 | mts rmsr , r11; \ |
6e83557c MS |
214 | lwi r2, r1, PT_R2; /* restore SDA */ \ |
215 | lwi r3, r1, PT_R3; \ | |
216 | lwi r4, r1, PT_R4; \ | |
217 | lwi r5, r1, PT_R5; \ | |
218 | lwi r6, r1, PT_R6; \ | |
219 | lwi r7, r1, PT_R7; \ | |
220 | lwi r8, r1, PT_R8; \ | |
221 | lwi r9, r1, PT_R9; \ | |
222 | lwi r10, r1, PT_R10; \ | |
223 | lwi r11, r1, PT_R11; /* restore clobbered regs after rval */\ | |
224 | lwi r12, r1, PT_R12; \ | |
225 | lwi r13, r1, PT_R13; /* restore SDA2 */ \ | |
226 | lwi r14, r1, PT_PC; /* RESTORE_LINK PC, before IRQ/trap */\ | |
227 | lwi r15, r1, PT_R15; /* restore LP */ \ | |
228 | lwi r16, r1, PT_R16; \ | |
229 | lwi r17, r1, PT_R17; \ | |
230 | lwi r18, r1, PT_R18; /* restore asm scratch reg */ \ | |
231 | lwi r19, r1, PT_R19; \ | |
232 | lwi r20, r1, PT_R20; \ | |
233 | lwi r21, r1, PT_R21; \ | |
234 | lwi r22, r1, PT_R22; \ | |
235 | lwi r23, r1, PT_R23; \ | |
236 | lwi r24, r1, PT_R24; \ | |
237 | lwi r25, r1, PT_R25; \ | |
238 | lwi r26, r1, PT_R26; \ | |
239 | lwi r27, r1, PT_R27; \ | |
240 | lwi r28, r1, PT_R28; \ | |
241 | lwi r29, r1, PT_R29; \ | |
242 | lwi r30, r1, PT_R30; \ | |
243 | lwi r31, r1, PT_R31; /* Restore cur task reg */ | |
ca54502b | 244 | |
e5d2af2b MS |
245 | #define SAVE_STATE \ |
246 | swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* save stack */ \ | |
247 | /* See if already in kernel mode.*/ \ | |
248 | mfs r1, rmsr; \ | |
e5d2af2b MS |
249 | andi r1, r1, MSR_UMS; \ |
250 | bnei r1, 1f; \ | |
251 | /* Kernel-mode state save. */ \ | |
252 | /* Reload kernel stack-ptr. */ \ | |
253 | lwi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); \ | |
287503fa MS |
254 | /* FIXME: I can add these two lines to one */ \ |
255 | /* tophys(r1,r1); */ \ | |
6e83557c MS |
256 | /* addik r1, r1, -PT_SIZE; */ \ |
257 | addik r1, r1, CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - PT_SIZE; \ | |
e5d2af2b | 258 | SAVE_REGS \ |
e5d2af2b | 259 | brid 2f; \ |
6e83557c | 260 | swi r1, r1, PT_MODE; \ |
e5d2af2b MS |
261 | 1: /* User-mode state save. */ \ |
262 | lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */\ | |
263 | tophys(r1,r1); \ | |
264 | lwi r1, r1, TS_THREAD_INFO; /* get the thread info */ \ | |
287503fa MS |
265 | /* MS these three instructions can be added to one */ \ |
266 | /* addik r1, r1, THREAD_SIZE; */ \ | |
267 | /* tophys(r1,r1); */ \ | |
6e83557c MS |
268 | /* addik r1, r1, -PT_SIZE; */ \ |
269 | addik r1, r1, THREAD_SIZE + CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - PT_SIZE; \ | |
e5d2af2b | 270 | SAVE_REGS \ |
e5d2af2b | 271 | lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); \ |
6e83557c MS |
272 | swi r11, r1, PT_R1; /* Store user SP. */ \ |
273 | swi r0, r1, PT_MODE; /* Was in user-mode. */ \ | |
e5d2af2b MS |
274 | /* MS: I am clearing UMS even in case when I come from kernel space */ \ |
275 | clear_ums; \ | |
276 | 2: lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); | |
277 | ||
ca54502b MS |
278 | .text |
279 | ||
280 | /* | |
281 | * User trap. | |
282 | * | |
283 | * System calls are handled here. | |
284 | * | |
285 | * Syscall protocol: | |
286 | * Syscall number in r12, args in r5-r10 | |
287 | * Return value in r3 | |
288 | * | |
289 | * Trap entered via brki instruction, so BIP bit is set, and interrupts | |
290 | * are masked. This is nice, means we don't have to CLI before state save | |
291 | */ | |
292 | C_ENTRY(_user_exception): | |
0e41c909 | 293 | swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */ |
9da63458 MS |
294 | addi r14, r14, 4 /* return address is 4 byte after call */ |
295 | ||
ca54502b MS |
296 | lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */ |
297 | tophys(r1,r1); | |
298 | lwi r1, r1, TS_THREAD_INFO; /* get stack from task_struct */ | |
9da63458 MS |
299 | /* calculate kernel stack pointer from task struct 8k */ |
300 | addik r1, r1, THREAD_SIZE; | |
301 | tophys(r1,r1); | |
302 | ||
6e83557c | 303 | addik r1, r1, -PT_SIZE; /* Make room on the stack. */ |
ca54502b | 304 | SAVE_REGS |
6e83557c MS |
305 | swi r0, r1, PT_R3 |
306 | swi r0, r1, PT_R4 | |
ca54502b | 307 | |
6e83557c | 308 | swi r0, r1, PT_MODE; /* Was in user-mode. */ |
ca54502b | 309 | lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); |
6e83557c | 310 | swi r11, r1, PT_R1; /* Store user SP. */ |
25f6e596 | 311 | clear_ums; |
9da63458 | 312 | 2: lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); |
ca54502b | 313 | /* Save away the syscall number. */ |
6e83557c | 314 | swi r12, r1, PT_R0; |
ca54502b MS |
315 | tovirt(r1,r1) |
316 | ||
ca54502b MS |
317 | /* where the trap should return need -8 to adjust for rtsd r15, 8*/ |
318 | /* Jump to the appropriate function for the system call number in r12 | |
319 | * (r12 is not preserved), or return an error if r12 is not valid. The LP | |
320 | * register should point to the location where | |
321 | * the called function should return. [note that MAKE_SYS_CALL uses label 1] */ | |
23575483 | 322 | |
25f6e596 MS |
323 | /* Step into virtual mode */ |
324 | rtbd r0, 3f | |
23575483 MS |
325 | nop |
326 | 3: | |
b1d70c62 | 327 | lwi r11, CURRENT_TASK, TS_THREAD_INFO /* get thread info */ |
23575483 MS |
328 | lwi r11, r11, TI_FLAGS /* get flags in thread info */ |
329 | andi r11, r11, _TIF_WORK_SYSCALL_MASK | |
330 | beqi r11, 4f | |
331 | ||
332 | addik r3, r0, -ENOSYS | |
6e83557c | 333 | swi r3, r1, PT_R3 |
23575483 | 334 | brlid r15, do_syscall_trace_enter |
6e83557c | 335 | addik r5, r1, PT_R0 |
23575483 MS |
336 | |
337 | # do_syscall_trace_enter returns the new syscall nr. | |
338 | addk r12, r0, r3 | |
6e83557c MS |
339 | lwi r5, r1, PT_R5; |
340 | lwi r6, r1, PT_R6; | |
341 | lwi r7, r1, PT_R7; | |
342 | lwi r8, r1, PT_R8; | |
343 | lwi r9, r1, PT_R9; | |
344 | lwi r10, r1, PT_R10; | |
23575483 MS |
345 | 4: |
346 | /* Jump to the appropriate function for the system call number in r12 | |
347 | * (r12 is not preserved), or return an error if r12 is not valid. | |
348 | * The LP register should point to the location where the called function | |
349 | * should return. [note that MAKE_SYS_CALL uses label 1] */ | |
350 | /* See if the system call number is valid */ | |
c2219eda | 351 | blti r12, 5f |
ca54502b | 352 | addi r11, r12, -__NR_syscalls; |
074fa7e7 | 353 | bgei r11, 5f; |
ca54502b MS |
354 | /* Figure out which function to use for this system call. */ |
355 | /* Note Microblaze barrel shift is optional, so don't rely on it */ | |
356 | add r12, r12, r12; /* convert num -> ptr */ | |
357 | add r12, r12, r12; | |
4de6ba68 | 358 | addi r30, r0, 1 /* restarts allowed */ |
ca54502b | 359 | |
11d51360 | 360 | #ifdef DEBUG |
d8748e73 MS |
361 | /* Track syscalls and store them to syscall_debug_table */
362 | /* The first syscall location stores total syscall number */ | |
363 | lwi r3, r0, syscall_debug_table | |
364 | addi r3, r3, 1 | |
365 | swi r3, r0, syscall_debug_table | |
366 | lwi r3, r12, syscall_debug_table | |
ca54502b | 367 | addi r3, r3, 1 |
d8748e73 | 368 | swi r3, r12, syscall_debug_table |
11d51360 | 369 | #endif |
23575483 MS |
370 | |
371 | # Find and jump into the syscall handler. | |
372 | lwi r12, r12, sys_call_table | |
373 | /* where the trap should return need -8 to adjust for rtsd r15, 8 */ | |
b9ea77e2 | 374 | addi r15, r0, ret_from_trap-8 |
23575483 | 375 | bra r12 |
ca54502b | 376 | |
ca54502b | 377 | /* The syscall number is invalid, return an error. */ |
23575483 | 378 | 5: |
c2219eda | 379 | braid ret_from_trap |
ca54502b | 380 | addi r3, r0, -ENOSYS; |
ca54502b | 381 | |
23575483 | 382 | /* Entry point used to return from a syscall/trap */ |
ca54502b MS |
383 | /* We re-enable BIP bit before state restore */ |
384 | C_ENTRY(ret_from_trap): | |
6e83557c MS |
385 | swi r3, r1, PT_R3 |
386 | swi r4, r1, PT_R4 | |
b1d70c62 | 387 | |
6e83557c | 388 | lwi r11, r1, PT_MODE; |
9da63458 MS |
389 | /* See if returning to kernel mode, if so, skip resched &c. */ |
390 | bnei r11, 2f; | |
23575483 MS |
391 | /* We're returning to user mode, so check for various conditions that |
392 | * trigger rescheduling. */ | |
b1d70c62 MS |
393 | /* FIXME: Restructure all these flag checks. */ |
394 | lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */ | |
23575483 MS |
395 | lwi r11, r11, TI_FLAGS; /* get flags in thread info */ |
396 | andi r11, r11, _TIF_WORK_SYSCALL_MASK | |
397 | beqi r11, 1f | |
398 | ||
23575483 | 399 | brlid r15, do_syscall_trace_leave |
6e83557c | 400 | addik r5, r1, PT_R0 |
23575483 | 401 | 1: |
ca54502b MS |
402 | /* We're returning to user mode, so check for various conditions that |
403 | * trigger rescheduling. */ | |
b1d70c62 MS |
404 | /* get thread info from current task */ |
405 | lwi r11, CURRENT_TASK, TS_THREAD_INFO; | |
e9f92526 AV |
406 | lwi r19, r11, TI_FLAGS; /* get flags in thread info */ |
407 | andi r11, r19, _TIF_NEED_RESCHED; | |
ca54502b MS |
408 | beqi r11, 5f; |
409 | ||
ca54502b MS |
410 | bralid r15, schedule; /* Call scheduler */ |
411 | nop; /* delay slot */ | |
e9f92526 | 412 | bri 1b |
ca54502b MS |
413 | |
414 | /* Maybe handle a signal */ | |
074fa7e7 | 415 | 5: |
e9f92526 AV |
416 | andi r11, r19, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME; |
417 | beqi r11, 4f; /* Signals to handle, handle them */ | |
ca54502b | 418 | |
6e83557c | 419 | addik r5, r1, 0; /* Arg 1: struct pt_regs *regs */ |
969a9616 | 420 | bralid r15, do_notify_resume; /* Handle any signals */ |
14203e19 | 421 | add r6, r30, r0; /* Arg 2: int in_syscall */ |
e9f92526 AV |
422 | add r30, r0, r0 /* no more restarts */ |
423 | bri 1b | |
b1d70c62 MS |
424 | |
425 | /* Finally, return to user state. */ | |
e9f92526 | 426 | 4: set_bip; /* Ints masked for state restore */ |
8633bebc | 427 | swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */ |
ca54502b MS |
428 | VM_OFF; |
429 | tophys(r1,r1); | |
430 | RESTORE_REGS; | |
6e83557c | 431 | addik r1, r1, PT_SIZE /* Clean up stack space. */ |
ca54502b | 432 | lwi r1, r1, PT_R1 - PT_SIZE;/* Restore user stack pointer. */ |
9da63458 MS |
433 | bri 6f; |
434 | ||
435 | /* Return to kernel state. */ | |
436 | 2: set_bip; /* Ints masked for state restore */ | |
437 | VM_OFF; | |
438 | tophys(r1,r1); | |
439 | RESTORE_REGS; | |
6e83557c | 440 | addik r1, r1, PT_SIZE /* Clean up stack space. */ |
9da63458 MS |
441 | tovirt(r1,r1); |
442 | 6: | |
ca54502b MS |
443 | TRAP_return: /* Make global symbol for debugging */ |
444 | rtbd r14, 0; /* Instructions to return from an IRQ */ | |
445 | nop; | |
446 | ||
447 | ||
ca54502b MS |
448 | /* This is the initial entry point for a new child thread, with an appropriate
449 | stack in place that makes it look like the child is in the middle of a
450 | syscall. This function is actually `returned to' from switch_thread | |
451 | (copy_thread makes ret_from_fork the return address in each new thread's | |
452 | saved context). */ | |
453 | C_ENTRY(ret_from_fork): | |
454 | bralid r15, schedule_tail; /* ...which is schedule_tail's arg */ | |
fd11ff73 | 455 | add r5, r3, r0; /* switch_thread returns the prev task */ |
ca54502b | 456 | /* ( in the delay slot ) */ |
ca54502b | 457 | brid ret_from_trap; /* Do normal trap return */ |
9814cc11 | 458 | add r3, r0, r0; /* Child's fork call should return 0. */ |
ca54502b | 459 | |
2319295d AV |
460 | C_ENTRY(ret_from_kernel_thread): |
461 | bralid r15, schedule_tail; /* ...which is schedule_tail's arg */ | |
462 | add r5, r3, r0; /* switch_thread returns the prev task */ | |
463 | /* ( in the delay slot ) */ | |
464 | brald r15, r20 /* fn was left in r20 */ | |
465 | addk r5, r0, r19 /* ... and argument - in r19 */ | |
99c59f60 AV |
466 | brid ret_from_trap |
467 | add r3, r0, r0 | |
2319295d | 468 | |
ca54502b | 469 | C_ENTRY(sys_rt_sigreturn_wrapper): |
14203e19 | 470 | addik r30, r0, 0 /* no restarts */ |
791d0a16 | 471 | brid sys_rt_sigreturn /* Do real work */ |
6e83557c | 472 | addik r5, r1, 0; /* add user context as 1st arg */ |
ca54502b MS |
473 | |
474 | /* | |
475 | * HW EXCEPTION routine start | |
476 | */ | |
ca54502b | 477 | C_ENTRY(full_exception_trap): |
ca54502b MS |
478 | /* adjust exception address for privileged instruction |
479 | * for finding where is it */ | |
480 | addik r17, r17, -4 | |
481 | SAVE_STATE /* Save registers */ | |
06a54604 | 482 | /* PC, before IRQ/trap - this is one instruction above */ |
6e83557c | 483 | swi r17, r1, PT_PC; |
06a54604 | 484 | tovirt(r1,r1) |
ca54502b MS |
485 | /* FIXME this can be store directly in PT_ESR reg. |
486 | * I tested it but there is a fault */ | |
487 | /* where the trap should return need -8 to adjust for rtsd r15, 8 */ | |
b9ea77e2 | 488 | addik r15, r0, ret_from_exc - 8 |
ca54502b | 489 | mfs r6, resr |
ca54502b | 490 | mfs r7, rfsr; /* save FSR */ |
131e4e97 | 491 | mts rfsr, r0; /* Clear sticky fsr */ |
c318d483 | 492 | rted r0, full_exception |
6e83557c | 493 | addik r5, r1, 0 /* parameter struct pt_regs * regs */ |
ca54502b MS |
494 | |
495 | /* | |
496 | * Unaligned data trap. | |
497 | * | |
498 | * Unaligned data trap last on 4k page is handled here. | |
499 | * | |
500 | * Trap entered via exception, so EE bit is set, and interrupts | |
501 | * are masked. This is nice, means we don't have to CLI before state save | |
502 | * | |
503 | * The assembler routine is in "arch/microblaze/kernel/hw_exception_handler.S" | |
504 | */ | |
505 | C_ENTRY(unaligned_data_trap): | |
8b110d15 MS |
506 | /* MS: I have to save r11 value and then restore it because |
507 | * set_bip, clear_eip, set_ee use r11 as temp register if MSR | |
508 | * instructions are not used. We don't need to do if MSR instructions | |
509 | * are used and they use r0 instead of r11. | |
510 | * I am using ENTRY_SP which should be primary used only for stack | |
511 | * pointer saving. */ | |
512 | swi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); | |
513 | set_bip; /* equalize initial state for all possible entries */ | |
514 | clear_eip; | |
515 | set_ee; | |
516 | lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); | |
ca54502b | 517 | SAVE_STATE /* Save registers.*/ |
06a54604 | 518 | /* PC, before IRQ/trap - this is one instruction above */ |
6e83557c | 519 | swi r17, r1, PT_PC; |
06a54604 | 520 | tovirt(r1,r1) |
ca54502b | 521 | /* where the trap should return need -8 to adjust for rtsd r15, 8 */ |
b9ea77e2 | 522 | addik r15, r0, ret_from_exc-8 |
ca54502b | 523 | mfs r3, resr /* ESR */ |
ca54502b | 524 | mfs r4, rear /* EAR */ |
c318d483 | 525 | rtbd r0, _unaligned_data_exception |
6e83557c | 526 | addik r7, r1, 0 /* parameter struct pt_regs * regs */ |
ca54502b MS |
527 | |
528 | /* | |
529 | * Page fault traps. | |
530 | * | |
531 | * If the real exception handler (from hw_exception_handler.S) didn't find | |
532 | * the mapping for the process, then we're thrown here to handle such situation. | |
533 | * | |
534 | * Trap entered via exceptions, so EE bit is set, and interrupts | |
535 | * are masked. This is nice, means we don't have to CLI before state save | |
536 | * | |
537 | * Build a standard exception frame for TLB Access errors. All TLB exceptions | |
538 | * will bail out to this point if they can't resolve the lightweight TLB fault. | |
539 | * | |
540 | * The C function called is in "arch/microblaze/mm/fault.c", declared as: | |
541 | * void do_page_fault(struct pt_regs *regs, | |
542 | * unsigned long address, | |
543 | * unsigned long error_code) | |
544 | */ | |
545 | /* data and instruction trap - which one is chosen is resolved in fault.c */ | |
546 | C_ENTRY(page_fault_data_trap): | |
ca54502b | 547 | SAVE_STATE /* Save registers.*/ |
06a54604 | 548 | /* PC, before IRQ/trap - this is one instruction above */ |
6e83557c | 549 | swi r17, r1, PT_PC; |
06a54604 | 550 | tovirt(r1,r1) |
ca54502b | 551 | /* where the trap should return need -8 to adjust for rtsd r15, 8 */ |
b9ea77e2 | 552 | addik r15, r0, ret_from_exc-8 |
ca54502b | 553 | mfs r6, rear /* parameter unsigned long address */ |
ca54502b | 554 | mfs r7, resr /* parameter unsigned long error_code */ |
c318d483 | 555 | rted r0, do_page_fault |
6e83557c | 556 | addik r5, r1, 0 /* parameter struct pt_regs * regs */ |
ca54502b MS |
557 | |
558 | C_ENTRY(page_fault_instr_trap): | |
ca54502b | 559 | SAVE_STATE /* Save registers.*/ |
06a54604 | 560 | /* PC, before IRQ/trap - this is one instruction above */ |
6e83557c | 561 | swi r17, r1, PT_PC; |
06a54604 | 562 | tovirt(r1,r1) |
ca54502b | 563 | /* where the trap should return need -8 to adjust for rtsd r15, 8 */ |
b9ea77e2 | 564 | addik r15, r0, ret_from_exc-8 |
ca54502b | 565 | mfs r6, rear /* parameter unsigned long address */ |
ca54502b | 566 | ori r7, r0, 0 /* parameter unsigned long error_code */ |
9814cc11 | 567 | rted r0, do_page_fault |
6e83557c | 568 | addik r5, r1, 0 /* parameter struct pt_regs * regs */ |
ca54502b MS |
569 | |
570 | /* Entry point used to return from an exception. */ | |
571 | C_ENTRY(ret_from_exc): | |
6e83557c | 572 | lwi r11, r1, PT_MODE; |
ca54502b MS |
573 | bnei r11, 2f; /* See if returning to kernel mode, */ |
574 | /* ... if so, skip resched &c. */ | |
575 | ||
576 | /* We're returning to user mode, so check for various conditions that | |
577 | trigger rescheduling. */ | |
e9f92526 | 578 | 1: |
b1d70c62 | 579 | lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */ |
e9f92526 AV |
580 | lwi r19, r11, TI_FLAGS; /* get flags in thread info */ |
581 | andi r11, r19, _TIF_NEED_RESCHED; | |
ca54502b MS |
582 | beqi r11, 5f; |
583 | ||
584 | /* Call the scheduler before returning from a syscall/trap. */ | |
585 | bralid r15, schedule; /* Call scheduler */ | |
586 | nop; /* delay slot */ | |
e9f92526 | 587 | bri 1b |
ca54502b MS |
588 | |
589 | /* Maybe handle a signal */ | |
e9f92526 AV |
590 | 5: andi r11, r19, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME; |
591 | beqi r11, 4f; /* Signals to handle, handle them */ | |
ca54502b MS |
592 | |
593 | /* | |
594 | * Handle a signal return; Pending signals should be in r18. | |
595 | * | |
596 | * Not all registers are saved by the normal trap/interrupt entry | |
597 | * points (for instance, call-saved registers (because the normal | |
598 | * C-compiler calling sequence in the kernel makes sure they're | |
599 | * preserved), and call-clobbered registers in the case of | |
600 | * traps), but signal handlers may want to examine or change the | |
601 | * complete register state. Here we save anything not saved by | |
602 | * the normal entry sequence, so that it may be safely restored | |
969a9616 | 603 | * (in a possibly modified form) after do_notify_resume returns. */ |
6e83557c | 604 | addik r5, r1, 0; /* Arg 1: struct pt_regs *regs */ |
969a9616 | 605 | bralid r15, do_notify_resume; /* Handle any signals */ |
83140191 | 606 | addi r6, r0, 0; /* Arg 2: int in_syscall */ |
e9f92526 | 607 | bri 1b |
ca54502b MS |
608 | |
609 | /* Finally, return to user state. */ | |
e9f92526 | 610 | 4: set_bip; /* Ints masked for state restore */ |
8633bebc | 611 | swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */ |
ca54502b MS |
612 | VM_OFF; |
613 | tophys(r1,r1); | |
614 | ||
ca54502b | 615 | RESTORE_REGS; |
6e83557c | 616 | addik r1, r1, PT_SIZE /* Clean up stack space. */ |
ca54502b MS |
617 | |
618 | lwi r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer. */ | |
619 | bri 6f; | |
620 | /* Return to kernel state. */ | |
96014cc3 MS |
621 | 2: set_bip; /* Ints masked for state restore */ |
622 | VM_OFF; | |
ca54502b | 623 | tophys(r1,r1); |
ca54502b | 624 | RESTORE_REGS; |
6e83557c | 625 | addik r1, r1, PT_SIZE /* Clean up stack space. */ |
ca54502b MS |
626 | |
627 | tovirt(r1,r1); | |
628 | 6: | |
629 | EXC_return: /* Make global symbol for debugging */ | |
630 | rtbd r14, 0; /* Instructions to return from an IRQ */ | |
631 | nop; | |
632 | ||
633 | /* | |
634 | * HW EXCEPTION routine end | |
635 | */ | |
636 | ||
637 | /* | |
638 | * Hardware maskable interrupts. | |
639 | * | |
640 | * The stack-pointer (r1) should have already been saved to the memory | |
641 | * location PER_CPU(ENTRY_SP). | |
642 | */ | |
643 | C_ENTRY(_interrupt): | |
644 | /* MS: we are in physical address */ | |
645 | /* Save registers, switch to proper stack, convert SP to virtual.*/ | |
646 | swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) | |
ca54502b | 647 | /* MS: See if already in kernel mode. */ |
653e447e | 648 | mfs r1, rmsr |
5c0d72b1 | 649 | nop |
653e447e MS |
650 | andi r1, r1, MSR_UMS |
651 | bnei r1, 1f | |
ca54502b MS |
652 | |
653 | /* Kernel-mode state save. */ | |
653e447e MS |
654 | lwi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) |
655 | tophys(r1,r1); /* MS: I have in r1 physical address where stack is */ | |
ca54502b MS |
656 | /* save registers */ |
657 | /* MS: Make room on the stack -> activation record */ | |
6e83557c | 658 | addik r1, r1, -PT_SIZE; |
ca54502b | 659 | SAVE_REGS |
ca54502b | 660 | brid 2f; |
6e83557c | 661 | swi r1, r1, PT_MODE; /* 0 - user mode, 1 - kernel mode */ |
ca54502b MS |
662 | 1: |
663 | /* User-mode state save. */ | |
ca54502b MS |
664 | /* MS: get the saved current */ |
665 | lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); | |
666 | tophys(r1,r1); | |
667 | lwi r1, r1, TS_THREAD_INFO; | |
668 | addik r1, r1, THREAD_SIZE; | |
669 | tophys(r1,r1); | |
670 | /* save registers */ | |
6e83557c | 671 | addik r1, r1, -PT_SIZE; |
ca54502b MS |
672 | SAVE_REGS |
673 | /* calculate mode */ | |
6e83557c | 674 | swi r0, r1, PT_MODE; |
ca54502b | 675 | lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); |
6e83557c | 676 | swi r11, r1, PT_R1; |
80c5ff6b | 677 | clear_ums; |
ca54502b | 678 | 2: |
b1d70c62 | 679 | lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); |
ca54502b | 680 | tovirt(r1,r1) |
b9ea77e2 | 681 | addik r15, r0, irq_call; |
80c5ff6b | 682 | irq_call:rtbd r0, do_IRQ; |
6e83557c | 683 | addik r5, r1, 0; |
ca54502b MS |
684 | |
685 | /* MS: we are in virtual mode */ | |
686 | ret_from_irq: | |
6e83557c | 687 | lwi r11, r1, PT_MODE; |
ca54502b MS |
688 | bnei r11, 2f; |
689 | ||
e9f92526 | 690 | 1: |
b1d70c62 | 691 | lwi r11, CURRENT_TASK, TS_THREAD_INFO; |
e9f92526 AV |
692 | lwi r19, r11, TI_FLAGS; /* MS: get flags from thread info */ |
693 | andi r11, r19, _TIF_NEED_RESCHED; | |
ca54502b MS |
694 | beqi r11, 5f |
695 | bralid r15, schedule; | |
696 | nop; /* delay slot */ | |
e9f92526 | 697 | bri 1b |
ca54502b MS |
698 | |
699 | /* Maybe handle a signal */ | |
e9f92526 | 700 | 5: andi r11, r19, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME; |
ca54502b MS |
701 | beqid r11, no_intr_resched |
702 | /* Handle a signal return; Pending signals should be in r18. */ | |
6e83557c | 703 | addik r5, r1, 0; /* Arg 1: struct pt_regs *regs */ |
969a9616 | 704 | bralid r15, do_notify_resume; /* Handle any signals */ |
83140191 | 705 | addi r6, r0, 0; /* Arg 2: int in_syscall */ |
e9f92526 | 706 | bri 1b |
ca54502b MS |
707 | |
708 | /* Finally, return to user state. */ | |
709 | no_intr_resched: | |
710 | /* Disable interrupts, we are now committed to the state restore */ | |
711 | disable_irq | |
8633bebc | 712 | swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); |
ca54502b MS |
713 | VM_OFF; |
714 | tophys(r1,r1); | |
ca54502b | 715 | RESTORE_REGS |
6e83557c | 716 | addik r1, r1, PT_SIZE /* MS: Clean up stack space. */ |
ca54502b MS |
717 | lwi r1, r1, PT_R1 - PT_SIZE; |
718 | bri 6f; | |
719 | /* MS: Return to kernel state. */ | |
77753790 MS |
720 | 2: |
721 | #ifdef CONFIG_PREEMPT | |
b1d70c62 | 722 | lwi r11, CURRENT_TASK, TS_THREAD_INFO; |
77753790 MS |
723 | /* MS: get preempt_count from thread info */ |
724 | lwi r5, r11, TI_PREEMPT_COUNT; | |
725 | bgti r5, restore; | |
726 | ||
727 | lwi r5, r11, TI_FLAGS; /* get flags in thread info */ | |
728 | andi r5, r5, _TIF_NEED_RESCHED; | |
729 | beqi r5, restore /* if zero jump over */ | |
730 | ||
731 | preempt: | |
732 | /* interrupts are off that's why I am calling preempt_schedule_irq */ | |
733 | bralid r15, preempt_schedule_irq | |
734 | nop | |
b1d70c62 | 735 | lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */ |
77753790 MS |
736 | lwi r5, r11, TI_FLAGS; /* get flags in thread info */ |
737 | andi r5, r5, _TIF_NEED_RESCHED; | |
738 | bnei r5, preempt /* if non zero jump to resched */ | |
739 | restore: | |
740 | #endif | |
741 | VM_OFF /* MS: turn off MMU */ | |
ca54502b | 742 | tophys(r1,r1) |
ca54502b | 743 | RESTORE_REGS |
6e83557c | 744 | addik r1, r1, PT_SIZE /* MS: Clean up stack space. */ |
ca54502b MS |
745 | tovirt(r1,r1); |
746 | 6: | |
747 | IRQ_return: /* MS: Make global symbol for debugging */ | |
748 | rtid r14, 0 | |
749 | nop | |
750 | ||
751 | /* | |
2d5973cb MS |
752 | * Debug trap for KGDB. Enter to _debug_exception by brki r16, 0x18 |
753 | * and call handling function with saved pt_regs | |
ca54502b MS |
754 | */ |
/*
 * _debug_exception - handler for the 0x18 debug trap (entered via
 * "brki r16, 0x18"; see the .init.ivt vector table at the end of this
 * file).  r16 holds the trapped PC and BIP is set on entry, so no
 * interrupts can occur.  MSR_UMS selects one of two save paths:
 *   - kernel mode: build a pt_regs frame and enter microblaze_kgdb_break
 *   - user mode:   build a pt_regs frame and enter sw_exception (gdb)
 * Both paths resume at dbtrap_call and split again on PT_MODE for the
 * matching restore sequence.
 */
755 | C_ENTRY(_debug_exception): |
756 | /* BIP bit is set on entry, no interrupts can occur */ |
757 | swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) |
758 | ||
/* Were we in user mode when the trap fired?  MSR_UMS != 0 => user. */
653e447e | 759 | mfs r1, rmsr |
5c0d72b1 | 760 | nop |
653e447e MS |
761 | andi r1, r1, MSR_UMS |
762 | bnei r1, 1f |
2d5973cb | 763 | /* MS: Kernel-mode state save - kgdb */ |
653e447e | 764 | lwi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/ |
ca54502b | 765 | |
2d5973cb | 766 | /* BIP bit is set on entry, no interrupts can occur */ |
/* Convert SP to a physical address and open a pt_regs frame on it. */
6e83557c | 767 | addik r1, r1, CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - PT_SIZE; |
ca54502b | 768 | SAVE_REGS; |
2d5973cb | 769 | /* save all regs to pt_reg structure */ |
6e83557c MS |
770 | swi r0, r1, PT_R0; /* R0 must be saved too */ |
771 | swi r14, r1, PT_R14 /* rewrite saved R14 value */ |
772 | swi r16, r1, PT_PC; /* PC and r16 are the same */ |
2d5973cb MS |
773 | /* save special purpose registers to pt_regs */ |
774 | mfs r11, rear; |
6e83557c | 775 | swi r11, r1, PT_EAR; |
2d5973cb | 776 | mfs r11, resr; |
6e83557c | 777 | swi r11, r1, PT_ESR; |
2d5973cb | 778 | mfs r11, rfsr; |
6e83557c | 779 | swi r11, r1, PT_FSR; |
2d5973cb MS |
780 | |
781 | /* r1 now holds a physical address and has been decreased |
6e83557c MS |
782 | * by PT_SIZE; recompute the original (virtual) SP for pt_regs->r1 */ |
783 | addik r11, r1, CONFIG_KERNEL_START - CONFIG_KERNEL_BASE_ADDR + PT_SIZE; |
784 | swi r11, r1, PT_R1 |
2d5973cb MS |
785 | /* MS: r31 - current pointer isn't changed */ |
786 | tovirt(r1,r1) |
787 | #ifdef CONFIG_KGDB |
6e83557c | 788 | addi r5, r1, 0 /* pass pt_reg address as the first arg */ |
cd341577 | 789 | addik r15, r0, dbtrap_call; /* return address */ |
2d5973cb MS |
790 | rtbd r0, microblaze_kgdb_break |
791 | nop; |
792 | #endif |
793 | /* MS: Place handler for brki from kernel space if KGDB is OFF. |
794 | * It is very unlikely that another brki instruction is called. */ |
795 | bri 0 |
ca54502b | 796 | |
2d5973cb MS |
797 | /* MS: User-mode state save - gdb */ |
/* Switch onto the task's kernel stack before saving user state. */
798 | 1: lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */ |
ca54502b MS |
799 | tophys(r1,r1); |
800 | lwi r1, r1, TS_THREAD_INFO; /* get the thread info */ |
801 | addik r1, r1, THREAD_SIZE; /* calculate kernel stack pointer */ |
802 | tophys(r1,r1); |
803 | ||
6e83557c | 804 | addik r1, r1, -PT_SIZE; /* Make room on the stack. */ |
ca54502b | 805 | SAVE_REGS; |
6e83557c MS |
806 | swi r16, r1, PT_PC; /* Save LP */ |
807 | swi r0, r1, PT_MODE; /* Was in user-mode. */ |
ca54502b | 808 | lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); |
6e83557c | 809 | swi r11, r1, PT_R1; /* Store user SP. */ |
2d5973cb | 810 | lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); |
ca54502b | 811 | tovirt(r1,r1) |
06b28640 | 812 | set_vms; |
6e83557c | 813 | addik r5, r1, 0; |
b9ea77e2 | 814 | addik r15, r0, dbtrap_call; |
2d5973cb | 815 | dbtrap_call: /* Return point for kernel/user entry + 8 because of rtsd r15, 8 */ |
751f1605 MS |
816 | rtbd r0, sw_exception |
817 | nop |
ca54502b | 818 | |
2d5973cb MS |
819 | /* MS: The first instruction for the second part of the gdb/kgdb */ |
820 | set_bip; /* Ints masked for state restore */ |
/* PT_MODE != 0 => trap came from kernel mode; restore via label 2. */
6e83557c | 821 | lwi r11, r1, PT_MODE; |
ca54502b | 822 | bnei r11, 2f; |
2d5973cb | 823 | /* MS: Return to user space - gdb */ |
/* Loop: run reschedule/signal work until the TIF flags are clear. */
e9f92526 | 824 | 1: |
ca54502b | 825 | /* Get current task ptr into r11 */ |
b1d70c62 | 826 | lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */ |
e9f92526 AV |
827 | lwi r19, r11, TI_FLAGS; /* get flags in thread info */ |
828 | andi r11, r19, _TIF_NEED_RESCHED; |
ca54502b MS |
829 | beqi r11, 5f; |
830 | ||
2d5973cb | 831 | /* Call the scheduler before returning from a syscall/trap. */ |
ca54502b MS |
832 | bralid r15, schedule; /* Call scheduler */ |
833 | nop; /* delay slot */ |
e9f92526 | 834 | bri 1b |
ca54502b MS |
835 | |
836 | /* Maybe handle a signal */ |
e9f92526 AV |
837 | 5: andi r11, r19, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME; |
838 | beqi r11, 4f; /* Signals to handle, handle them */ |
ca54502b | 839 | |
6e83557c | 840 | addik r5, r1, 0; /* Arg 1: struct pt_regs *regs */ |
969a9616 | 841 | bralid r15, do_notify_resume; /* Handle any signals */ |
83140191 | 842 | addi r6, r0, 0; /* Arg 2: int in_syscall */ |
e9f92526 | 843 | bri 1b |
ca54502b | 844 | |
ca54502b | 845 | /* Finally, return to user state. */ |
e9f92526 | 846 | 4: swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */ |
ca54502b MS |
847 | VM_OFF; |
848 | tophys(r1,r1); |
2d5973cb | 849 | /* MS: Restore all regs */ |
ca54502b | 850 | RESTORE_REGS |
6e83557c | 851 | addik r1, r1, PT_SIZE /* Clean up stack space */ |
2d5973cb MS |
852 | lwi r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer */ |
853 | DBTRAP_return_user: /* MS: Make global symbol for debugging */ |
854 | rtbd r16, 0; /* MS: Instructions to return from a debug trap */ |
855 | nop; |
ca54502b | 856 | |
2d5973cb | 857 | /* MS: Return to kernel state - kgdb */ |
ca54502b MS |
858 | 2: VM_OFF; |
859 | tophys(r1,r1); |
2d5973cb | 860 | /* MS: Restore all regs */ |
ca54502b | 861 | RESTORE_REGS |
6e83557c MS |
862 | lwi r14, r1, PT_R14; |
863 | lwi r16, r1, PT_PC; |
864 | addik r1, r1, PT_SIZE; /* MS: Clean up stack space */ |
ca54502b | 865 | tovirt(r1,r1); |
2d5973cb MS |
866 | DBTRAP_return_kernel: /* MS: Make global symbol for debugging */ |
867 | rtbd r16, 0; /* MS: Instructions to return from a debug trap */ |
ca54502b MS |
868 | nop; |
869 | ||
870 | ||
ca54502b MS |
/*
 * _switch_to(prev_ti, next_ti) - context switch between two tasks.
 * In:  r5 = thread_info of the outgoing task,
 *      r6 = thread_info of the incoming task.
 * Out: r3 = old CURRENT_TASK pointer (set before the switch).
 * Saves the callee-preserved and dedicated registers plus MSR/EAR/ESR/FSR
 * into prev's cpu_context, updates CURRENT_TASK (r31) and the per-cpu
 * current_save slot, restores next's cpu_context, and returns with
 * "rtsd r15, 8" on the new task's stack.
 * NOTE(review): EAR/ESR are saved below but never reloaded on restore -
 * presumably they are read-only status registers; confirm before relying
 * on them surviving a switch.
 */
871 | ENTRY(_switch_to) |
872 | /* prepare return value */ |
b1d70c62 | 873 | addk r3, r0, CURRENT_TASK |
ca54502b MS |
874 | |
875 | /* save registers in cpu_context */ |
876 | /* use r11 and r12, volatile registers, as temp register */ |
877 | /* give start of cpu_context for previous process */ |
878 | addik r11, r5, TI_CPU_CONTEXT |
879 | swi r1, r11, CC_R1 |
880 | swi r2, r11, CC_R2 |
881 | /* skip volatile registers. |
882 | * they are saved on stack when we jumped to _switch_to() */ |
883 | /* dedicated registers */ |
884 | swi r13, r11, CC_R13 |
885 | swi r14, r11, CC_R14 |
886 | swi r15, r11, CC_R15 |
887 | swi r16, r11, CC_R16 |
888 | swi r17, r11, CC_R17 |
889 | swi r18, r11, CC_R18 |
890 | /* save non-volatile registers */ |
891 | swi r19, r11, CC_R19 |
892 | swi r20, r11, CC_R20 |
893 | swi r21, r11, CC_R21 |
894 | swi r22, r11, CC_R22 |
895 | swi r23, r11, CC_R23 |
896 | swi r24, r11, CC_R24 |
897 | swi r25, r11, CC_R25 |
898 | swi r26, r11, CC_R26 |
899 | swi r27, r11, CC_R27 |
900 | swi r28, r11, CC_R28 |
901 | swi r29, r11, CC_R29 |
902 | swi r30, r11, CC_R30 |
903 | /* special purpose registers */ |
904 | mfs r12, rmsr |
ca54502b MS |
905 | swi r12, r11, CC_MSR |
906 | mfs r12, rear |
ca54502b MS |
907 | swi r12, r11, CC_EAR |
908 | mfs r12, resr |
ca54502b MS |
909 | swi r12, r11, CC_ESR |
910 | mfs r12, rfsr |
ca54502b MS |
911 | swi r12, r11, CC_FSR |
912 | ||
b1d70c62 MS |
913 | /* update r31 (CURRENT_TASK) to point at the task which runs next */ |
914 | lwi CURRENT_TASK, r6, TI_TASK |
ca54502b | 915 | /* store it to current_save too */ |
b1d70c62 | 916 | swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE) |
ca54502b MS |
917 | |
918 | /* get new process' cpu context and restore */ |
919 | /* get the start of the next task's cpu_context */ |
920 | addik r11, r6, TI_CPU_CONTEXT |
921 | ||
922 | /* non-volatile registers */ |
923 | lwi r30, r11, CC_R30 |
924 | lwi r29, r11, CC_R29 |
925 | lwi r28, r11, CC_R28 |
926 | lwi r27, r11, CC_R27 |
927 | lwi r26, r11, CC_R26 |
928 | lwi r25, r11, CC_R25 |
929 | lwi r24, r11, CC_R24 |
930 | lwi r23, r11, CC_R23 |
931 | lwi r22, r11, CC_R22 |
932 | lwi r21, r11, CC_R21 |
933 | lwi r20, r11, CC_R20 |
934 | lwi r19, r11, CC_R19 |
935 | /* dedicated registers */ |
936 | lwi r18, r11, CC_R18 |
937 | lwi r17, r11, CC_R17 |
938 | lwi r16, r11, CC_R16 |
939 | lwi r15, r11, CC_R15 |
940 | lwi r14, r11, CC_R14 |
941 | lwi r13, r11, CC_R13 |
942 | /* skip volatile registers */ |
943 | lwi r2, r11, CC_R2 |
944 | lwi r1, r11, CC_R1 |
945 | ||
946 | /* special purpose registers */ |
947 | lwi r12, r11, CC_FSR |
948 | mts rfsr, r12 |
ca54502b MS |
949 | lwi r12, r11, CC_MSR |
950 | mts rmsr, r12 |
ca54502b MS |
951 | |
/* return on the new task's stack via its saved r15 */
952 | rtsd r15, 8 |
953 | nop |
954 | ||
/* _reset: unconditional branch to address 0, the hardware reset vector. */
955 | ENTRY(_reset) |
7574349c | 956 | brai 0; /* Jump to reset vector */ |
ca54502b | 957 | |
ca54502b MS |
/*
 * Interrupt-vector table: one "brai" per hardware vector slot
 * (0x08 user exception/syscall, 0x10 interrupt, 0x18 debug trap,
 * 0x20 HW exception; 0x00 reset only when a manual reset vector is
 * configured).  Assembled into high memory and copied into place by
 * mach_early_setup, hence TOPHYS() on every branch target.
 */
958 | /* These are compiled and loaded into high memory, then |
959 | * copied into place in mach_early_setup */ |
960 | .section .init.ivt, "ax" |
0b9b0200 | 961 | #if CONFIG_MANUAL_RESET_VECTOR |
ca54502b | 962 | .org 0x0 |
0b9b0200 MS |
963 | brai CONFIG_MANUAL_RESET_VECTOR |
964 | #endif |
626afa35 | 965 | .org 0x8 |
ca54502b | 966 | brai TOPHYS(_user_exception); /* syscall handler */ |
626afa35 | 967 | .org 0x10 |
ca54502b | 968 | brai TOPHYS(_interrupt); /* Interrupt handler */ |
626afa35 | 969 | .org 0x18 |
751f1605 | 970 | brai TOPHYS(_debug_exception); /* debug trap handler */ |
626afa35 | 971 | .org 0x20 |
ca54502b MS |
972 | brai TOPHYS(_hw_exception_handler); /* HW exception handler */ |
973 | ||
ca54502b MS |
/* Read-only syscall table; sys_call_table itself is defined in the
 * included syscall_table.S.  syscall_table_size is the byte length
 * from sys_call_table to the current location counter. */
974 | .section .rodata,"a" |
975 | #include "syscall_table.S" |
976 | ||
977 | syscall_table_size=(.-sys_call_table) |
978 | ||
ce3266c0 SM |
/* NUL-terminated name strings referenced by the unwinder table below. */
979 | type_SYSCALL: |
980 | .ascii "SYSCALL\0" |
981 | type_IRQ: |
982 | .ascii "IRQ\0" |
983 | type_IRQ_PREEMPT: |
984 | .ascii "IRQ (PREEMPTED)\0" |
985 | type_SYSCALL_PREEMPT: |
/* NOTE(review): the leading space in the string below looks unintentional -
 * confirm against the unwinder's expected output before changing it. */
986 | .ascii " SYSCALL (PREEMPTED)\0" |
987 | ||
988 | /* |
989 | * Trap decoding for stack unwinder |
990 | * Tuples are (start addr, end addr, string) |
991 | * If return address lies on [start addr, end addr], |
992 | * unwinder displays 'string' |
993 | */ |
994 | ||
995 | .align 4 |
996 | .global microblaze_trap_handlers |
997 | microblaze_trap_handlers: |
998 | /* Exact matches come first */ |
999 | .word ret_from_trap; .word ret_from_trap ; .word type_SYSCALL |
1000 | .word ret_from_irq ; .word ret_from_irq ; .word type_IRQ |
1001 | /* Fuzzy matches go here */ |
1002 | .word ret_from_irq ; .word no_intr_resched ; .word type_IRQ_PREEMPT |
1003 | .word ret_from_trap; .word TRAP_return ; .word type_SYSCALL_PREEMPT |
1004 | /* End of table */ |
/* All-zero sentinel row terminates the table. */
1005 | .word 0 ; .word 0 ; .word 0 |