Commit | Line | Data |
---|---|---|
1394f032 BW |
1 | /* |
2 | * File: arch/blackfin/mach-common/entry.S | |
3 | * Based on: | |
4 | * Author: Linus Torvalds | |
5 | * | |
6 | * Created: ? | |
7 | * Description: contains the system-call and fault low-level handling routines. | |
8 | * This also contains the timer-interrupt handler, as well as all | |
9 | * interrupts and faults that can result in a task-switch. | |
10 | * | |
11 | * Modified: | |
12 | * Copyright 2004-2006 Analog Devices Inc. | |
13 | * | |
14 | * Bugs: Enter bugs at http://blackfin.uclinux.org/ | |
15 | * | |
16 | * This program is free software; you can redistribute it and/or modify | |
17 | * it under the terms of the GNU General Public License as published by | |
18 | * the Free Software Foundation; either version 2 of the License, or | |
19 | * (at your option) any later version. | |
20 | * | |
21 | * This program is distributed in the hope that it will be useful, | |
22 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
23 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
24 | * GNU General Public License for more details. | |
25 | * | |
26 | * You should have received a copy of the GNU General Public License | |
27 | * along with this program; if not, see the file COPYING, or write | |
28 | * to the Free Software Foundation, Inc., | |
29 | * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | |
30 | */ | |
31 | ||
32 | /* | |
33 | * 25-Dec-2004 - LG Soft India | |
34 | * 1. Fix in return_from_int, to make sure any pending | |
35 | * system call in ILAT for this process to get | |
36 | * executed, otherwise in case context switch happens, | |
37 | * system call of first process (i.e in ILAT) will be | |
38 | * carried forward to the switched process. | |
39 | * 2. Removed Constant references for the following | |
40 | * a. IPEND | |
41 | * b. EXCAUSE mask | |
42 | * c. PAGE Mask | |
43 | */ | |
44 | ||
45 | /* | |
46 | * NOTE: This code handles signal-recognition, which happens every time | |
47 | * after a timer-interrupt and after each system call. | |
48 | */ | |
49 | ||
50 | ||
51 | #include <linux/linkage.h> | |
1f83b8f1 | 52 | #include <linux/unistd.h> |
1394f032 | 53 | #include <asm/blackfin.h> |
1394f032 BW |
54 | #include <asm/errno.h> |
55 | #include <asm/thread_info.h> /* TIF_NEED_RESCHED */ | |
56 | #include <asm/asm-offsets.h> | |
669b792c | 57 | #include <asm/trace.h> |
1394f032 BW |
58 | |
59 | #include <asm/mach-common/context.S> | |
60 | ||
f0b5d12f MF |
61 | #if defined(CONFIG_BFIN_SCRATCH_REG_RETN) |
62 | # define EX_SCRATCH_REG RETN | |
63 | #elif defined(CONFIG_BFIN_SCRATCH_REG_RETE) | |
64 | # define EX_SCRATCH_REG RETE | |
65 | #else | |
66 | # define EX_SCRATCH_REG CYCLES | |
67 | #endif | |
68 | ||
1394f032 BW |
69 | #ifdef CONFIG_EXCPT_IRQ_SYSC_L1 |
70 | .section .l1.text | |
71 | #else | |
72 | .text | |
73 | #endif | |
74 | ||
75 | /* Slightly simplified and streamlined entry point for CPLB misses. | |
76 | * This one does not lower the level to IRQ5, and thus can be used to | |
77 | * patch up CPLB misses on the kernel stack. | |
78 | */ | |
79 | ENTRY(_ex_dcplb) | |
1aafd909 | 80 | #if ANOMALY_05000261 |
1394f032 BW |
81 | /* |
82 | * Work around an anomaly: if we see a new DCPLB fault, return | |
83 | * without doing anything. Then, if we get the same fault again, | |
84 | * handle it. | |
85 | */ | |
86 | p5.l = _last_cplb_fault_retx; | |
87 | p5.h = _last_cplb_fault_retx; | |
88 | r7 = [p5]; | |
89 | r6 = retx; | |
90 | [p5] = r6; | |
91 | cc = r6 == r7; | |
92 | if !cc jump _return_from_exception; | |
93 | /* fall through */ | |
94 | #endif | |
51be24c3 | 95 | ENDPROC(_ex_dcplb) |
1394f032 BW |
96 | |
97 | ENTRY(_ex_icplb) | |
98 | (R7:6,P5:4) = [sp++]; | |
99 | ASTAT = [sp++]; | |
100 | SAVE_ALL_SYS | |
101 | call __cplb_hdr; | |
669b792c | 102 | DEBUG_START_HWTRACE(p5, r7) |
1394f032 | 103 | RESTORE_ALL_SYS |
f0b5d12f | 104 | SP = EX_SCRATCH_REG; |
1394f032 | 105 | rtx; |
51be24c3 | 106 | ENDPROC(_ex_icplb) |
1394f032 | 107 | |
1394f032 | 108 | ENTRY(_ex_syscall) |
669b792c | 109 | DEBUG_START_HWTRACE(p5, r7) |
1394f032 BW |
110 | (R7:6,P5:4) = [sp++]; |
111 | ASTAT = [sp++]; | |
112 | raise 15; /* invoked by TRAP #0, for sys call */ | |
f0b5d12f | 113 | sp = EX_SCRATCH_REG; |
1394f032 | 114 | rtx |
51be24c3 | 115 | ENDPROC(_ex_syscall) |
1394f032 | 116 | |
1394f032 BW |
117 | ENTRY(_ex_soft_bp) |
118 | r7 = retx; | |
119 | r7 += -2; | |
120 | retx = r7; | |
121 | jump.s _ex_trap_c; | |
51be24c3 | 122 | ENDPROC(_ex_soft_bp) |
1394f032 BW |
123 | |
124 | ENTRY(_ex_single_step) | |
125 | r7 = retx; | |
126 | r6 = reti; | |
127 | cc = r7 == r6; | |
128 | if cc jump _return_from_exception | |
129 | r7 = syscfg; | |
130 | bitclr (r7, 0); | |
131 | syscfg = R7; | |
132 | ||
133 | p5.l = lo(IPEND); | |
134 | p5.h = hi(IPEND); | |
135 | r6 = [p5]; | |
136 | cc = bittst(r6, 5); | |
137 | if !cc jump _ex_trap_c; | |
138 | p4.l = lo(EVT5); | |
139 | p4.h = hi(EVT5); | |
140 | r6.h = _exception_to_level5; | |
141 | r6.l = _exception_to_level5; | |
142 | r7 = [p4]; | |
143 | cc = r6 == r7; | |
144 | if !cc jump _ex_trap_c; | |
145 | ||
1ffe6646 | 146 | ENTRY(_return_from_exception) |
669b792c | 147 | DEBUG_START_HWTRACE(p5, r7) |
1aafd909 | 148 | #if ANOMALY_05000257 |
8af10b79 MH |
149 | R7=LC0; |
150 | LC0=R7; | |
151 | R7=LC1; | |
152 | LC1=R7; | |
153 | #endif | |
1394f032 BW |
154 | (R7:6,P5:4) = [sp++]; |
155 | ASTAT = [sp++]; | |
f0b5d12f | 156 | sp = EX_SCRATCH_REG; |
1394f032 | 157 | rtx; |
51be24c3 | 158 | ENDPROC(_ex_soft_bp) |
1394f032 BW |
159 | |
160 | ENTRY(_handle_bad_cplb) | |
161 | /* To get here, we just tried and failed to change a CPLB | |
162 | * so, handle things in trap_c (C code), by lowering to | |
163 | * IRQ5, just like we normally do. Since this is not a | |
164 | * "normal" return path, we have to do a lot of stuff to | |
165 | * the stack to get ready, so we can fall through - we | |
166 | * need to make a CPLB exception look like a normal exception | |
167 | */ | |
168 | ||
669b792c | 169 | DEBUG_START_HWTRACE(p5, r7) |
1394f032 BW |
170 | RESTORE_ALL_SYS |
171 | [--sp] = ASTAT; | |
172 | [--sp] = (R7:6, P5:4); | |
173 | ||
1ffe6646 MF |
174 | ENTRY(_ex_replaceable) |
175 | nop; | |
176 | ||
1394f032 BW |
177 | ENTRY(_ex_trap_c) |
178 | /* Call C code (trap_c) to handle the exception, which most | |
179 | * likely involves sending a signal to the current process. | |
180 | * To avoid double faults, lower our priority to IRQ5 first. | |
181 | */ | |
182 | P5.h = _exception_to_level5; | |
183 | P5.l = _exception_to_level5; | |
184 | p4.l = lo(EVT5); | |
185 | p4.h = hi(EVT5); | |
186 | [p4] = p5; | |
187 | csync; | |
188 | ||
189 | /* Disable all interrupts, but make sure level 5 is enabled so | |
190 | * we can switch to that level. Save the old mask. */ | |
191 | cli r6; | |
192 | p4.l = _excpt_saved_imask; | |
193 | p4.h = _excpt_saved_imask; | |
194 | [p4] = r6; | |
195 | r6 = 0x3f; | |
196 | sti r6; | |
197 | ||
198 | /* Save the excause into a circular buffer, in case the instruction | |
199 | * which caused this exception causes others. | |
200 | */ | |
201 | P5.l = _in_ptr_excause; | |
202 | P5.h = _in_ptr_excause; | |
203 | R7 = [P5]; | |
204 | R7 += 4; | |
205 | R6 = 0xF; | |
206 | R7 = R7 & R6; | |
207 | [P5] = R7; | |
208 | R6.l = _excause_circ_buf; | |
209 | R6.h = _excause_circ_buf; | |
210 | R7 = R7 + R6; | |
211 | p5 = R7; | |
212 | R6 = SEQSTAT; | |
213 | [P5] = R6; | |
214 | ||
669b792c | 215 | DEBUG_START_HWTRACE(p5, r7) |
1394f032 BW |
216 | (R7:6,P5:4) = [sp++]; |
217 | ASTAT = [sp++]; | |
f0b5d12f | 218 | SP = EX_SCRATCH_REG; |
1394f032 BW |
219 | raise 5; |
220 | rtx; | |
51be24c3 | 221 | ENDPROC(_ex_trap_c) |
1394f032 BW |
222 | |
223 | ENTRY(_exception_to_level5) | |
224 | SAVE_ALL_SYS | |
225 | ||
226 | /* Restore interrupt mask. We haven't pushed RETI, so this | |
227 | * doesn't enable interrupts until we return from this handler. */ | |
228 | p4.l = _excpt_saved_imask; | |
229 | p4.h = _excpt_saved_imask; | |
230 | r6 = [p4]; | |
231 | sti r6; | |
232 | ||
233 | /* Restore the hardware error vector. */ | |
234 | P5.h = _evt_ivhw; | |
235 | P5.l = _evt_ivhw; | |
236 | p4.l = lo(EVT5); | |
237 | p4.h = hi(EVT5); | |
238 | [p4] = p5; | |
239 | csync; | |
240 | ||
241 | p2.l = lo(IPEND); | |
242 | p2.h = hi(IPEND); | |
243 | csync; | |
244 | r0 = [p2]; /* Read current IPEND */ | |
245 | [sp + PT_IPEND] = r0; /* Store IPEND */ | |
246 | ||
247 | /* Pop the excause from the circular buffer and push it on the stack | |
248 | * (in the right place - if you change the location of SEQSTAT, you | |
249 | * must change this offset). | |
250 | */ | |
251 | .L_excep_to_5_again: | |
252 | P5.l = _out_ptr_excause; | |
253 | P5.h = _out_ptr_excause; | |
254 | R7 = [P5]; | |
255 | R7 += 4; | |
256 | R6 = 0xF; | |
257 | R7 = R7 & R6; | |
258 | [P5] = R7; | |
259 | R6.l = _excause_circ_buf; | |
260 | R6.h = _excause_circ_buf; | |
261 | R7 = R7 + R6; | |
262 | P5 = R7; | |
263 | R1 = [P5]; | |
264 | [SP + 8] = r1; | |
265 | ||
266 | r0 = sp; /* stack frame pt_regs pointer argument ==> r0 */ | |
267 | SP += -12; | |
268 | call _trap_c; | |
269 | SP += 12; | |
270 | ||
271 | /* See if anything else is in the exception buffer | |
272 | * if there is, process it | |
273 | */ | |
274 | P5.l = _out_ptr_excause; | |
275 | P5.h = _out_ptr_excause; | |
276 | P4.l = _in_ptr_excause; | |
277 | P4.h = _in_ptr_excause; | |
278 | R6 = [P5]; | |
279 | R7 = [P4]; | |
280 | CC = R6 == R7; | |
281 | if ! CC JUMP .L_excep_to_5_again | |
282 | ||
283 | call _ret_from_exception; | |
284 | RESTORE_ALL_SYS | |
285 | rti; | |
51be24c3 | 286 | ENDPROC(_exception_to_level5) |
1394f032 BW |
287 | |
288 | ENTRY(_trap) /* Exception: 4th entry into system event table(supervisor mode)*/ | |
289 | /* Since the kernel stack can be anywhere, it's not guaranteed to be | |
290 | * covered by a CPLB. Switch to an exception stack; use RETN as a | |
291 | * scratch register (for want of a better option). | |
292 | */ | |
f0b5d12f | 293 | EX_SCRATCH_REG = sp; |
1394f032 BW |
294 | sp.l = _exception_stack_top; |
295 | sp.h = _exception_stack_top; | |
296 | /* Try to deal with syscalls quickly. */ | |
297 | [--sp] = ASTAT; | |
298 | [--sp] = (R7:6, P5:4); | |
669b792c | 299 | DEBUG_STOP_HWTRACE(p5, r7) |
1394f032 BW |
300 | r7 = SEQSTAT; /* reason code is in bit 5:0 */ |
301 | r6.l = lo(SEQSTAT_EXCAUSE); | |
302 | r6.h = hi(SEQSTAT_EXCAUSE); | |
303 | r7 = r7 & r6; | |
1ffe6646 MF |
304 | p5.h = _ex_table; |
305 | p5.l = _ex_table; | |
1394f032 BW |
306 | p4 = r7; |
307 | p5 = p5 + (p4 << 2); | |
308 | p4 = [p5]; | |
309 | jump (p4); | |
310 | ||
311 | .Lbadsys: | |
312 | r7 = -ENOSYS; /* signextending enough */ | |
313 | [sp + PT_R0] = r7; /* return value from system call */ | |
314 | jump .Lsyscall_really_exit; | |
51be24c3 | 315 | ENDPROC(_trap) |
1394f032 BW |
316 | |
317 | ENTRY(_kernel_execve) | |
318 | link SIZEOF_PTREGS; | |
319 | p0 = sp; | |
320 | r3 = SIZEOF_PTREGS / 4; | |
321 | r4 = 0(x); | |
322 | 0: | |
323 | [p0++] = r4; | |
324 | r3 += -1; | |
325 | cc = r3 == 0; | |
326 | if !cc jump 0b (bp); | |
327 | ||
328 | p0 = sp; | |
329 | sp += -16; | |
330 | [sp + 12] = p0; | |
331 | call _do_execve; | |
332 | SP += 16; | |
333 | cc = r0 == 0; | |
334 | if ! cc jump 1f; | |
335 | /* Success. Copy our temporary pt_regs to the top of the kernel | |
336 | * stack and do a normal exception return. | |
337 | */ | |
338 | r1 = sp; | |
339 | r0 = (-KERNEL_STACK_SIZE) (x); | |
340 | r1 = r1 & r0; | |
341 | p2 = r1; | |
342 | p3 = [p2]; | |
343 | r0 = KERNEL_STACK_SIZE - 4 (z); | |
344 | p1 = r0; | |
345 | p1 = p1 + p2; | |
346 | ||
347 | p0 = fp; | |
348 | r4 = [p0--]; | |
349 | r3 = SIZEOF_PTREGS / 4; | |
350 | 0: | |
351 | r4 = [p0--]; | |
352 | [p1--] = r4; | |
353 | r3 += -1; | |
354 | cc = r3 == 0; | |
355 | if ! cc jump 0b (bp); | |
356 | ||
357 | r0 = (KERNEL_STACK_SIZE - SIZEOF_PTREGS) (z); | |
358 | p1 = r0; | |
359 | p1 = p1 + p2; | |
360 | sp = p1; | |
361 | r0 = syscfg; | |
362 | [SP + PT_SYSCFG] = r0; | |
363 | [p3 + (TASK_THREAD + THREAD_KSP)] = sp; | |
364 | ||
365 | RESTORE_CONTEXT; | |
366 | rti; | |
367 | 1: | |
368 | unlink; | |
369 | rts; | |
51be24c3 | 370 | ENDPROC(_kernel_execve) |
1394f032 BW |
371 | |
372 | ENTRY(_system_call) | |
373 | /* Store IPEND */ | |
374 | p2.l = lo(IPEND); | |
375 | p2.h = hi(IPEND); | |
376 | csync; | |
377 | r0 = [p2]; | |
378 | [sp + PT_IPEND] = r0; | |
379 | ||
380 | /* Store RETS for now */ | |
381 | r0 = rets; | |
382 | [sp + PT_RESERVED] = r0; | |
383 | /* Set the stack for the current process */ | |
384 | r7 = sp; | |
385 | r6.l = lo(ALIGN_PAGE_MASK); | |
386 | r6.h = hi(ALIGN_PAGE_MASK); | |
387 | r7 = r7 & r6; /* thread_info */ | |
388 | p2 = r7; | |
389 | p2 = [p2]; | |
390 | ||
391 | [p2+(TASK_THREAD+THREAD_KSP)] = sp; | |
392 | ||
393 | /* Check the System Call */ | |
394 | r7 = __NR_syscall; | |
395 | /* System call number is passed in P0 */ | |
396 | r6 = p0; | |
397 | cc = r6 < r7; | |
398 | if ! cc jump .Lbadsys; | |
399 | ||
400 | /* are we tracing syscalls?*/ | |
401 | r7 = sp; | |
402 | r6.l = lo(ALIGN_PAGE_MASK); | |
403 | r6.h = hi(ALIGN_PAGE_MASK); | |
404 | r7 = r7 & r6; | |
405 | p2 = r7; | |
406 | r7 = [p2+TI_FLAGS]; | |
407 | CC = BITTST(r7,TIF_SYSCALL_TRACE); | |
408 | if CC JUMP _sys_trace; | |
409 | ||
410 | /* Execute the appropriate system call */ | |
411 | ||
412 | p4 = p0; | |
413 | p5.l = _sys_call_table; | |
414 | p5.h = _sys_call_table; | |
415 | p5 = p5 + (p4 << 2); | |
416 | r0 = [sp + PT_R0]; | |
417 | r1 = [sp + PT_R1]; | |
418 | r2 = [sp + PT_R2]; | |
419 | p5 = [p5]; | |
420 | ||
421 | [--sp] = r5; | |
422 | [--sp] = r4; | |
423 | [--sp] = r3; | |
424 | SP += -12; | |
425 | call (p5); | |
426 | SP += 24; | |
427 | [sp + PT_R0] = r0; | |
428 | ||
429 | .Lresume_userspace: | |
430 | r7 = sp; | |
431 | r4.l = lo(ALIGN_PAGE_MASK); | |
432 | r4.h = hi(ALIGN_PAGE_MASK); | |
433 | r7 = r7 & r4; /* thread_info->flags */ | |
434 | p5 = r7; | |
435 | .Lresume_userspace_1: | |
436 | /* Disable interrupts. */ | |
437 | [--sp] = reti; | |
438 | reti = [sp++]; | |
439 | ||
440 | r7 = [p5 + TI_FLAGS]; | |
441 | r4.l = lo(_TIF_WORK_MASK); | |
442 | r4.h = hi(_TIF_WORK_MASK); | |
443 | r7 = r7 & r4; | |
444 | ||
445 | .Lsyscall_resched: | |
446 | cc = BITTST(r7, TIF_NEED_RESCHED); | |
447 | if !cc jump .Lsyscall_sigpending; | |
448 | ||
449 | /* Reenable interrupts. */ | |
450 | [--sp] = reti; | |
451 | r0 = [sp++]; | |
452 | ||
453 | SP += -12; | |
454 | call _schedule; | |
455 | SP += 12; | |
456 | ||
457 | jump .Lresume_userspace_1; | |
458 | ||
459 | .Lsyscall_sigpending: | |
460 | cc = BITTST(r7, TIF_RESTORE_SIGMASK); | |
461 | if cc jump .Lsyscall_do_signals; | |
462 | cc = BITTST(r7, TIF_SIGPENDING); | |
463 | if !cc jump .Lsyscall_really_exit; | |
464 | .Lsyscall_do_signals: | |
465 | /* Reenable interrupts. */ | |
466 | [--sp] = reti; | |
467 | r0 = [sp++]; | |
468 | ||
469 | r0 = sp; | |
470 | SP += -12; | |
471 | call _do_signal; | |
472 | SP += 12; | |
473 | ||
474 | .Lsyscall_really_exit: | |
475 | r5 = [sp + PT_RESERVED]; | |
476 | rets = r5; | |
477 | rts; | |
51be24c3 | 478 | ENDPROC(_system_call) |
1394f032 BW |
479 | |
480 | _sys_trace: | |
481 | call _syscall_trace; | |
482 | ||
483 | /* Execute the appropriate system call */ | |
484 | ||
485 | p4 = [SP + PT_P0]; | |
486 | p5.l = _sys_call_table; | |
487 | p5.h = _sys_call_table; | |
488 | p5 = p5 + (p4 << 2); | |
489 | r0 = [sp + PT_R0]; | |
490 | r1 = [sp + PT_R1]; | |
491 | r2 = [sp + PT_R2]; | |
492 | r3 = [sp + PT_R3]; | |
493 | r4 = [sp + PT_R4]; | |
494 | r5 = [sp + PT_R5]; | |
495 | p5 = [p5]; | |
496 | ||
497 | [--sp] = r5; | |
498 | [--sp] = r4; | |
499 | [--sp] = r3; | |
500 | SP += -12; | |
501 | call (p5); | |
502 | SP += 24; | |
503 | [sp + PT_R0] = r0; | |
504 | ||
505 | call _syscall_trace; | |
506 | jump .Lresume_userspace; | |
51be24c3 | 507 | ENDPROC(_sys_trace) |
1394f032 BW |
508 | |
509 | ENTRY(_resume) | |
510 | /* | |
511 | * Beware - when entering resume, prev (the current task) is | |
512 | * in r0, next (the new task) is in r1. | |
513 | */ | |
514 | p0 = r0; | |
515 | p1 = r1; | |
516 | [--sp] = rets; | |
517 | [--sp] = fp; | |
518 | [--sp] = (r7:4, p5:3); | |
519 | ||
520 | /* save usp */ | |
521 | p2 = usp; | |
522 | [p0+(TASK_THREAD+THREAD_USP)] = p2; | |
523 | ||
524 | /* save current kernel stack pointer */ | |
525 | [p0+(TASK_THREAD+THREAD_KSP)] = sp; | |
526 | ||
527 | /* save program counter */ | |
528 | r1.l = _new_old_task; | |
529 | r1.h = _new_old_task; | |
530 | [p0+(TASK_THREAD+THREAD_PC)] = r1; | |
531 | ||
532 | /* restore the kernel stack pointer */ | |
533 | sp = [p1+(TASK_THREAD+THREAD_KSP)]; | |
534 | ||
535 | /* restore user stack pointer */ | |
536 | p0 = [p1+(TASK_THREAD+THREAD_USP)]; | |
537 | usp = p0; | |
538 | ||
539 | /* restore pc */ | |
540 | p0 = [p1+(TASK_THREAD+THREAD_PC)]; | |
541 | jump (p0); | |
542 | ||
543 | /* | |
544 | * Following code actually lands up in a new (old) task. | |
545 | */ | |
546 | ||
547 | _new_old_task: | |
548 | (r7:4, p5:3) = [sp++]; | |
549 | fp = [sp++]; | |
550 | rets = [sp++]; | |
551 | ||
552 | /* | |
553 | * When we come out of resume, r0 carries "old" task, because we are | |
554 | * in "new" task. | |
555 | */ | |
556 | rts; | |
51be24c3 | 557 | ENDPROC(_resume) |
1394f032 BW |
558 | |
559 | ENTRY(_ret_from_exception) | |
560 | p2.l = lo(IPEND); | |
561 | p2.h = hi(IPEND); | |
562 | ||
563 | csync; | |
564 | r0 = [p2]; | |
565 | [sp + PT_IPEND] = r0; | |
566 | ||
567 | 1: | |
568 | r1 = 0x37(Z); | |
569 | r2 = ~r1; | |
570 | r2.h = 0; | |
571 | r0 = r2 & r0; | |
572 | cc = r0 == 0; | |
573 | if !cc jump 4f; /* if not return to user mode, get out */ | |
574 | ||
575 | /* Make sure any pending system call or deferred exception | |
576 | * return in ILAT for this process to get executed, otherwise | |
577 | * in case context switch happens, system call of | |
578 | * first process (i.e in ILAT) will be carried | |
579 | * forward to the switched process | |
580 | */ | |
581 | ||
582 | p2.l = lo(ILAT); | |
583 | p2.h = hi(ILAT); | |
584 | r0 = [p2]; | |
585 | r1 = (EVT_IVG14 | EVT_IVG15) (z); | |
586 | r0 = r0 & r1; | |
587 | cc = r0 == 0; | |
588 | if !cc jump 5f; | |
589 | ||
590 | /* Set the stack for the current process */ | |
591 | r7 = sp; | |
592 | r4.l = lo(ALIGN_PAGE_MASK); | |
593 | r4.h = hi(ALIGN_PAGE_MASK); | |
594 | r7 = r7 & r4; /* thread_info->flags */ | |
595 | p5 = r7; | |
596 | r7 = [p5 + TI_FLAGS]; | |
597 | r4.l = lo(_TIF_WORK_MASK); | |
598 | r4.h = hi(_TIF_WORK_MASK); | |
599 | r7 = r7 & r4; | |
600 | cc = r7 == 0; | |
601 | if cc jump 4f; | |
602 | ||
603 | p0.l = lo(EVT15); | |
604 | p0.h = hi(EVT15); | |
605 | p1.l = _schedule_and_signal; | |
606 | p1.h = _schedule_and_signal; | |
607 | [p0] = p1; | |
608 | csync; | |
609 | raise 15; /* raise evt14 to do signal or reschedule */ | |
610 | 4: | |
611 | r0 = syscfg; | |
612 | bitclr(r0, 0); | |
613 | syscfg = r0; | |
614 | 5: | |
615 | rts; | |
51be24c3 | 616 | ENDPROC(_ret_from_exception) |
1394f032 BW |
617 | |
618 | ENTRY(_return_from_int) | |
619 | /* If someone else already raised IRQ 15, do nothing. */ | |
620 | csync; | |
621 | p2.l = lo(ILAT); | |
622 | p2.h = hi(ILAT); | |
623 | r0 = [p2]; | |
624 | cc = bittst (r0, EVT_IVG15_P); | |
625 | if cc jump 2f; | |
626 | ||
627 | /* if not return to user mode, get out */ | |
628 | p2.l = lo(IPEND); | |
629 | p2.h = hi(IPEND); | |
630 | r0 = [p2]; | |
631 | r1 = 0x17(Z); | |
632 | r2 = ~r1; | |
633 | r2.h = 0; | |
634 | r0 = r2 & r0; | |
635 | r1 = 1; | |
636 | r1 = r0 - r1; | |
637 | r2 = r0 & r1; | |
638 | cc = r2 == 0; | |
639 | if !cc jump 2f; | |
640 | ||
641 | /* Lower the interrupt level to 15. */ | |
642 | p0.l = lo(EVT15); | |
643 | p0.h = hi(EVT15); | |
644 | p1.l = _schedule_and_signal_from_int; | |
645 | p1.h = _schedule_and_signal_from_int; | |
646 | [p0] = p1; | |
647 | csync; | |
1aafd909 | 648 | #if ANOMALY_05000281 |
1394f032 BW |
649 | r0.l = lo(CONFIG_BOOT_LOAD); |
650 | r0.h = hi(CONFIG_BOOT_LOAD); | |
651 | reti = r0; | |
652 | #endif | |
653 | r0 = 0x801f (z); | |
654 | STI r0; | |
655 | raise 15; /* raise evt15 to do signal or reschedule */ | |
656 | rti; | |
657 | 2: | |
658 | rts; | |
51be24c3 | 659 | ENDPROC(_return_from_int) |
1394f032 BW |
660 | |
661 | ENTRY(_lower_to_irq14) | |
1aafd909 | 662 | #if ANOMALY_05000281 |
1394f032 BW |
663 | r0.l = lo(CONFIG_BOOT_LOAD); |
664 | r0.h = hi(CONFIG_BOOT_LOAD); | |
665 | reti = r0; | |
666 | #endif | |
667 | r0 = 0x401f; | |
668 | sti r0; | |
669 | raise 14; | |
670 | rti; | |
671 | ENTRY(_evt14_softirq) | |
672 | #ifdef CONFIG_DEBUG_HWERR | |
673 | r0 = 0x3f; | |
674 | sti r0; | |
675 | #else | |
676 | cli r0; | |
677 | #endif | |
678 | [--sp] = RETI; | |
679 | SP += 4; | |
680 | rts; | |
681 | ||
682 | _schedule_and_signal_from_int: | |
683 | /* To end up here, vector 15 was changed - so we have to change it | |
684 | * back. | |
685 | */ | |
686 | p0.l = lo(EVT15); | |
687 | p0.h = hi(EVT15); | |
688 | p1.l = _evt_system_call; | |
689 | p1.h = _evt_system_call; | |
690 | [p0] = p1; | |
691 | csync; | |
c824498d BS |
692 | |
693 | /* Set orig_p0 to -1 to indicate this isn't the end of a syscall. */ | |
694 | r0 = -1 (x); | |
695 | [sp + PT_ORIG_P0] = r0; | |
696 | ||
1394f032 BW |
697 | p1 = rets; |
698 | [sp + PT_RESERVED] = p1; | |
699 | ||
700 | p0.l = _irq_flags; | |
701 | p0.h = _irq_flags; | |
702 | r0 = [p0]; | |
703 | sti r0; | |
704 | ||
7adfb58f BS |
705 | r0 = sp; |
706 | sp += -12; | |
707 | call _finish_atomic_sections; | |
708 | sp += 12; | |
1394f032 BW |
709 | jump.s .Lresume_userspace; |
710 | ||
711 | _schedule_and_signal: | |
712 | SAVE_CONTEXT_SYSCALL | |
713 | /* To end up here, vector 15 was changed - so we have to change it | |
714 | * back. | |
715 | */ | |
716 | p0.l = lo(EVT15); | |
717 | p0.h = hi(EVT15); | |
718 | p1.l = _evt_system_call; | |
719 | p1.h = _evt_system_call; | |
720 | [p0] = p1; | |
721 | csync; | |
722 | p0.l = 1f; | |
723 | p0.h = 1f; | |
724 | [sp + PT_RESERVED] = P0; | |
725 | call .Lresume_userspace; | |
726 | 1: | |
727 | RESTORE_CONTEXT | |
728 | rti; | |
51be24c3 | 729 | ENDPROC(_lower_to_irq14) |
1394f032 BW |
730 | |
731 | /* Make sure when we start, that the circular buffer is initialized properly | |
732 | * R0 and P0 are call clobbered, so we can use them here. | |
733 | */ | |
734 | ENTRY(_init_exception_buff) | |
735 | r0 = 0; | |
736 | p0.h = _in_ptr_excause; | |
737 | p0.l = _in_ptr_excause; | |
738 | [p0] = r0; | |
739 | p0.h = _out_ptr_excause; | |
740 | p0.l = _out_ptr_excause; | |
741 | [p0] = r0; | |
742 | rts; | |
51be24c3 | 743 | ENDPROC(_init_exception_buff) |
1394f032 | 744 | |
518039bc RG |
745 | /* We handle this 100% in exception space - to reduce overhead |
746 | * Only potential problem is if the software buffer gets swapped out of the | |
747 | * CPLB table - then double fault. - so we don't let this happen in other places | |
748 | */ | |
749 | #ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND | |
750 | ENTRY(_ex_trace_buff_full) | |
751 | [--sp] = P3; | |
752 | [--sp] = P2; | |
753 | [--sp] = LC0; | |
754 | [--sp] = LT0; | |
755 | [--sp] = LB0; | |
756 | P5.L = _trace_buff_offset; | |
757 | P5.H = _trace_buff_offset; | |
758 | P3 = [P5]; /* trace_buff_offset */ | |
759 | P5.L = lo(TBUFSTAT); | |
760 | P5.H = hi(TBUFSTAT); | |
761 | R7 = [P5]; | |
762 | R7 <<= 1; /* double, since we need to read twice */ | |
763 | LC0 = R7; | |
764 | R7 <<= 2; /* need to shift over again, | |
765 | * to get the number of bytes */ | |
766 | P5.L = lo(TBUF); | |
767 | P5.H = hi(TBUF); | |
768 | R6 = ((1 << CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN)*1024) - 1; | |
769 | ||
770 | P2 = R7; | |
771 | P3 = P3 + P2; | |
772 | R7 = P3; | |
773 | R7 = R7 & R6; | |
774 | P3 = R7; | |
775 | P2.L = _trace_buff_offset; | |
776 | P2.H = _trace_buff_offset; | |
777 | [P2] = P3; | |
778 | ||
779 | P2.L = _software_trace_buff; | |
780 | P2.H = _software_trace_buff; | |
781 | ||
782 | LSETUP (.Lstart, .Lend) LC0; | |
783 | .Lstart: | |
784 | R7 = [P5]; /* read TBUF */ | |
785 | P4 = P3 + P2; | |
786 | [P4] = R7; | |
787 | P3 += -4; | |
788 | R7 = P3; | |
789 | R7 = R7 & R6; | |
790 | .Lend: | |
791 | P3 = R7; | |
792 | ||
793 | LB0 = [sp++]; | |
794 | LT0 = [sp++]; | |
795 | LC0 = [sp++]; | |
796 | P2 = [sp++]; | |
797 | P3 = [sp++]; | |
798 | jump _return_from_exception; | |
799 | ||
800 | #if CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN == 4 | |
801 | .data | |
802 | #else | |
803 | .section .l1.data.B | |
804 | #endif | |
805 | ENTRY(_trace_buff_offset) | |
806 | .long 0; | |
807 | ALIGN | |
808 | ENTRY(_software_trace_buff) | |
809 | .rept ((1 << CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN)*256); | |
810 | .long 0 | |
811 | .endr | |
812 | #endif | |
813 | ||
1394f032 BW |
814 | /* |
815 | * Put these in the kernel data section - that should always be covered by | |
816 | * a CPLB. This is needed to ensure we don't get double fault conditions | |
817 | */ | |
818 | ||
819 | #ifdef CONFIG_SYSCALL_TAB_L1 | |
820 | .section .l1.data | |
821 | #else | |
822 | .data | |
823 | #endif | |
1ffe6646 | 824 | ENTRY(_ex_table) |
1394f032 | 825 | /* entry for each EXCAUSE[5:0] |
9401e618 | 826 | * This table must be in sync with the table in ./kernel/traps.c |
1394f032 BW |
827 | * EXCPT instruction can provide 4 bits of EXCAUSE, allowing 16 to be user defined |
828 | */ | |
1ffe6646 | 829 | .long _ex_syscall /* 0x00 - User Defined - Linux Syscall */ |
1394f032 | 830 | .long _ex_soft_bp /* 0x01 - User Defined - Software breakpoint */ |
1ffe6646 | 831 | .long _ex_replaceable /* 0x02 - User Defined */ |
9401e618 | 832 | .long _ex_trap_c /* 0x03 - User Defined - userspace stack overflow */ |
1ffe6646 MF |
833 | .long _ex_replaceable /* 0x04 - User Defined */ |
834 | .long _ex_replaceable /* 0x05 - User Defined */ | |
835 | .long _ex_replaceable /* 0x06 - User Defined */ | |
836 | .long _ex_replaceable /* 0x07 - User Defined */ | |
837 | .long _ex_replaceable /* 0x08 - User Defined */ | |
838 | .long _ex_replaceable /* 0x09 - User Defined */ | |
839 | .long _ex_replaceable /* 0x0A - User Defined */ | |
840 | .long _ex_replaceable /* 0x0B - User Defined */ | |
841 | .long _ex_replaceable /* 0x0C - User Defined */ | |
842 | .long _ex_replaceable /* 0x0D - User Defined */ | |
843 | .long _ex_replaceable /* 0x0E - User Defined */ | |
844 | .long _ex_replaceable /* 0x0F - User Defined */ | |
1394f032 | 845 | .long _ex_single_step /* 0x10 - HW Single step */ |
518039bc RG |
846 | #ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND |
847 | .long _ex_trace_buff_full /* 0x11 - Trace Buffer Full */ | |
848 | #else | |
1394f032 | 849 | .long _ex_trap_c /* 0x11 - Trace Buffer Full */ |
518039bc | 850 | #endif |
1394f032 BW |
851 | .long _ex_trap_c /* 0x12 - Reserved */ |
852 | .long _ex_trap_c /* 0x13 - Reserved */ | |
853 | .long _ex_trap_c /* 0x14 - Reserved */ | |
854 | .long _ex_trap_c /* 0x15 - Reserved */ | |
855 | .long _ex_trap_c /* 0x16 - Reserved */ | |
856 | .long _ex_trap_c /* 0x17 - Reserved */ | |
857 | .long _ex_trap_c /* 0x18 - Reserved */ | |
858 | .long _ex_trap_c /* 0x19 - Reserved */ | |
859 | .long _ex_trap_c /* 0x1A - Reserved */ | |
860 | .long _ex_trap_c /* 0x1B - Reserved */ | |
861 | .long _ex_trap_c /* 0x1C - Reserved */ | |
862 | .long _ex_trap_c /* 0x1D - Reserved */ | |
863 | .long _ex_trap_c /* 0x1E - Reserved */ | |
864 | .long _ex_trap_c /* 0x1F - Reserved */ | |
865 | .long _ex_trap_c /* 0x20 - Reserved */ | |
866 | .long _ex_trap_c /* 0x21 - Undefined Instruction */ | |
867 | .long _ex_trap_c /* 0x22 - Illegal Instruction Combination */ | |
868 | .long _ex_dcplb /* 0x23 - Data CPLB Protection Violation */ | |
869 | .long _ex_trap_c /* 0x24 - Data access misaligned */ | |
870 | .long _ex_trap_c /* 0x25 - Unrecoverable Event */ | |
871 | .long _ex_dcplb /* 0x26 - Data CPLB Miss */ | |
872 | .long _ex_trap_c /* 0x27 - Data CPLB Multiple Hits - Linux Trap Zero */ | |
873 | .long _ex_trap_c /* 0x28 - Emulation Watchpoint */ | |
874 | .long _ex_trap_c /* 0x29 - Instruction fetch access error (535 only) */ | |
875 | .long _ex_trap_c /* 0x2A - Instruction fetch misaligned */ | |
876 | .long _ex_icplb /* 0x2B - Instruction CPLB protection Violation */ | |
877 | .long _ex_icplb /* 0x2C - Instruction CPLB miss */ | |
878 | .long _ex_trap_c /* 0x2D - Instruction CPLB Multiple Hits */ | |
879 | .long _ex_trap_c /* 0x2E - Illegal use of Supervisor Resource */ | |
880 | .long _ex_trap_c /* 0x2E - Illegal use of Supervisor Resource */ | |
881 | .long _ex_trap_c /* 0x2F - Reserved */ | |
882 | .long _ex_trap_c /* 0x30 - Reserved */ | |
883 | .long _ex_trap_c /* 0x31 - Reserved */ | |
884 | .long _ex_trap_c /* 0x32 - Reserved */ | |
885 | .long _ex_trap_c /* 0x33 - Reserved */ | |
886 | .long _ex_trap_c /* 0x34 - Reserved */ | |
887 | .long _ex_trap_c /* 0x35 - Reserved */ | |
888 | .long _ex_trap_c /* 0x36 - Reserved */ | |
889 | .long _ex_trap_c /* 0x37 - Reserved */ | |
890 | .long _ex_trap_c /* 0x38 - Reserved */ | |
891 | .long _ex_trap_c /* 0x39 - Reserved */ | |
892 | .long _ex_trap_c /* 0x3A - Reserved */ | |
893 | .long _ex_trap_c /* 0x3B - Reserved */ | |
894 | .long _ex_trap_c /* 0x3C - Reserved */ | |
895 | .long _ex_trap_c /* 0x3D - Reserved */ | |
896 | .long _ex_trap_c /* 0x3E - Reserved */ | |
897 | .long _ex_trap_c /* 0x3F - Reserved */ | |
1ffe6646 | 898 | END(_ex_table) |
1394f032 | 899 | |
1394f032 | 900 | ENTRY(_sys_call_table) |
0b95f22b | 901 | .long _sys_restart_syscall /* 0 */ |
1394f032 BW |
902 | .long _sys_exit |
903 | .long _sys_fork | |
904 | .long _sys_read | |
905 | .long _sys_write | |
906 | .long _sys_open /* 5 */ | |
907 | .long _sys_close | |
908 | .long _sys_ni_syscall /* old waitpid */ | |
909 | .long _sys_creat | |
910 | .long _sys_link | |
911 | .long _sys_unlink /* 10 */ | |
912 | .long _sys_execve | |
913 | .long _sys_chdir | |
914 | .long _sys_time | |
915 | .long _sys_mknod | |
916 | .long _sys_chmod /* 15 */ | |
917 | .long _sys_chown /* chown16 */ | |
918 | .long _sys_ni_syscall /* old break syscall holder */ | |
919 | .long _sys_ni_syscall /* old stat */ | |
920 | .long _sys_lseek | |
921 | .long _sys_getpid /* 20 */ | |
922 | .long _sys_mount | |
923 | .long _sys_ni_syscall /* old umount */ | |
924 | .long _sys_setuid | |
925 | .long _sys_getuid | |
926 | .long _sys_stime /* 25 */ | |
927 | .long _sys_ptrace | |
928 | .long _sys_alarm | |
929 | .long _sys_ni_syscall /* old fstat */ | |
930 | .long _sys_pause | |
931 | .long _sys_ni_syscall /* old utime */ /* 30 */ | |
932 | .long _sys_ni_syscall /* old stty syscall holder */ | |
933 | .long _sys_ni_syscall /* old gtty syscall holder */ | |
934 | .long _sys_access | |
935 | .long _sys_nice | |
936 | .long _sys_ni_syscall /* 35 */ /* old ftime syscall holder */ | |
937 | .long _sys_sync | |
938 | .long _sys_kill | |
939 | .long _sys_rename | |
940 | .long _sys_mkdir | |
941 | .long _sys_rmdir /* 40 */ | |
942 | .long _sys_dup | |
943 | .long _sys_pipe | |
944 | .long _sys_times | |
945 | .long _sys_ni_syscall /* old prof syscall holder */ | |
946 | .long _sys_brk /* 45 */ | |
947 | .long _sys_setgid | |
948 | .long _sys_getgid | |
949 | .long _sys_ni_syscall /* old sys_signal */ | |
950 | .long _sys_geteuid /* geteuid16 */ | |
951 | .long _sys_getegid /* getegid16 */ /* 50 */ | |
952 | .long _sys_acct | |
953 | .long _sys_umount /* recycled never used phys() */ | |
954 | .long _sys_ni_syscall /* old lock syscall holder */ | |
955 | .long _sys_ioctl | |
956 | .long _sys_fcntl /* 55 */ | |
957 | .long _sys_ni_syscall /* old mpx syscall holder */ | |
958 | .long _sys_setpgid | |
959 | .long _sys_ni_syscall /* old ulimit syscall holder */ | |
960 | .long _sys_ni_syscall /* old old uname */ | |
961 | .long _sys_umask /* 60 */ | |
962 | .long _sys_chroot | |
963 | .long _sys_ustat | |
964 | .long _sys_dup2 | |
965 | .long _sys_getppid | |
966 | .long _sys_getpgrp /* 65 */ | |
967 | .long _sys_setsid | |
968 | .long _sys_ni_syscall /* old sys_sigaction */ | |
969 | .long _sys_sgetmask | |
970 | .long _sys_ssetmask | |
971 | .long _sys_setreuid /* setreuid16 */ /* 70 */ | |
972 | .long _sys_setregid /* setregid16 */ | |
973 | .long _sys_ni_syscall /* old sys_sigsuspend */ | |
974 | .long _sys_ni_syscall /* old sys_sigpending */ | |
975 | .long _sys_sethostname | |
976 | .long _sys_setrlimit /* 75 */ | |
977 | .long _sys_ni_syscall /* old getrlimit */ | |
978 | .long _sys_getrusage | |
979 | .long _sys_gettimeofday | |
980 | .long _sys_settimeofday | |
981 | .long _sys_getgroups /* getgroups16 */ /* 80 */ | |
982 | .long _sys_setgroups /* setgroups16 */ | |
983 | .long _sys_ni_syscall /* old_select */ | |
984 | .long _sys_symlink | |
985 | .long _sys_ni_syscall /* old lstat */ | |
986 | .long _sys_readlink /* 85 */ | |
987 | .long _sys_uselib | |
988 | .long _sys_ni_syscall /* sys_swapon */ | |
989 | .long _sys_reboot | |
990 | .long _sys_ni_syscall /* old_readdir */ | |
991 | .long _sys_ni_syscall /* sys_mmap */ /* 90 */ | |
992 | .long _sys_munmap | |
993 | .long _sys_truncate | |
994 | .long _sys_ftruncate | |
995 | .long _sys_fchmod | |
996 | .long _sys_fchown /* fchown16 */ /* 95 */ | |
997 | .long _sys_getpriority | |
998 | .long _sys_setpriority | |
999 | .long _sys_ni_syscall /* old profil syscall holder */ | |
1000 | .long _sys_statfs | |
1001 | .long _sys_fstatfs /* 100 */ | |
1002 | .long _sys_ni_syscall | |
1003 | .long _sys_ni_syscall /* old sys_socketcall */ | |
1004 | .long _sys_syslog | |
1005 | .long _sys_setitimer | |
1006 | .long _sys_getitimer /* 105 */ | |
1007 | .long _sys_newstat | |
1008 | .long _sys_newlstat | |
1009 | .long _sys_newfstat | |
1010 | .long _sys_ni_syscall /* old uname */ | |
1011 | .long _sys_ni_syscall /* iopl for i386 */ /* 110 */ | |
1012 | .long _sys_vhangup | |
1013 | .long _sys_ni_syscall /* obsolete idle() syscall */ | |
1014 | .long _sys_ni_syscall /* vm86old for i386 */ | |
1015 | .long _sys_wait4 | |
1016 | .long _sys_ni_syscall /* 115 */ /* sys_swapoff */ | |
1017 | .long _sys_sysinfo | |
1018 | .long _sys_ni_syscall /* old sys_ipc */ | |
1019 | .long _sys_fsync | |
1020 | .long _sys_ni_syscall /* old sys_sigreturn */ | |
1021 | .long _sys_clone /* 120 */ | |
1022 | .long _sys_setdomainname | |
1023 | .long _sys_newuname | |
1024 | .long _sys_ni_syscall /* old sys_modify_ldt */ | |
1025 | .long _sys_adjtimex | |
1026 | .long _sys_ni_syscall /* 125 */ /* sys_mprotect */ | |
1027 | .long _sys_ni_syscall /* old sys_sigprocmask */ | |
1028 | .long _sys_ni_syscall /* old "creat_module" */ | |
1029 | .long _sys_init_module | |
1030 | .long _sys_delete_module | |
1031 | .long _sys_ni_syscall /* 130: old "get_kernel_syms" */ | |
1032 | .long _sys_quotactl | |
1033 | .long _sys_getpgid | |
1034 | .long _sys_fchdir | |
1035 | .long _sys_bdflush | |
1036 | .long _sys_ni_syscall /* 135 */ /* sys_sysfs */ | |
1037 | .long _sys_personality | |
1038 | .long _sys_ni_syscall /* for afs_syscall */ | |
1039 | .long _sys_setfsuid /* setfsuid16 */ | |
1040 | .long _sys_setfsgid /* setfsgid16 */ | |
1041 | .long _sys_llseek /* 140 */ | |
1042 | .long _sys_getdents | |
1043 | .long _sys_ni_syscall /* sys_select */ | |
1044 | .long _sys_flock | |
1045 | .long _sys_ni_syscall /* sys_msync */ | |
1046 | .long _sys_readv /* 145 */ | |
1047 | .long _sys_writev | |
1048 | .long _sys_getsid | |
1049 | .long _sys_fdatasync | |
1050 | .long _sys_sysctl | |
1051 | .long _sys_ni_syscall /* 150 */ /* sys_mlock */ | |
1052 | .long _sys_ni_syscall /* sys_munlock */ | |
1053 | .long _sys_ni_syscall /* sys_mlockall */ | |
1054 | .long _sys_ni_syscall /* sys_munlockall */ | |
1055 | .long _sys_sched_setparam | |
1056 | .long _sys_sched_getparam /* 155 */ | |
1057 | .long _sys_sched_setscheduler | |
1058 | .long _sys_sched_getscheduler | |
1059 | .long _sys_sched_yield | |
1060 | .long _sys_sched_get_priority_max | |
1061 | .long _sys_sched_get_priority_min /* 160 */ | |
1062 | .long _sys_sched_rr_get_interval | |
1063 | .long _sys_nanosleep | |
0b95f22b | 1064 | .long _sys_mremap |
1394f032 BW |
1065 | .long _sys_setresuid /* setresuid16 */ |
1066 | .long _sys_getresuid /* getresuid16 */ /* 165 */ | |
1067 | .long _sys_ni_syscall /* for vm86 */ | |
1068 | .long _sys_ni_syscall /* old "query_module" */ | |
1069 | .long _sys_ni_syscall /* sys_poll */ | |
0b95f22b | 1070 | .long _sys_nfsservctl |
1394f032 BW |
1071 | .long _sys_setresgid /* setresgid16 */ /* 170 */ |
1072 | .long _sys_getresgid /* getresgid16 */ | |
1073 | .long _sys_prctl | |
1074 | .long _sys_rt_sigreturn | |
1075 | .long _sys_rt_sigaction | |
1076 | .long _sys_rt_sigprocmask /* 175 */ | |
1077 | .long _sys_rt_sigpending | |
1078 | .long _sys_rt_sigtimedwait | |
1079 | .long _sys_rt_sigqueueinfo | |
1080 | .long _sys_rt_sigsuspend | |
1081 | .long _sys_pread64 /* 180 */ | |
1082 | .long _sys_pwrite64 | |
1083 | .long _sys_lchown /* lchown16 */ | |
1084 | .long _sys_getcwd | |
1085 | .long _sys_capget | |
1086 | .long _sys_capset /* 185 */ | |
1087 | .long _sys_sigaltstack | |
1088 | .long _sys_sendfile | |
1089 | .long _sys_ni_syscall /* streams1 */ | |
1090 | .long _sys_ni_syscall /* streams2 */ | |
1091 | .long _sys_vfork /* 190 */ | |
1092 | .long _sys_getrlimit | |
1093 | .long _sys_mmap2 | |
1094 | .long _sys_truncate64 | |
1095 | .long _sys_ftruncate64 | |
1096 | .long _sys_stat64 /* 195 */ | |
1097 | .long _sys_lstat64 | |
1098 | .long _sys_fstat64 | |
1099 | .long _sys_chown | |
1100 | .long _sys_getuid | |
1101 | .long _sys_getgid /* 200 */ | |
1102 | .long _sys_geteuid | |
1103 | .long _sys_getegid | |
1104 | .long _sys_setreuid | |
1105 | .long _sys_setregid | |
1106 | .long _sys_getgroups /* 205 */ | |
1107 | .long _sys_setgroups | |
1108 | .long _sys_fchown | |
1109 | .long _sys_setresuid | |
1110 | .long _sys_getresuid | |
1111 | .long _sys_setresgid /* 210 */ | |
1112 | .long _sys_getresgid | |
1113 | .long _sys_lchown | |
1114 | .long _sys_setuid | |
1115 | .long _sys_setgid | |
1116 | .long _sys_setfsuid /* 215 */ | |
1117 | .long _sys_setfsgid | |
1118 | .long _sys_pivot_root | |
1119 | .long _sys_ni_syscall /* sys_mincore */ | |
1120 | .long _sys_ni_syscall /* sys_madvise */ | |
1121 | .long _sys_getdents64 /* 220 */ | |
1122 | .long _sys_fcntl64 | |
1123 | .long _sys_ni_syscall /* reserved for TUX */ | |
1124 | .long _sys_ni_syscall | |
1125 | .long _sys_gettid | |
0b95f22b | 1126 | .long _sys_readahead /* 225 */ |
1394f032 BW |
1127 | .long _sys_setxattr |
1128 | .long _sys_lsetxattr | |
1129 | .long _sys_fsetxattr | |
1130 | .long _sys_getxattr | |
1131 | .long _sys_lgetxattr /* 230 */ | |
1132 | .long _sys_fgetxattr | |
1133 | .long _sys_listxattr | |
1134 | .long _sys_llistxattr | |
1135 | .long _sys_flistxattr | |
1136 | .long _sys_removexattr /* 235 */ | |
1137 | .long _sys_lremovexattr | |
1138 | .long _sys_fremovexattr | |
1139 | .long _sys_tkill | |
1140 | .long _sys_sendfile64 | |
1141 | .long _sys_futex /* 240 */ | |
1142 | .long _sys_sched_setaffinity | |
1143 | .long _sys_sched_getaffinity | |
1144 | .long _sys_ni_syscall /* sys_set_thread_area */ | |
1145 | .long _sys_ni_syscall /* sys_get_thread_area */ | |
1146 | .long _sys_io_setup /* 245 */ | |
1147 | .long _sys_io_destroy | |
1148 | .long _sys_io_getevents | |
1149 | .long _sys_io_submit | |
1150 | .long _sys_io_cancel | |
1151 | .long _sys_ni_syscall /* 250 */ /* sys_alloc_hugepages */ | |
1152 | .long _sys_ni_syscall /* sys_freec_hugepages */ | |
1153 | .long _sys_exit_group | |
1154 | .long _sys_lookup_dcookie | |
1155 | .long _sys_bfin_spinlock | |
1156 | .long _sys_epoll_create /* 255 */ | |
1157 | .long _sys_epoll_ctl | |
1158 | .long _sys_epoll_wait | |
1159 | .long _sys_ni_syscall /* remap_file_pages */ | |
1160 | .long _sys_set_tid_address | |
1161 | .long _sys_timer_create /* 260 */ | |
1162 | .long _sys_timer_settime | |
1163 | .long _sys_timer_gettime | |
1164 | .long _sys_timer_getoverrun | |
1165 | .long _sys_timer_delete | |
1166 | .long _sys_clock_settime /* 265 */ | |
1167 | .long _sys_clock_gettime | |
1168 | .long _sys_clock_getres | |
1169 | .long _sys_clock_nanosleep | |
1170 | .long _sys_statfs64 | |
1171 | .long _sys_fstatfs64 /* 270 */ | |
1172 | .long _sys_tgkill | |
1173 | .long _sys_utimes | |
1174 | .long _sys_fadvise64_64 | |
1175 | .long _sys_ni_syscall /* vserver */ | |
1176 | .long _sys_ni_syscall /* 275, mbind */ | |
1177 | .long _sys_ni_syscall /* get_mempolicy */ | |
1178 | .long _sys_ni_syscall /* set_mempolicy */ | |
1179 | .long _sys_mq_open | |
1180 | .long _sys_mq_unlink | |
1181 | .long _sys_mq_timedsend /* 280 */ | |
1182 | .long _sys_mq_timedreceive | |
1183 | .long _sys_mq_notify | |
1184 | .long _sys_mq_getsetattr | |
1185 | .long _sys_ni_syscall /* kexec_load */ | |
1186 | .long _sys_waitid /* 285 */ | |
1187 | .long _sys_add_key | |
1188 | .long _sys_request_key | |
1189 | .long _sys_keyctl | |
1190 | .long _sys_ioprio_set | |
1191 | .long _sys_ioprio_get /* 290 */ | |
1192 | .long _sys_inotify_init | |
1193 | .long _sys_inotify_add_watch | |
1194 | .long _sys_inotify_rm_watch | |
1195 | .long _sys_ni_syscall /* migrate_pages */ | |
1196 | .long _sys_openat /* 295 */ | |
1197 | .long _sys_mkdirat | |
1198 | .long _sys_mknodat | |
1199 | .long _sys_fchownat | |
1200 | .long _sys_futimesat | |
1201 | .long _sys_fstatat64 /* 300 */ | |
1202 | .long _sys_unlinkat | |
1203 | .long _sys_renameat | |
1204 | .long _sys_linkat | |
1205 | .long _sys_symlinkat | |
1206 | .long _sys_readlinkat /* 305 */ | |
1207 | .long _sys_fchmodat | |
1208 | .long _sys_faccessat | |
1209 | .long _sys_pselect6 | |
1210 | .long _sys_ppoll | |
1211 | .long _sys_unshare /* 310 */ | |
1212 | .long _sys_sram_alloc | |
1213 | .long _sys_sram_free | |
1214 | .long _sys_dma_memcpy | |
1215 | .long _sys_accept | |
1216 | .long _sys_bind /* 315 */ | |
1217 | .long _sys_connect | |
1218 | .long _sys_getpeername | |
1219 | .long _sys_getsockname | |
1220 | .long _sys_getsockopt | |
1221 | .long _sys_listen /* 320 */ | |
1222 | .long _sys_recv | |
1223 | .long _sys_recvfrom | |
1224 | .long _sys_recvmsg | |
1225 | .long _sys_send | |
1226 | .long _sys_sendmsg /* 325 */ | |
1227 | .long _sys_sendto | |
1228 | .long _sys_setsockopt | |
1229 | .long _sys_shutdown | |
1230 | .long _sys_socket | |
1231 | .long _sys_socketpair /* 330 */ | |
1232 | .long _sys_semctl | |
1233 | .long _sys_semget | |
1234 | .long _sys_semop | |
1235 | .long _sys_msgctl | |
1236 | .long _sys_msgget /* 335 */ | |
1237 | .long _sys_msgrcv | |
1238 | .long _sys_msgsnd | |
1239 | .long _sys_shmat | |
1240 | .long _sys_shmctl | |
1241 | .long _sys_shmdt /* 340 */ | |
1242 | .long _sys_shmget | |
0b95f22b BW |
1243 | .long _sys_splice |
1244 | .long _sys_sync_file_range | |
1245 | .long _sys_tee | |
1246 | .long _sys_vmsplice /* 345 */ | |
1247 | .long _sys_epoll_pwait | |
1248 | .long _sys_utimensat | |
1249 | .long _sys_signalfd | |
1250 | .long _sys_timerfd | |
1251 | .long _sys_eventfd /* 350 */ | |
1252 | .long _sys_pread64 | |
1253 | .long _sys_pwrite64 | |
1254 | .long _sys_fadvise64 | |
1255 | .long _sys_set_robust_list | |
1256 | .long _sys_get_robust_list /* 355 */ | |
1257 | .long _sys_fallocate | |
1394f032 BW |
1258 | .rept NR_syscalls-(.-_sys_call_table)/4 |
1259 | .long _sys_ni_syscall | |
1260 | .endr | |
/* NOTE(review): a single 32-bit storage word.  The name suggests it holds
 * an IMASK value saved across exception handling -- confirm against the
 * entry/exit code elsewhere in this file that stores to it. */
1261 | _excpt_saved_imask: | |
1262 | .long 0; | |
1263 | ||
/* 1024 x 32-bit zero-initialized words (4 KiB) reserved as a private stack
 * for exception handling.  _exception_stack_top labels the high end of the
 * region -- presumably loaded as the initial stack pointer, with the stack
 * growing downward; confirm against the exception entry code. */
1264 | _exception_stack: | |
1265 | .rept 1024 | |
1266 | .long 0; | |
1267 | .endr | |
1268 | _exception_stack_top: | |
1269 | ||
1aafd909 | 1270 | #if ANOMALY_05000261
1394f032 BW |
1271 | /* Scratch word used by the assembly entry point to work around hardware
 * anomaly 05000261.  Per the symbol name it records the RETX value of the
 * most recent CPLB fault -- presumably so a repeated fault at the same
 * return address can be recognized; confirm against the exception entry
 * code that reads and writes it. */
1272 | _last_cplb_fault_retx: | |
1273 | .long 0; | |
1274 | #endif | |
1275 | /* | |
1276 | * Single instructions can have multiple faults, which need to be | |
1277 | * handled by traps.c, in irq5. We store the exception cause to ensure | |
1278 | * we don't miss a double fault condition | |
1279 | */ | |
/* Write (producer) index into _excause_circ_buf -- presumably advanced by
 * the low-level exception path when it queues an exception cause; confirm
 * against the code that updates it. */
1280 | ENTRY(_in_ptr_excause) | |
1281 | .long 0; | |
/* Read (consumer) index into _excause_circ_buf -- presumably advanced as
 * queued causes are drained by the irq5 handling in traps.c; confirm
 * against the code that updates it. */
1282 | ENTRY(_out_ptr_excause) | |
1283 | .long 0; | |
1284 | ALIGN | |
/* Four-entry circular buffer (one 32-bit word per entry) holding pending
 * exception causes, indexed by _in_ptr_excause and _out_ptr_excause, both
 * defined in this file. */
1285 | ENTRY(_excause_circ_buf) | |
1286 | .rept 4 | |
1287 | .long 0 | |
1288 | .endr