Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * This file is subject to the terms and conditions of the GNU General Public | |
3 | * License. See the file "COPYING" in the main directory of this archive | |
4 | * for more details. | |
5 | * | |
6 | * Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle | |
7 | * Copyright (C) 1999, 2000 Silicon Graphics, Inc. | |
8 | * Copyright (C) 2001 MIPS Technologies, Inc. | |
9 | */ | |
10 | #include <linux/config.h> | |
11 | ||
12 | #include <asm/asm.h> | |
13 | #include <asm/asmmacro.h> | |
14 | #include <asm/regdef.h> | |
15 | #include <asm/mipsregs.h> | |
16 | #include <asm/stackframe.h> | |
17 | #include <asm/isadep.h> | |
18 | #include <asm/thread_info.h> | |
19 | #include <asm/war.h> | |
20 | ||
21 | #ifdef CONFIG_PREEMPT | |
22 | .macro preempt_stop reg=t0 | |
23 | .endm | |
24 | #else | |
25 | .macro preempt_stop reg=t0 | |
26 | local_irq_disable \reg | |
27 | .endm | |
28 | #define resume_kernel restore_all | |
29 | #endif | |
30 | ||
31 | .text | |
32 | .align 5 | |
33 | FEXPORT(ret_from_exception) | |
34 | preempt_stop | |
35 | FEXPORT(ret_from_irq) | |
36 | LONG_L t0, PT_STATUS(sp) # returning to kernel mode? | |
37 | andi t0, t0, KU_USER | |
38 | beqz t0, resume_kernel | |
39 | ||
40 | FEXPORT(resume_userspace) | |
41 | local_irq_disable t0 # make sure we don't miss an | |
42 | # interrupt setting need_resched | |
43 | # between sampling and return | |
44 | LONG_L a2, TI_FLAGS($28) # current->work | |
45 | andi a2, _TIF_WORK_MASK # (ignoring syscall_trace) | |
46 | bnez a2, work_pending | |
47 | j restore_all | |
48 | ||
49 | #ifdef CONFIG_PREEMPT | |
50 | ENTRY(resume_kernel) | |
51 | lw t0, TI_PRE_COUNT($28) | |
52 | bnez t0, restore_all | |
53 | need_resched: | |
54 | LONG_L t0, TI_FLAGS($28) | |
55 | andi t1, t0, _TIF_NEED_RESCHED | |
56 | beqz t1, restore_all | |
57 | LONG_L t0, PT_STATUS(sp) # Interrupts off? | |
58 | andi t0, 1 | |
59 | beqz t0, restore_all | |
60 | li t0, PREEMPT_ACTIVE | |
61 | sw t0, TI_PRE_COUNT($28) | |
62 | local_irq_enable t0 | |
63 | jal schedule | |
64 | sw zero, TI_PRE_COUNT($28) | |
65 | local_irq_disable t0 | |
66 | b need_resched | |
67 | #endif | |
68 | ||
69 | FEXPORT(ret_from_fork) | |
70 | jal schedule_tail # a0 = task_t *prev | |
71 | ||
72 | FEXPORT(syscall_exit) | |
73 | local_irq_disable # make sure need_resched and | |
74 | # signals don't change between | |
75 | # sampling and return | |
76 | LONG_L a2, TI_FLAGS($28) # current->work | |
77 | li t0, _TIF_ALLWORK_MASK | |
78 | and t0, a2, t0 | |
79 | bnez t0, syscall_exit_work | |
80 | ||
81 | FEXPORT(restore_all) # restore full frame | |
82 | .set noat | |
83 | RESTORE_TEMP | |
84 | RESTORE_AT | |
85 | RESTORE_STATIC | |
86 | FEXPORT(restore_partial) # restore partial frame | |
87 | RESTORE_SOME | |
88 | RESTORE_SP_AND_RET | |
89 | .set at | |
90 | ||
91 | FEXPORT(work_pending) | |
92 | andi t0, a2, _TIF_NEED_RESCHED | |
93 | beqz t0, work_notifysig | |
94 | work_resched: | |
95 | jal schedule | |
96 | ||
97 | local_irq_disable t0 # make sure need_resched and | |
98 | # signals don't change between | |
99 | # sampling and return | |
100 | LONG_L a2, TI_FLAGS($28) | |
101 | andi t0, a2, _TIF_WORK_MASK # is there any work to be done | |
102 | # other than syscall tracing? | |
103 | beqz t0, restore_all | |
104 | andi t0, a2, _TIF_NEED_RESCHED | |
105 | bnez t0, work_resched | |
106 | ||
107 | work_notifysig: # deal with pending signals and | |
108 | # notify-resume requests | |
109 | move a0, sp | |
110 | li a1, 0 | |
111 | jal do_notify_resume # a2 already loaded | |
112 | j restore_all | |
113 | ||
114 | FEXPORT(syscall_exit_work_partial) | |
115 | SAVE_STATIC | |
116 | FEXPORT(syscall_exit_work) | |
117 | LONG_L t0, TI_FLAGS($28) | |
118 | li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | |
119 | and t0, t1 | |
120 | beqz t0, work_pending # no trace/audit bits set? | |
121 | local_irq_enable # could let do_syscall_trace() | |
122 | # call schedule() instead | |
123 | move a0, sp | |
124 | li a1, 1 | |
125 | jal do_syscall_trace | |
126 | b resume_userspace | |
127 | ||
128 | /* | |
129 | * Common spurious interrupt handler. | |
130 | */ | |
131 | .text | |
132 | .align 5 | |
133 | LEAF(spurious_interrupt) | |
134 | /* | |
135 | * Someone tried to fool us by sending an interrupt but we | |
136 | * couldn't find a cause for it. | |
137 | */ | |
138 | #ifdef CONFIG_SMP | |
139 | lui t1, %hi(irq_err_count) | |
140 | 1: ll t0, %lo(irq_err_count)(t1) | |
141 | addiu t0, 1 | |
142 | sc t0, %lo(irq_err_count)(t1) | |
143 | #if R10000_LLSC_WAR | |
144 | beqzl t0, 1b | |
145 | #else | |
146 | beqz t0, 1b | |
147 | #endif | |
148 | #else | |
149 | lui t1, %hi(irq_err_count) | |
150 | lw t0, %lo(irq_err_count)(t1) | |
151 | addiu t0, 1 | |
152 | sw t0, %lo(irq_err_count)(t1) | |
153 | #endif | |
154 | j ret_from_irq | |
155 | END(spurious_interrupt) |