Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * This file is subject to the terms and conditions of the GNU General Public | |
3 | * License. See the file "COPYING" in the main directory of this archive | |
4 | * for more details. | |
5 | * | |
6 | * Copyright (C) 1994, 95, 96, 99, 2001 Ralf Baechle | |
7 | * Copyright (C) 1994, 1995, 1996 Paul M. Antoine. | |
8 | * Copyright (C) 1999 Silicon Graphics, Inc. | |
619b6e18 | 9 | * Copyright (C) 2007 Maciej W. Rozycki |
1da177e4 LT |
10 | */ |
11 | #ifndef _ASM_STACKFRAME_H | |
12 | #define _ASM_STACKFRAME_H | |
13 | ||
1da177e4 LT |
14 | #include <linux/threads.h> |
15 | ||
16 | #include <asm/asm.h> | |
41c594ab | 17 | #include <asm/asmmacro.h> |
1da177e4 | 18 | #include <asm/mipsregs.h> |
048eb582 | 19 | #include <asm/asm-offsets.h> |
c2377a42 | 20 | #include <asm/thread_info.h> |
1da177e4 | 21 | |
866b6a89 CM |
/* Make the addition of cfi info a little easier. */

/*
 * cfi_rel_offset: emit .cfi_rel_offset for \reg at \offset, but only
 * when the caller passed docfi=1; with docfi=0 this expands to nothing,
 * so the same save sequences can be used with or without unwind info.
 */
	.macro	cfi_rel_offset reg offset=0 docfi=0
	.if	\docfi
	.cfi_rel_offset	\reg, \offset
	.endif
	.endm
28 | ||
/*
 * cfi_st: store \reg to \offset(sp) and, when docfi=1, record the save
 * location for the unwinder via cfi_rel_offset.
 */
	.macro	cfi_st reg offset=0 docfi=0
	LONG_S	\reg, \offset(sp)
	cfi_rel_offset	\reg, \offset, \docfi
	.endm
33 | ||
/*
 * cfi_restore: emit .cfi_restore for \reg when docfi=1.  The \offset
 * argument is not used here; it is accepted so call sites stay parallel
 * with cfi_st / cfi_rel_offset.
 */
	.macro	cfi_restore reg offset=0 docfi=0
	.if	\docfi
	.cfi_restore \reg
	.endif
	.endm
39 | ||
/*
 * cfi_ld: reload \reg from \offset(sp) and, when docfi=1, tell the
 * unwinder the register holds its original value again.
 */
	.macro	cfi_ld reg offset=0 docfi=0
	LONG_L	\reg, \offset(sp)
	cfi_restore \reg \offset \docfi
	.endm
44 | ||
455481fc | 45 | #if defined(CONFIG_CPU_R3000) |
fbf6ede2 MR |
46 | #define STATMASK 0x3f |
47 | #else | |
48 | #define STATMASK 0x1f | |
49 | #endif | |
50 | ||
/*
 * SAVE_AT: save the assembler temporary $1 (at) into pt_regs.
 * ".set noat" silences the assembler's warning about touching $at
 * explicitly.
 */
	.macro	SAVE_AT docfi=0
	.set	push
	.set	noat
	cfi_st	$1, PT_R1, \docfi
	.set	pop
	.endm
57 | ||
/*
 * SAVE_TEMP: save the caller-clobbered temporaries and the
 * multiply/divide state into pt_regs.  The hi/lo (or SmartMIPS
 * ACX/HI/LO) transfers are interleaved with the register stores to
 * overlap their latency with useful work.  MIPSR6 has no hi/lo
 * registers, so that state is skipped there.
 */
	.macro	SAVE_TEMP docfi=0
#ifdef CONFIG_CPU_HAS_SMARTMIPS
	/* mflhxu shifts ACX/HI/LO down one step per execution */
	mflhxu	v1
	LONG_S	v1, PT_LO(sp)
	mflhxu	v1
	LONG_S	v1, PT_HI(sp)
	mflhxu	v1
	LONG_S	v1, PT_ACX(sp)
#elif !defined(CONFIG_CPU_MIPSR6)
	mfhi	v1
#endif
#ifdef CONFIG_32BIT
	/* On 64-bit, $8/$9 are argument registers saved by SAVE_SOME */
	cfi_st	$8, PT_R8, \docfi
	cfi_st	$9, PT_R9, \docfi
#endif
	cfi_st	$10, PT_R10, \docfi
	cfi_st	$11, PT_R11, \docfi
	cfi_st	$12, PT_R12, \docfi
#if !defined(CONFIG_CPU_HAS_SMARTMIPS) && !defined(CONFIG_CPU_MIPSR6)
	/* v1 still holds hi from above; store it, then start on lo */
	LONG_S	v1, PT_HI(sp)
	mflo	v1
#endif
	cfi_st	$13, PT_R13, \docfi
	cfi_st	$14, PT_R14, \docfi
	cfi_st	$15, PT_R15, \docfi
	cfi_st	$24, PT_R24, \docfi
#if !defined(CONFIG_CPU_HAS_SMARTMIPS) && !defined(CONFIG_CPU_MIPSR6)
	LONG_S	v1, PT_LO(sp)
#endif
#ifdef CONFIG_CPU_CAVIUM_OCTEON
	/*
	 * The Octeon multiplier state is affected by general
	 * multiply instructions. It must be saved before and
	 * kernel code might corrupt it
	 */
	jal	octeon_mult_save
#endif
	.endm
96 | ||
/*
 * SAVE_STATIC: save the callee-saved registers $16-$23 (s0-s7) and
 * $30 (s8/fp) into pt_regs.  Only needed on paths that may context
 * switch or otherwise inspect the full register set.
 */
	.macro	SAVE_STATIC docfi=0
	cfi_st	$16, PT_R16, \docfi
	cfi_st	$17, PT_R17, \docfi
	cfi_st	$18, PT_R18, \docfi
	cfi_st	$19, PT_R19, \docfi
	cfi_st	$20, PT_R20, \docfi
	cfi_st	$21, PT_R21, \docfi
	cfi_st	$22, PT_R22, \docfi
	cfi_st	$23, PT_R23, \docfi
	cfi_st	$30, PT_R30, \docfi
	.endm
108 | ||
9fef6868 CM |
109 | /* |
110 | * get_saved_sp returns the SP for the current CPU by looking in the | |
111 | * kernelsp array for it. If tosp is set, it stores the current sp in | |
112 | * k0 and loads the new value in sp. If not, it clobbers k0 and | |
113 | * stores the new value in k1, leaving sp unaffected. | |
114 | */ | |
1da177e4 | 115 | #ifdef CONFIG_SMP |
9fef6868 CM |
116 | |
/* SMP variation */
	.macro	get_saved_sp docfi=0 tosp=0
	/* k0 = current CPU id, used to index kernelsp[] */
	ASM_CPUID_MFC0	k0, ASM_SMP_CPUID_REG
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
	lui	k1, %hi(kernelsp)
#else
	/* Build the upper bits of &kernelsp piecewise (64-bit address) */
	lui	k1, %highest(kernelsp)
	daddiu	k1, %higher(kernelsp)
	dsll	k1, 16
	daddiu	k1, %hi(kernelsp)
	dsll	k1, 16
#endif
	/* k1 += cpu * sizeof(ptr); %lo part is applied in the load below */
	LONG_SRL	k0, SMP_CPUID_PTRSHIFT
	LONG_ADDU	k1, k0
	.if	\tosp
	/* Preserve the old sp in k0 so the caller can still save it */
	move	k0, sp
	.if	\docfi
	.cfi_register sp, k0
	.endif
	LONG_L	sp, %lo(kernelsp)(k1)
	.else
	LONG_L	k1, %lo(kernelsp)(k1)
	.endif
	.endm
141 | ||
/*
 * set_saved_sp (SMP): record \stackp as this CPU's kernel stack
 * pointer in kernelsp[].  \temp is clobbered; \temp2 is unused but
 * kept so both variants share one signature.
 */
	.macro	set_saved_sp stackp temp temp2
	ASM_CPUID_MFC0	\temp, ASM_SMP_CPUID_REG
	LONG_SRL	\temp, SMP_CPUID_PTRSHIFT
	LONG_S	\stackp, kernelsp(\temp)
	.endm
c2377a42 | 147 | #else /* !CONFIG_SMP */ |
9fef6868 CM |
/* Uniprocessor variation */
	.macro	get_saved_sp docfi=0 tosp=0
#ifdef CONFIG_CPU_JUMP_WORKAROUNDS
	/*
	 * Clear BTB (branch target buffer), forbid RAS (return address
	 * stack) to workaround the Out-of-order Issue in Loongson2F
	 * via its diagnostic register.
	 */
	move	k0, ra			/* ra is trashed by the jal chain */
	jal	1f
	 nop
1:	jal	1f
	 nop
1:	jal	1f
	 nop
1:	jal	1f
	 nop
1:	move	ra, k0
	li	k0, 3
	mtc0	k0, $22			/* diagnostic register */
#endif /* CONFIG_CPU_JUMP_WORKAROUNDS */
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
	lui	k1, %hi(kernelsp)
#else
	/* Build the upper bits of &kernelsp piecewise (64-bit address) */
	lui	k1, %highest(kernelsp)
	daddiu	k1, %higher(kernelsp)
	dsll	k1, k1, 16
	daddiu	k1, %hi(kernelsp)
	dsll	k1, k1, 16
#endif
	.if	\tosp
	/* Preserve the old sp in k0 so the caller can still save it */
	move	k0, sp
	.if	\docfi
	.cfi_register sp, k0
	.endif
	LONG_L	sp, %lo(kernelsp)(k1)
	.else
	LONG_L	k1, %lo(kernelsp)(k1)
	.endif
	.endm
188 | ||
/*
 * set_saved_sp (UP): single kernelsp slot.  \temp and \temp2 are
 * unused; the signature matches the SMP variant so callers need not
 * care which one is in effect.
 */
	.macro	set_saved_sp stackp temp temp2
	LONG_S	\stackp, kernelsp
	.endm
192 | #endif | |
193 | ||
/*
 * SAVE_SOME: first stage of exception entry.  Status.CU0 doubles as
 * the "already on the kernel stack" marker: if it is clear we came
 * from user mode and must switch to this CPU's kernel stack first.
 * Then make room for a struct pt_regs and save the volatile state
 * (argument/result registers, ra, Status, Cause, EPC) that the other
 * SAVE_* macros do not cover.  Clobbers k0/k1 and v1.
 */
	.macro	SAVE_SOME docfi=0
	.set	push
	.set	noat
	.set	reorder
	mfc0	k0, CP0_STATUS
	sll	k0, 3		/* extract cu0 bit */
	.set	noreorder
	bltz	k0, 8f		/* CU0 set -> already on kernel stack */
	 move	k0, sp		/* (delay slot) k0 = old sp either way */
	.if	\docfi
	.cfi_register sp, k0
	.endif
#ifdef CONFIG_EVA
	/*
	 * Flush interAptiv's Return Prediction Stack (RPS) by writing
	 * EntryHi. Toggling Config7.RPS is slower and less portable.
	 *
	 * The RPS isn't automatically flushed when exceptions are
	 * taken, which can result in kernel mode speculative accesses
	 * to user addresses if the RPS mispredicts. That's harmless
	 * when user and kernel share the same address space, but with
	 * EVA the same user segments may be unmapped to kernel mode,
	 * even containing sensitive MMIO regions or invalid memory.
	 *
	 * This can happen when the kernel sets the return address to
	 * ret_from_* and jr's to the exception handler, which looks
	 * more like a tail call than a function call. If nested calls
	 * don't evict the last user address in the RPS, it will
	 * mispredict the return and fetch from a user controlled
	 * address into the icache.
	 *
	 * More recent EVA-capable cores with MAAR to restrict
	 * speculative accesses aren't affected.
	 */
	MFC0	k0, CP0_ENTRYHI
	MTC0	k0, CP0_ENTRYHI
#endif
	.set	reorder
	/* Called from user mode, new stack. */
	get_saved_sp docfi=\docfi tosp=1
8:					/* k0 = old sp from here on */
#ifdef CONFIG_CPU_DADDI_WORKAROUNDS
	.set	at=k1		/* let as use k1 as $at for PTR_SUBU */
#endif
	PTR_SUBU sp, PT_SIZE	/* carve a struct pt_regs off the stack */
#ifdef CONFIG_CPU_DADDI_WORKAROUNDS
	.set	noat
#endif
	.if	\docfi
	.cfi_def_cfa sp,0
	.endif
	cfi_st	k0, PT_R29, \docfi	/* the pre-exception sp */
	cfi_rel_offset	sp, PT_R29, \docfi
	cfi_st	v1, PT_R3, \docfi
	/*
	 * You might think that you don't need to save $0,
	 * but the FPU emulator and gdb remote debug stub
	 * need it to operate correctly
	 */
	LONG_S	$0, PT_R0(sp)
	mfc0	v1, CP0_STATUS
	cfi_st	v0, PT_R2, \docfi
	LONG_S	v1, PT_STATUS(sp)
	cfi_st	$4, PT_R4, \docfi
	mfc0	v1, CP0_CAUSE
	cfi_st	$5, PT_R5, \docfi
	LONG_S	v1, PT_CAUSE(sp)
	cfi_st	$6, PT_R6, \docfi
	cfi_st	ra, PT_R31, \docfi
	MFC0	ra, CP0_EPC
	cfi_st	$7, PT_R7, \docfi
#ifdef CONFIG_64BIT
	/* $8/$9 are argument registers on 64-bit ABIs */
	cfi_st	$8, PT_R8, \docfi
	cfi_st	$9, PT_R9, \docfi
#endif
	LONG_S	ra, PT_EPC(sp)
	.if	\docfi
	.cfi_rel_offset ra, PT_EPC
	.endif
	cfi_st	$25, PT_R25, \docfi
	cfi_st	$28, PT_R28, \docfi

	/* Set thread_info if we're coming from user mode */
	mfc0	k0, CP0_STATUS
	sll	k0, 3		/* extract cu0 bit */
	bltz	k0, 9f		/* from kernel: $28 already valid */

	/* $28 = thread_info: round sp down to the _THREAD_MASK boundary */
	ori	$28, sp, _THREAD_MASK
	xori	$28, _THREAD_MASK
#ifdef CONFIG_CPU_CAVIUM_OCTEON
	.set    mips64
	pref    0, 0($28)       /* Prefetch the current pointer */
#endif
9:
	.set	pop
	.endm
290 | ||
866b6a89 CM |
/*
 * SAVE_ALL: save the complete register set — entry state (SAVE_SOME),
 * $at, the temporaries plus hi/lo, and the callee-saved registers.
 */
	.macro	SAVE_ALL docfi=0
	SAVE_SOME \docfi
	SAVE_AT \docfi
	SAVE_TEMP \docfi
	SAVE_STATIC \docfi
	.endm
297 | ||
/*
 * RESTORE_AT: reload the assembler temporary $1 (at) from pt_regs;
 * counterpart of SAVE_AT.
 */
	.macro	RESTORE_AT docfi=0
	.set	push
	.set	noat
	cfi_ld	$1, PT_R1, \docfi
	.set	pop
	.endm
304 | ||
/*
 * RESTORE_TEMP: counterpart of SAVE_TEMP.  The saved hi/lo (or
 * SmartMIPS ACX/HI/LO) values are first staged in $14/$15/$24 and
 * pushed back with mthi/mtlo/mtlhx; only afterwards are $13-$15 and
 * $24 reloaded with their real saved contents.
 */
	.macro	RESTORE_TEMP docfi=0
#ifdef CONFIG_CPU_CAVIUM_OCTEON
	/* Restore the Octeon multiplier state */
	jal	octeon_mult_restore
#endif
#ifdef CONFIG_CPU_HAS_SMARTMIPS
	LONG_L	$14, PT_ACX(sp)
	LONG_L	$24, PT_LO(sp)
	LONG_L	$15, PT_HI(sp)
#elif !defined(CONFIG_CPU_MIPSR6)
	LONG_L	$24, PT_LO(sp)
	LONG_L	$15, PT_HI(sp)
#endif
#ifdef CONFIG_32BIT
	cfi_ld	$8, PT_R8, \docfi
	cfi_ld	$9, PT_R9, \docfi
#endif
	cfi_ld	$10, PT_R10, \docfi
	cfi_ld	$11, PT_R11, \docfi
	cfi_ld	$12, PT_R12, \docfi
#ifdef CONFIG_CPU_HAS_SMARTMIPS
	/* mtlhx pushes a value into the ACX/HI/LO stack one at a time */
	mtlhx	$14
	mtlhx	$15
	mtlhx	$24
#elif !defined(CONFIG_CPU_MIPSR6)
	mtlo	$24
	mthi	$15
#endif
	cfi_ld	$13, PT_R13, \docfi
	cfi_ld	$14, PT_R14, \docfi
	cfi_ld	$15, PT_R15, \docfi
	cfi_ld	$24, PT_R24, \docfi
	.endm
338 | ||
866b6a89 CM |
/*
 * RESTORE_STATIC: reload the callee-saved registers $16-$23 and $30;
 * counterpart of SAVE_STATIC.
 */
	.macro	RESTORE_STATIC docfi=0
	cfi_ld	$16, PT_R16, \docfi
	cfi_ld	$17, PT_R17, \docfi
	cfi_ld	$18, PT_R18, \docfi
	cfi_ld	$19, PT_R19, \docfi
	cfi_ld	$20, PT_R20, \docfi
	cfi_ld	$21, PT_R21, \docfi
	cfi_ld	$22, PT_R22, \docfi
	cfi_ld	$23, PT_R23, \docfi
	cfi_ld	$30, PT_R30, \docfi
	.endm
350 | ||
/*
 * RESTORE_SP: reload the pre-exception stack pointer saved in
 * PT_R29.  Must be the last restore: everything else is addressed
 * relative to the current sp.
 */
	.macro	RESTORE_SP docfi=0
	cfi_ld	sp, PT_R29, \docfi
	.endm
354 | ||
455481fc | 355 | #if defined(CONFIG_CPU_R3000) |
1da177e4 | 356 | |
/*
 * RESTORE_SOME (R3000-style): rebuild CP0 Status from the saved
 * PT_STATUS value, keeping the live CU1 and interrupt-mask bits,
 * then reload the volatile registers.  The intermediate mtc0 with
 * the STATMASK bits cleared keeps the transition safe.
 */
	.macro	RESTORE_SOME docfi=0
	.set	push
	.set	reorder
	.set	noat
	mfc0	a0, CP0_STATUS
	li	v1, ST0_CU1 | ST0_IM	/* bits kept from the live Status */
	ori	a0, STATMASK		/* clear the mode/irq bits ...   */
	xori	a0, STATMASK		/* ... via set-then-toggle       */
	mtc0	a0, CP0_STATUS
	and	a0, v1			/* a0 = live CU1|IM bits          */
	LONG_L	v0, PT_STATUS(sp)
	nor	v1, $0, v1
	and	v0, v1			/* v0 = saved Status sans CU1|IM  */
	or	v0, a0			/* merge and write back           */
	mtc0	v0, CP0_STATUS
	cfi_ld	$31, PT_R31, \docfi
	cfi_ld	$28, PT_R28, \docfi
	cfi_ld	$25, PT_R25, \docfi
	cfi_ld	$7, PT_R7, \docfi
	cfi_ld	$6, PT_R6, \docfi
	cfi_ld	$5, PT_R5, \docfi
	cfi_ld	$4, PT_R4, \docfi
	cfi_ld	$3, PT_R3, \docfi
	cfi_ld	$2, PT_R2, \docfi
	.set	pop
	.endm
383 | ||
/*
 * RESTORE_SP_AND_RET (R3000-style): reload EPC and sp, then return.
 * rfe sits in the jr delay slot so the Status mode bits pop back
 * before the first instruction at the return address executes.
 */
	.macro	RESTORE_SP_AND_RET docfi=0
	.set	push
	.set	noreorder
	LONG_L	k0, PT_EPC(sp)
	RESTORE_SP \docfi
	jr	k0
	 rfe
	.set	pop
	.endm
393 | ||
394 | #else | |
/*
 * RESTORE_SOME (R4000 and later): rebuild CP0 Status from the saved
 * PT_STATUS value, keeping the live CU1/FR/IM bits, restore EPC for
 * the eventual eret, then reload the volatile registers.
 */
	.macro	RESTORE_SOME docfi=0
	.set	push
	.set	reorder
	.set	noat
	mfc0	a0, CP0_STATUS
	ori	a0, STATMASK		/* clear the mode/irq bits ... */
	xori	a0, STATMASK		/* ... via set-then-toggle     */
	mtc0	a0, CP0_STATUS
	li	v1, ST0_CU1 | ST0_FR | ST0_IM	/* bits kept live */
	and	a0, v1
	LONG_L	v0, PT_STATUS(sp)
	nor	v1, $0, v1
	and	v0, v1			/* saved Status sans kept bits */
	or	v0, a0			/* merge and write back        */
	mtc0	v0, CP0_STATUS
	LONG_L	v1, PT_EPC(sp)
	MTC0	v1, CP0_EPC		/* return address for eret     */
	cfi_ld	$31, PT_R31, \docfi
	cfi_ld	$28, PT_R28, \docfi
	cfi_ld	$25, PT_R25, \docfi
#ifdef CONFIG_64BIT
	/* $8/$9 are argument registers on 64-bit ABIs */
	cfi_ld	$8, PT_R8, \docfi
	cfi_ld	$9, PT_R9, \docfi
#endif
	cfi_ld	$7, PT_R7, \docfi
	cfi_ld	$6, PT_R6, \docfi
	cfi_ld	$5, PT_R5, \docfi
	cfi_ld	$4, PT_R4, \docfi
	cfi_ld	$3, PT_R3, \docfi
	cfi_ld	$2, PT_R2, \docfi
	.set	pop
	.endm
427 | ||
866b6a89 CM |
/*
 * RESTORE_SP_AND_RET (R4000 and later): reload sp and return from
 * exception via eret (EPC was set up by RESTORE_SOME).  R5/R6 cores
 * use eretnc instead; older toolchain targets may lack eret, so the
 * ISA is forced with arch=r4000 around it.
 */
	.macro	RESTORE_SP_AND_RET docfi=0
	RESTORE_SP \docfi
#if defined(CONFIG_CPU_MIPSR5) || defined(CONFIG_CPU_MIPSR6)
	eretnc
#else
	.set	push
	.set	arch=r4000
	eret
	.set	pop
#endif
	.endm
439 | ||
440 | #endif | |
441 | ||
866b6a89 CM |
/*
 * RESTORE_ALL: undo SAVE_ALL.  sp must be restored last because all
 * other reloads address pt_regs through the current sp.
 */
	.macro	RESTORE_ALL docfi=0
	RESTORE_TEMP \docfi
	RESTORE_STATIC \docfi
	RESTORE_AT \docfi
	RESTORE_SOME \docfi
	RESTORE_SP \docfi
	.endm
449 | ||
1da177e4 LT |
/*
 * Move to kernel mode and disable interrupts.
 * Set cp0 enable bit as sign that we're running on the kernel stack
 */
	.macro	CLI
	mfc0	t0, CP0_STATUS
	li	t1, ST0_KERNEL_CUMASK | STATMASK
	or	t0, t1			/* set CU bits + all STATMASK bits */
	xori	t0, STATMASK		/* then clear STATMASK (incl. IE)  */
	mtc0	t0, CP0_STATUS
	irq_disable_hazard
	.endm
462 | ||
/*
 * Move to kernel mode and enable interrupts.
 * Set cp0 enable bit as sign that we're running on the kernel stack
 */
	.macro	STI
	mfc0	t0, CP0_STATUS
	li	t1, ST0_KERNEL_CUMASK | STATMASK
	or	t0, t1			/* set CU bits + all STATMASK bits  */
	xori	t0, STATMASK & ~1	/* clear all but bit 0 -> irqs on   */
	mtc0	t0, CP0_STATUS
	irq_enable_hazard
	.endm
475 | ||
/*
 * Just move to kernel mode and leave interrupts as they are. Note
 * for the R3000 this means copying the previous enable from IEp.
 * Set cp0 enable bit as sign that we're running on the kernel stack
 */
	.macro	KMODE
	mfc0	t0, CP0_STATUS
	li	t1, ST0_KERNEL_CUMASK | (STATMASK & ~1)
#if defined(CONFIG_CPU_R3000)
	/* Copy the previous-enable bit (IEp, bit 2) down into IE (bit 0) */
	andi	t2, t0, ST0_IEP
	srl	t2, 2
	or	t0, t2
#endif
	or	t0, t1			/* set CU bits + STATMASK sans IE */
	xori	t0, STATMASK & ~1	/* clear them again, leaving IE   */
	mtc0	t0, CP0_STATUS
	irq_disable_hazard
	.endm
494 | ||
495 | #endif /* _ASM_STACKFRAME_H */ |