MIPS: Fix duplicate CP0_* definitions.
[linux-2.6-block.git] / arch / mips / kvm / locore.S
CommitLineData
b680f70f 1/*
2c07ebbd
DD
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Main entry point for the guest, exception handling.
7 *
8 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
9 * Authors: Sanjay Lal <sanjayl@kymasys.com>
10 */
b680f70f
SL
11
12#include <asm/asm.h>
13#include <asm/asmmacro.h>
14#include <asm/regdef.h>
15#include <asm/mipsregs.h>
16#include <asm/stackframe.h>
17#include <asm/asm-offsets.h>
18
b680f70f
SL
19#define _C_LABEL(x) x
20#define MIPSX(name) mips32_ ## name
21#define CALLFRAME_SIZ 32
22
23/*
24 * VECTOR
25 * exception vector entrypoint
26 */
27#define VECTOR(x, regmask) \
28 .ent _C_LABEL(x),0; \
29 EXPORT(x);
30
31#define VECTOR_END(x) \
32 EXPORT(x);
33
34/* Overload, Danger Will Robinson!! */
/*
 * The guest entry/exit code never takes a TLB fault on the host stack
 * frame, so the pt_regs BVADDR and EPC slots are reused to stash the
 * host ASID and the host DDATA_LO value respectively.
 */
35#define PT_HOST_ASID PT_BVADDR
36#define PT_HOST_USERLOCAL PT_EPC
37
/*
 * CP0 Register 28, select 3 (DDATA_LO): used below as a scratch register
 * holding the vcpu pointer across guest entry/exit.
 * NOTE(review): the commit subject mentions duplicate CP0_* definitions —
 * confirm this local define does not collide with one in <asm/mipsregs.h>.
 */
38#define CP0_DDATA_LO $28,3
b680f70f
SL
39
40/* Resume Flags */
/* Bit 1 of the handler's return value selects "return to host". */
41#define RESUME_FLAG_HOST (1<<1) /* Resume host? */
42
43#define RESUME_GUEST 0
44#define RESUME_HOST RESUME_FLAG_HOST
45
46/*
47 * __kvm_mips_vcpu_run: entry point to the guest
48 * a0: run
49 * a1: vcpu
50 */
/*
 * (review notes) Flow: save host GPRs/hi/lo/Status/ASID into a pt_regs
 * frame carved off the host stack, stash the vcpu pointer in CP0 DDATA_LO,
 * switch Status/EBase over to guest (user-mode) settings, load the guest
 * ASID and GPRs from the VCPU, then eret into the guest.
 */
2c07ebbd
DD
51 .set noreorder
52 .set noat
53
bb48c2fc 54FEXPORT(__kvm_mips_vcpu_run)
2c07ebbd 55 /* k0/k1 not being used in host kernel context */
/* k1 = scratch pt_regs frame just below the current host sp */
ea69f28d 56 INT_ADDIU k1, sp, -PT_SIZE
2c07ebbd
DD
57 LONG_S $0, PT_R0(k1)
58 LONG_S $1, PT_R1(k1)
59 LONG_S $2, PT_R2(k1)
60 LONG_S $3, PT_R3(k1)
61
62 LONG_S $4, PT_R4(k1)
63 LONG_S $5, PT_R5(k1)
64 LONG_S $6, PT_R6(k1)
65 LONG_S $7, PT_R7(k1)
66
67 LONG_S $8, PT_R8(k1)
68 LONG_S $9, PT_R9(k1)
69 LONG_S $10, PT_R10(k1)
70 LONG_S $11, PT_R11(k1)
71 LONG_S $12, PT_R12(k1)
72 LONG_S $13, PT_R13(k1)
73 LONG_S $14, PT_R14(k1)
74 LONG_S $15, PT_R15(k1)
75 LONG_S $16, PT_R16(k1)
76 LONG_S $17, PT_R17(k1)
77
78 LONG_S $18, PT_R18(k1)
79 LONG_S $19, PT_R19(k1)
80 LONG_S $20, PT_R20(k1)
81 LONG_S $21, PT_R21(k1)
82 LONG_S $22, PT_R22(k1)
83 LONG_S $23, PT_R23(k1)
84 LONG_S $24, PT_R24(k1)
85 LONG_S $25, PT_R25(k1)
b680f70f 86
d116e812
DCZ
87 /*
88 * XXXKYMA k0/k1 not saved, not being used if we got here through
89 * an ioctl()
90 */
b680f70f 91
2c07ebbd
DD
92 LONG_S $28, PT_R28(k1)
93 LONG_S $29, PT_R29(k1)
94 LONG_S $30, PT_R30(k1)
95 LONG_S $31, PT_R31(k1)
b680f70f 96
2c07ebbd
DD
97 /* Save hi/lo */
98 mflo v0
99 LONG_S v0, PT_LO(k1)
100 mfhi v1
101 LONG_S v1, PT_HI(k1)
b680f70f
SL
102
103 /* Save host status */
2c07ebbd
DD
104 mfc0 v0, CP0_STATUS
105 LONG_S v0, PT_STATUS(k1)
b680f70f
SL
106
107 /* Save host ASID, shove it into the BVADDR location */
2c07ebbd
DD
108 mfc0 v1, CP0_ENTRYHI
/* NOTE(review): masks ASID to 8 bits — assumes an 8-bit ASID field; confirm */
109 andi v1, 0xff
110 LONG_S v1, PT_HOST_ASID(k1)
b680f70f 111
2c07ebbd
DD
112 /* Save DDATA_LO, will be used to store pointer to vcpu */
113 mfc0 v1, CP0_DDATA_LO
114 LONG_S v1, PT_HOST_USERLOCAL(k1)
b680f70f 115
2c07ebbd
DD
116 /* DDATA_LO has pointer to vcpu */
117 mtc0 a1, CP0_DDATA_LO
b680f70f 118
2c07ebbd 119 /* Offset into vcpu->arch */
ea69f28d 120 INT_ADDIU k1, a1, VCPU_HOST_ARCH
b680f70f 121
2c07ebbd
DD
122 /*
123 * Save the host stack to VCPU, used for exception processing
124 * when we exit from the Guest
125 */
126 LONG_S sp, VCPU_HOST_STACK(k1)
b680f70f 127
2c07ebbd
DD
128 /* Save the kernel gp as well */
129 LONG_S gp, VCPU_HOST_GP(k1)
b680f70f 130
d116e812
DCZ
131 /*
132 * Setup status register for running the guest in UM, interrupts
133 * are disabled
134 */
2c07ebbd
DD
135 li k0, (ST0_EXL | KSU_USER | ST0_BEV)
136 mtc0 k0, CP0_STATUS
137 ehb
138
139 /* load up the new EBASE */
140 LONG_L k0, VCPU_GUEST_EBASE(k1)
141 mtc0 k0, CP0_EBASE
142
143 /*
144 * Now that the new EBASE has been loaded, unset BEV, set
145 * interrupt mask as it was but make sure that timer interrupts
146 * are enabled
147 */
/* v0 still holds the saved host Status; keep only its IM bits */
148 li k0, (ST0_EXL | KSU_USER | ST0_IE)
149 andi v0, v0, ST0_IM
150 or k0, k0, v0
151 mtc0 k0, CP0_STATUS
152 ehb
b680f70f 153
b680f70f 154 /* Set Guest EPC */
2c07ebbd
DD
155 LONG_L t0, VCPU_PC(k1)
156 mtc0 t0, CP0_EPC
b680f70f
SL
157
158FEXPORT(__kvm_mips_load_asid)
2c07ebbd 159 /* Set the ASID for the Guest Kernel */
/*
 * t0 = guest PC; shifting left by one moves the kseg0/kuseg distinction
 * into the sign bit, so bltz below means "guest kernel address".
 */
ea69f28d 160 INT_SLL t0, t0, 1 /* with kseg0 @ 0x40000000, kernel */
2c07ebbd
DD
161 /* addresses shift to 0x80000000 */
162 bltz t0, 1f /* If kernel */
ea69f28d
DD
/*
 * The first INT_ADDIU is in the branch delay slot and always executes
 * (kernel-ASID pointer); the user case falls through and overwrites t1.
 */
163 INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */
164 INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID /* else user */
b680f70f 1651:
d116e812 166 /* t1: contains the base of the ASID array, need to get the cpu id */
2c07ebbd 167 LONG_L t2, TI_CPU($28) /* smp_processor_id */
ea69f28d
DD
168 INT_SLL t2, t2, 2 /* x4 */
169 REG_ADDU t3, t1, t2
2c07ebbd
DD
170 LONG_L k0, (t3)
171 andi k0, k0, 0xff
172 mtc0 k0, CP0_ENTRYHI
173 ehb
174
175 /* Disable RDHWR access */
176 mtc0 zero, CP0_HWRENA
177
178 /* Now load up the Guest Context from VCPU */
179 LONG_L $1, VCPU_R1(k1)
180 LONG_L $2, VCPU_R2(k1)
181 LONG_L $3, VCPU_R3(k1)
182
183 LONG_L $4, VCPU_R4(k1)
184 LONG_L $5, VCPU_R5(k1)
185 LONG_L $6, VCPU_R6(k1)
186 LONG_L $7, VCPU_R7(k1)
187
188 LONG_L $8, VCPU_R8(k1)
189 LONG_L $9, VCPU_R9(k1)
190 LONG_L $10, VCPU_R10(k1)
191 LONG_L $11, VCPU_R11(k1)
192 LONG_L $12, VCPU_R12(k1)
193 LONG_L $13, VCPU_R13(k1)
194 LONG_L $14, VCPU_R14(k1)
195 LONG_L $15, VCPU_R15(k1)
196 LONG_L $16, VCPU_R16(k1)
197 LONG_L $17, VCPU_R17(k1)
198 LONG_L $18, VCPU_R18(k1)
199 LONG_L $19, VCPU_R19(k1)
200 LONG_L $20, VCPU_R20(k1)
201 LONG_L $21, VCPU_R21(k1)
202 LONG_L $22, VCPU_R22(k1)
203 LONG_L $23, VCPU_R23(k1)
204 LONG_L $24, VCPU_R24(k1)
205 LONG_L $25, VCPU_R25(k1)
206
207 /* k0/k1 loaded up later */
208
209 LONG_L $28, VCPU_R28(k1)
210 LONG_L $29, VCPU_R29(k1)
211 LONG_L $30, VCPU_R30(k1)
212 LONG_L $31, VCPU_R31(k1)
213
214 /* Restore hi/lo */
215 LONG_L k0, VCPU_LO(k1)
216 mtlo k0
217
218 LONG_L k0, VCPU_HI(k1)
219 mthi k0
b680f70f
SL
220
221FEXPORT(__kvm_mips_load_k0k1)
222 /* Restore the guest's k0/k1 registers */
/* k1 is overwritten last — no VCPU-relative accesses may follow */
2c07ebbd
DD
223 LONG_L k0, VCPU_R26(k1)
224 LONG_L k1, VCPU_R27(k1)
b680f70f 225
2c07ebbd 226 /* Jump to guest */
b680f70f 227 eret
228
/*
 * Common exception vector stub, copied to the guest EBASE region.
 * Saves the live k0 in CP0 ErrorEPC and k1 at EBASE+0x3000 (scratch),
 * then jumps to the real handler installed at EBASE+0x2000.
 */
229VECTOR(MIPSX(exception), unknown)
d116e812 230/* Find out what mode we came from and jump to the proper handler. */
2c07ebbd
DD
231 mtc0 k0, CP0_ERROREPC #01: Save guest k0
232 ehb #02:
233
234 mfc0 k0, CP0_EBASE #02: Get EBASE
ea69f28d
DD
235 INT_SRL k0, k0, 10 #03: Get rid of CPUNum
236 INT_SLL k0, k0, 10 #04
2c07ebbd 237 LONG_S k1, 0x3000(k0) #05: Save k1 @ offset 0x3000
d116e812
DCZ
238 INT_ADDIU k0, k0, 0x2000 #06: Exception handler is
239 # installed @ offset 0x2000
2c07ebbd
DD
240 j k0 #07: jump to the function
241 nop #08: branch delay slot
b680f70f
SL
242VECTOR_END(MIPSX(exceptionEnd))
243.end MIPSX(exception)
244
245/*
246 * Generic Guest exception handler. We end up here when the guest
247 * does something that causes a trap to kernel mode.
b680f70f
SL
248 */
/*
 * (review notes) Flow: save the full guest context into the VCPU, restore
 * just enough host state (EBASE, Status, gp, sp) to call
 * kvm_mips_handle_exit(), then either re-enter the guest
 * (__kvm_mips_return_to_guest) or unwind back to the host caller
 * (__kvm_mips_return_to_host) depending on the RESUME_HOST flag in v0.
 */
249NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra)
2c07ebbd
DD
250 /* Get the VCPU pointer from DDATA_LO */
251 mfc0 k1, CP0_DDATA_LO
ea69f28d 252 INT_ADDIU k1, k1, VCPU_HOST_ARCH
2c07ebbd
DD
253
254 /* Start saving Guest context to VCPU */
255 LONG_S $0, VCPU_R0(k1)
256 LONG_S $1, VCPU_R1(k1)
257 LONG_S $2, VCPU_R2(k1)
258 LONG_S $3, VCPU_R3(k1)
259 LONG_S $4, VCPU_R4(k1)
260 LONG_S $5, VCPU_R5(k1)
261 LONG_S $6, VCPU_R6(k1)
262 LONG_S $7, VCPU_R7(k1)
263 LONG_S $8, VCPU_R8(k1)
264 LONG_S $9, VCPU_R9(k1)
265 LONG_S $10, VCPU_R10(k1)
266 LONG_S $11, VCPU_R11(k1)
267 LONG_S $12, VCPU_R12(k1)
268 LONG_S $13, VCPU_R13(k1)
269 LONG_S $14, VCPU_R14(k1)
270 LONG_S $15, VCPU_R15(k1)
271 LONG_S $16, VCPU_R16(k1)
272 LONG_S $17, VCPU_R17(k1)
273 LONG_S $18, VCPU_R18(k1)
274 LONG_S $19, VCPU_R19(k1)
275 LONG_S $20, VCPU_R20(k1)
276 LONG_S $21, VCPU_R21(k1)
277 LONG_S $22, VCPU_R22(k1)
278 LONG_S $23, VCPU_R23(k1)
279 LONG_S $24, VCPU_R24(k1)
280 LONG_S $25, VCPU_R25(k1)
281
282 /* Guest k0/k1 saved later */
283
284 LONG_S $28, VCPU_R28(k1)
285 LONG_S $29, VCPU_R29(k1)
286 LONG_S $30, VCPU_R30(k1)
287 LONG_S $31, VCPU_R31(k1)
288
d116e812 289 /* We need to save hi/lo and restore them on the way out */
2c07ebbd
DD
290 mfhi t0
291 LONG_S t0, VCPU_HI(k1)
292
293 mflo t0
294 LONG_S t0, VCPU_LO(k1)
295
296 /* Finally save guest k0/k1 to VCPU */
/* Guest k0 was parked in ErrorEPC by the vector stub above */
297 mfc0 t0, CP0_ERROREPC
298 LONG_S t0, VCPU_R26(k1)
299
300 /* Get GUEST k1 and save it in VCPU */
/* Recover the EBASE page base, then read guest k1 from the 0x3000 slot */
ea69f28d 301 PTR_LI t1, ~0x2ff
2c07ebbd
DD
302 mfc0 t0, CP0_EBASE
303 and t0, t0, t1
304 LONG_L t0, 0x3000(t0)
305 LONG_S t0, VCPU_R27(k1)
306
307 /* Now that context has been saved, we can use other registers */
308
309 /* Restore vcpu */
310 mfc0 a1, CP0_DDATA_LO
311 move s1, a1
312
313 /* Restore run (vcpu->run) */
314 LONG_L a0, VCPU_RUN(a1)
315 /* Save pointer to run in s0, will be saved by the compiler */
316 move s0, a0
317
d116e812
DCZ
318 /*
319 * Save Host level EPC, BadVaddr and Cause to VCPU, useful to
320 * process the exception
321 */
2c07ebbd
DD
322 mfc0 k0,CP0_EPC
323 LONG_S k0, VCPU_PC(k1)
324
325 mfc0 k0, CP0_BADVADDR
326 LONG_S k0, VCPU_HOST_CP0_BADVADDR(k1)
327
328 mfc0 k0, CP0_CAUSE
329 LONG_S k0, VCPU_HOST_CP0_CAUSE(k1)
330
331 mfc0 k0, CP0_ENTRYHI
332 LONG_S k0, VCPU_HOST_ENTRYHI(k1)
333
334 /* Now restore the host state just enough to run the handlers */
335
336 /* Switch EBASE to the one used by Linux */
337 /* load up the host EBASE */
338 mfc0 v0, CP0_STATUS
339
/* BEV must be set while EBASE is being changed */
340 .set at
341 or k0, v0, ST0_BEV
342 .set noat
343
344 mtc0 k0, CP0_STATUS
345 ehb
b680f70f 346
2c07ebbd
DD
347 LONG_L k0, VCPU_HOST_EBASE(k1)
348 mtc0 k0,CP0_EBASE
b680f70f 349
98e91b84
JH
350 /*
351 * If FPU is enabled, save FCR31 and clear it so that later ctc1's don't
352 * trigger FPE for pending exceptions.
353 */
354 .set at
355 and v1, v0, ST0_CU1
356 beqz v1, 1f
357 nop
358 .set push
359 SET_HARDFLOAT
360 cfc1 t0, fcr31
361 sw t0, VCPU_FCR31(k1)
362 ctc1 zero,fcr31
363 .set pop
364 .set noat
3651:
366
539cb89f
JH
367#ifdef CONFIG_CPU_HAS_MSA
368 /*
369 * If MSA is enabled, save MSACSR and clear it so that later
370 * instructions don't trigger MSAFPE for pending exceptions.
371 */
372 mfc0 t0, CP0_CONFIG3
373 ext t0, t0, 28, 1 /* MIPS_CONF3_MSAP */
374 beqz t0, 1f
375 nop
376 mfc0 t0, CP0_CONFIG5
377 ext t0, t0, 27, 1 /* MIPS_CONF5_MSAEN */
378 beqz t0, 1f
379 nop
380 _cfcmsa t0, MSA_CSR
381 sw t0, VCPU_MSA_CSR(k1)
382 _ctcmsa MSA_CSR, zero
3831:
384#endif
385
2c07ebbd
DD
386 /* Now that the new EBASE has been loaded, unset BEV and KSU_USER */
387 .set at
388 and v0, v0, ~(ST0_EXL | KSU_USER | ST0_IE)
389 or v0, v0, ST0_CU0
390 .set noat
391 mtc0 v0, CP0_STATUS
392 ehb
b680f70f 393
2c07ebbd
DD
394 /* Load up host GP */
395 LONG_L gp, VCPU_HOST_GP(k1)
396
397 /* Need a stack before we can jump to "C" */
398 LONG_L sp, VCPU_HOST_STACK(k1)
399
400 /* Saved host state */
/* Point sp at the pt_regs frame built by __kvm_mips_vcpu_run */
ea69f28d 401 INT_ADDIU sp, sp, -PT_SIZE
2c07ebbd 402
d116e812
DCZ
403 /*
404 * XXXKYMA do we need to load the host ASID, maybe not because the
2c07ebbd
DD
405 * kernel entries are marked GLOBAL, need to verify
406 */
407
408 /* Restore host DDATA_LO */
409 LONG_L k0, PT_HOST_USERLOCAL(sp)
410 mtc0 k0, CP0_DDATA_LO
411
412 /* Restore RDHWR access */
ea69f28d 413 PTR_LI k0, 0x2000000F
2c07ebbd
DD
414 mtc0 k0, CP0_HWRENA
415
416 /* Jump to handler */
b680f70f 417FEXPORT(__kvm_mips_jump_to_handler)
d116e812
DCZ
418 /*
419 * XXXKYMA: not sure if this is safe, how large is the stack??
2c07ebbd 420 * Now jump to the kvm_mips_handle_exit() to see if we can deal
d116e812
DCZ
421 * with this in the kernel
422 */
/* jalr.hb also clears instruction hazards from the CP0 writes above */
ea69f28d 423 PTR_LA t9, kvm_mips_handle_exit
2c07ebbd 424 jalr.hb t9
ea69f28d 425 INT_ADDIU sp, sp, -CALLFRAME_SIZ /* BD Slot */
2c07ebbd
DD
426
427 /* Return from handler; make sure interrupts are disabled */
428 di
429 ehb
430
d116e812
DCZ
431 /*
432 * XXXKYMA: k0/k1 could have been blown away if we processed
2c07ebbd
DD
433 * an exception while we were handling the exception from the
434 * guest, reload k1
435 */
436
437 move k1, s1
ea69f28d 438 INT_ADDIU k1, k1, VCPU_HOST_ARCH
2c07ebbd 439
d116e812
DCZ
440 /*
441 * Check return value, should tell us if we are returning to the
2c07ebbd
DD
442 * host (handle I/O etc) or resuming the guest
443 */
444 andi t0, v0, RESUME_HOST
445 bnez t0, __kvm_mips_return_to_host
446 nop
b680f70f
SL
447
448__kvm_mips_return_to_guest:
2c07ebbd
DD
449 /* Put the saved pointer to vcpu (s1) back into the DDATA_LO Register */
450 mtc0 s1, CP0_DDATA_LO
b680f70f 451
2c07ebbd
DD
452 /* Load up the Guest EBASE to minimize the window where BEV is set */
453 LONG_L t0, VCPU_GUEST_EBASE(k1)
454
455 /* Switch EBASE back to the one used by KVM */
456 mfc0 v1, CP0_STATUS
457 .set at
458 or k0, v1, ST0_BEV
459 .set noat
460 mtc0 k0, CP0_STATUS
461 ehb
462 mtc0 t0, CP0_EBASE
463
464 /* Setup status register for running guest in UM */
465 .set at
466 or v1, v1, (ST0_EXL | KSU_USER | ST0_IE)
467 and v1, v1, ~(ST0_CU0 | ST0_MX)
2c07ebbd
DD
468 .set noat
469 mtc0 v1, CP0_STATUS
470 ehb
b680f70f
SL
471
472 /* Set Guest EPC */
2c07ebbd
DD
473 LONG_L t0, VCPU_PC(k1)
474 mtc0 t0, CP0_EPC
475
476 /* Set the ASID for the Guest Kernel */
/* Same delay-slot trick as __kvm_mips_load_asid: sign bit selects kernel */
ea69f28d 477 INT_SLL t0, t0, 1 /* with kseg0 @ 0x40000000, kernel */
2c07ebbd
DD
478 /* addresses shift to 0x80000000 */
479 bltz t0, 1f /* If kernel */
ea69f28d
DD
480 INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */
481 INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID /* else user */
b680f70f 4821:
2c07ebbd
DD
483 /* t1: contains the base of the ASID array, need to get the cpu id */
484 LONG_L t2, TI_CPU($28) /* smp_processor_id */
ea69f28d
DD
485 INT_SLL t2, t2, 2 /* x4 */
486 REG_ADDU t3, t1, t2
2c07ebbd
DD
487 LONG_L k0, (t3)
488 andi k0, k0, 0xff
489 mtc0 k0,CP0_ENTRYHI
490 ehb
491
492 /* Disable RDHWR access */
493 mtc0 zero, CP0_HWRENA
494
495 /* load the guest context from VCPU and return */
496 LONG_L $0, VCPU_R0(k1)
497 LONG_L $1, VCPU_R1(k1)
498 LONG_L $2, VCPU_R2(k1)
499 LONG_L $3, VCPU_R3(k1)
500 LONG_L $4, VCPU_R4(k1)
501 LONG_L $5, VCPU_R5(k1)
502 LONG_L $6, VCPU_R6(k1)
503 LONG_L $7, VCPU_R7(k1)
504 LONG_L $8, VCPU_R8(k1)
505 LONG_L $9, VCPU_R9(k1)
506 LONG_L $10, VCPU_R10(k1)
507 LONG_L $11, VCPU_R11(k1)
508 LONG_L $12, VCPU_R12(k1)
509 LONG_L $13, VCPU_R13(k1)
510 LONG_L $14, VCPU_R14(k1)
511 LONG_L $15, VCPU_R15(k1)
512 LONG_L $16, VCPU_R16(k1)
513 LONG_L $17, VCPU_R17(k1)
514 LONG_L $18, VCPU_R18(k1)
515 LONG_L $19, VCPU_R19(k1)
516 LONG_L $20, VCPU_R20(k1)
517 LONG_L $21, VCPU_R21(k1)
518 LONG_L $22, VCPU_R22(k1)
519 LONG_L $23, VCPU_R23(k1)
520 LONG_L $24, VCPU_R24(k1)
521 LONG_L $25, VCPU_R25(k1)
522
523 /* k0/k1 loaded later */
524 LONG_L $28, VCPU_R28(k1)
525 LONG_L $29, VCPU_R29(k1)
526 LONG_L $30, VCPU_R30(k1)
527 LONG_L $31, VCPU_R31(k1)
b680f70f
SL
528
529FEXPORT(__kvm_mips_skip_guest_restore)
2c07ebbd
DD
530 LONG_L k0, VCPU_HI(k1)
531 mthi k0
b680f70f 532
2c07ebbd
DD
533 LONG_L k0, VCPU_LO(k1)
534 mtlo k0
b680f70f 535
2c07ebbd
DD
536 LONG_L k0, VCPU_R26(k1)
537 LONG_L k1, VCPU_R27(k1)
b680f70f 538
2c07ebbd 539 eret
b680f70f
SL
540
541__kvm_mips_return_to_host:
2c07ebbd
DD
542 /* EBASE is already pointing to Linux */
543 LONG_L k1, VCPU_HOST_STACK(k1)
ea69f28d 544 INT_ADDIU k1,k1, -PT_SIZE
2c07ebbd
DD
545
546 /* Restore host DDATA_LO */
547 LONG_L k0, PT_HOST_USERLOCAL(k1)
548 mtc0 k0, CP0_DDATA_LO
549
550 /* Restore host ASID */
/*
 * NOTE(review): this loads through sp while the surrounding code addresses
 * the saved frame through k1 — confirm sp still points at the same pt_regs
 * frame here (sp was moved by -CALLFRAME_SIZ before the handler call).
 */
551 LONG_L k0, PT_HOST_ASID(sp)
552 andi k0, 0xff
553 mtc0 k0,CP0_ENTRYHI
554 ehb
555
556 /* Load context saved on the host stack */
557 LONG_L $0, PT_R0(k1)
558 LONG_L $1, PT_R1(k1)
559
d116e812
DCZ
560 /*
561 * r2/v0 is the return code, shift it down by 2 (arithmetic)
562 * to recover the err code
563 */
ea69f28d 564 INT_SRA k0, v0, 2
2c07ebbd
DD
565 move $2, k0
566
567 LONG_L $3, PT_R3(k1)
568 LONG_L $4, PT_R4(k1)
569 LONG_L $5, PT_R5(k1)
570 LONG_L $6, PT_R6(k1)
571 LONG_L $7, PT_R7(k1)
572 LONG_L $8, PT_R8(k1)
573 LONG_L $9, PT_R9(k1)
574 LONG_L $10, PT_R10(k1)
575 LONG_L $11, PT_R11(k1)
576 LONG_L $12, PT_R12(k1)
577 LONG_L $13, PT_R13(k1)
578 LONG_L $14, PT_R14(k1)
579 LONG_L $15, PT_R15(k1)
580 LONG_L $16, PT_R16(k1)
581 LONG_L $17, PT_R17(k1)
582 LONG_L $18, PT_R18(k1)
583 LONG_L $19, PT_R19(k1)
584 LONG_L $20, PT_R20(k1)
585 LONG_L $21, PT_R21(k1)
586 LONG_L $22, PT_R22(k1)
587 LONG_L $23, PT_R23(k1)
588 LONG_L $24, PT_R24(k1)
589 LONG_L $25, PT_R25(k1)
590
591 /* Host k0/k1 were not saved */
592
593 LONG_L $28, PT_R28(k1)
594 LONG_L $29, PT_R29(k1)
595 LONG_L $30, PT_R30(k1)
596
597 LONG_L k0, PT_HI(k1)
598 mthi k0
599
600 LONG_L k0, PT_LO(k1)
601 mtlo k0
602
603 /* Restore RDHWR access */
ea69f28d 604 PTR_LI k0, 0x2000000F
2c07ebbd
DD
605 mtc0 k0, CP0_HWRENA
606
2c07ebbd
DD
607 /* Restore RA, which is the address we will return to */
608 LONG_L ra, PT_R31(k1)
609 j ra
610 nop
611
b680f70f
SL
612VECTOR_END(MIPSX(GuestExceptionEnd))
613.end MIPSX(GuestException)
614
/*
 * Exception dispatch table: one entry per MIPS exception cause code
 * (0..31). Every cause is routed to the same generic guest exception
 * handler above.
 */
615MIPSX(exceptions):
616 ####
617 ##### The exception handlers.
618 #####
619 .word _C_LABEL(MIPSX(GuestException)) # 0
620 .word _C_LABEL(MIPSX(GuestException)) # 1
621 .word _C_LABEL(MIPSX(GuestException)) # 2
622 .word _C_LABEL(MIPSX(GuestException)) # 3
623 .word _C_LABEL(MIPSX(GuestException)) # 4
624 .word _C_LABEL(MIPSX(GuestException)) # 5
625 .word _C_LABEL(MIPSX(GuestException)) # 6
626 .word _C_LABEL(MIPSX(GuestException)) # 7
627 .word _C_LABEL(MIPSX(GuestException)) # 8
628 .word _C_LABEL(MIPSX(GuestException)) # 9
629 .word _C_LABEL(MIPSX(GuestException)) # 10
630 .word _C_LABEL(MIPSX(GuestException)) # 11
631 .word _C_LABEL(MIPSX(GuestException)) # 12
632 .word _C_LABEL(MIPSX(GuestException)) # 13
633 .word _C_LABEL(MIPSX(GuestException)) # 14
634 .word _C_LABEL(MIPSX(GuestException)) # 15
635 .word _C_LABEL(MIPSX(GuestException)) # 16
636 .word _C_LABEL(MIPSX(GuestException)) # 17
637 .word _C_LABEL(MIPSX(GuestException)) # 18
638 .word _C_LABEL(MIPSX(GuestException)) # 19
639 .word _C_LABEL(MIPSX(GuestException)) # 20
640 .word _C_LABEL(MIPSX(GuestException)) # 21
641 .word _C_LABEL(MIPSX(GuestException)) # 22
642 .word _C_LABEL(MIPSX(GuestException)) # 23
643 .word _C_LABEL(MIPSX(GuestException)) # 24
644 .word _C_LABEL(MIPSX(GuestException)) # 25
645 .word _C_LABEL(MIPSX(GuestException)) # 26
646 .word _C_LABEL(MIPSX(GuestException)) # 27
647 .word _C_LABEL(MIPSX(GuestException)) # 28
648 .word _C_LABEL(MIPSX(GuestException)) # 29
649 .word _C_LABEL(MIPSX(GuestException)) # 30
650 .word _C_LABEL(MIPSX(GuestException)) # 31