MIPS: KVM: Save k0 straight into VCPU structure
arch/mips/kvm/entry.c (linux-2.6-block.git)

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Generation of main entry point for the guest, exception handling.
 *
 * Copyright (C) 2012 MIPS Technologies, Inc.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 *
 * Copyright (C) 2016 Imagination Technologies Ltd.
 */

#include <linux/kvm_host.h>
#include <asm/msa.h>
#include <asm/setup.h>
#include <asm/uasm.h>

/* Register names */
#define ZERO		0
#define AT		1
#define V0		2
#define V1		3
#define A0		4
#define A1		5

#if _MIPS_SIM == _MIPS_SIM_ABI32
#define T0		8
#define T1		9
#define T2		10
#define T3		11
#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */

#if _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32
#define T0		12
#define T1		13
#define T2		14
#define T3		15
#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32 */

#define S0		16
#define S1		17
#define T9		25
#define K0		26
#define K1		27
#define GP		28
#define SP		29
#define RA		31

/* Some CP0 registers */
#define C0_HWRENA	7, 0
#define C0_BADVADDR	8, 0
#define C0_ENTRYHI	10, 0
#define C0_STATUS	12, 0
#define C0_CAUSE	13, 0
#define C0_EPC		14, 0
#define C0_EBASE	15, 1
#define C0_CONFIG5	16, 5
#define C0_DDATA_LO	28, 3
#define C0_ERROREPC	30, 0
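
/*
 * Each C0_* define above expands to a "register, select" pair, matching the
 * two trailing operands of uasm_i_mfc0()/uasm_i_mtc0(); e.g.
 * uasm_i_mfc0(&p, V0, C0_STATUS) reads CP0 register 12, select 0, into v0.
 */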

#define CALLFRAME_SIZ   32

static unsigned int scratch_vcpu[2] = { C0_DDATA_LO };
static unsigned int scratch_tmp[2] = { C0_ERROREPC };

enum label_id {
	label_fpu_1 = 1,
	label_msa_1,
	label_return_to_host,
	label_kernel_asid,
	label_exit_common,
};

UASM_L_LA(_fpu_1)
UASM_L_LA(_msa_1)
UASM_L_LA(_return_to_host)
UASM_L_LA(_kernel_asid)
UASM_L_LA(_exit_common)

static void *kvm_mips_build_enter_guest(void *addr);
static void *kvm_mips_build_ret_from_exit(void *addr);
static void *kvm_mips_build_ret_to_guest(void *addr);
static void *kvm_mips_build_ret_to_host(void *addr);

/**
 * kvm_mips_entry_setup() - Perform global setup for entry code.
 *
 * Perform global setup for entry code, such as choosing a scratch register.
 *
 * Returns:	0 on success.
 *		-errno on failure.
 */
int kvm_mips_entry_setup(void)
{
	/*
	 * We prefer to use KScratchN registers if they are available over the
	 * defaults above, which may not work on all cores.
	 */
	unsigned int kscratch_mask = cpu_data[0].kscratch_mask & 0xfc;

	/* Pick a scratch register for storing VCPU */
	if (kscratch_mask) {
		scratch_vcpu[0] = 31;
		scratch_vcpu[1] = ffs(kscratch_mask) - 1;
		kscratch_mask &= ~BIT(scratch_vcpu[1]);
	}

	/* Pick a scratch register to use as a temp for saving state */
	if (kscratch_mask) {
		scratch_tmp[0] = 31;
		scratch_tmp[1] = ffs(kscratch_mask) - 1;
		kscratch_mask &= ~BIT(scratch_tmp[1]);
	}

	return 0;
}
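
/*
 * KScratch registers live at CP0 register 31, selects 2..7; masking
 * kscratch_mask with 0xfc above presumably drops selects 0 and 1, which are
 * not usable as scratch. Worked example (hypothetical mask): with
 * kscratch_mask = 0x0c, ffs() picks select 2 for scratch_vcpu and select 3
 * for scratch_tmp, i.e. {31, 2} and {31, 3}.
 */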

static void kvm_mips_build_save_scratch(u32 **p, unsigned int tmp,
					unsigned int frame)
{
	/* Save the VCPU scratch register value in cp0_epc of the stack frame */
	uasm_i_mfc0(p, tmp, scratch_vcpu[0], scratch_vcpu[1]);
	UASM_i_SW(p, tmp, offsetof(struct pt_regs, cp0_epc), frame);

	/* Save the temp scratch register value in cp0_cause of stack frame */
	if (scratch_tmp[0] == 31) {
		uasm_i_mfc0(p, tmp, scratch_tmp[0], scratch_tmp[1]);
		UASM_i_SW(p, tmp, offsetof(struct pt_regs, cp0_cause), frame);
	}
}
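
/*
 * Note: the host pt_regs frame here is not a real trap frame, so its cp0_epc
 * and cp0_cause slots are free to be reused as spill space for the scratch
 * registers. The temp scratch is only preserved when it is a KScratch
 * register (reg 31), presumably because the host kernel may have its own use
 * for KScratch registers, while the C0_ERROREPC fallback need not survive.
 */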

static void kvm_mips_build_restore_scratch(u32 **p, unsigned int tmp,
					   unsigned int frame)
{
	/*
	 * Restore host scratch register values saved by
	 * kvm_mips_build_save_scratch().
	 */
	UASM_i_LW(p, tmp, offsetof(struct pt_regs, cp0_epc), frame);
	uasm_i_mtc0(p, tmp, scratch_vcpu[0], scratch_vcpu[1]);

	if (scratch_tmp[0] == 31) {
		UASM_i_LW(p, tmp, offsetof(struct pt_regs, cp0_cause), frame);
		uasm_i_mtc0(p, tmp, scratch_tmp[0], scratch_tmp[1]);
	}
}

/**
 * kvm_mips_build_vcpu_run() - Assemble function to start running a guest VCPU.
 * @addr:	Address to start writing code.
 *
 * Assemble the start of the vcpu_run function to run a guest VCPU. The function
 * conforms to the following prototype:
 *
 * int vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu);
 *
 * The exit from the guest and return to the caller is handled by the code
 * generated by kvm_mips_build_ret_to_host().
 *
 * Returns:	Next address after end of written function.
 */
void *kvm_mips_build_vcpu_run(void *addr)
{
	u32 *p = addr;
	unsigned int i;

	/*
	 * A0: run
	 * A1: vcpu
	 */

	/* k0/k1 not being used in host kernel context */
	uasm_i_addiu(&p, K1, SP, -(int)sizeof(struct pt_regs));
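
	/*
	 * Save the callee-saved context on the frame just allocated in k1:
	 * registers 16..23 (s0-s7) and 28..31 (gp, sp, s8/fp, ra). The loop
	 * below skips 24..27 (t8, t9, k0, k1), which need not be preserved
	 * across the call.
	 */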
	for (i = 16; i < 32; ++i) {
		if (i == 24)
			i = 28;
		UASM_i_SW(&p, i, offsetof(struct pt_regs, regs[i]), K1);
	}

	/* Save hi/lo */
	uasm_i_mflo(&p, V0);
	UASM_i_SW(&p, V0, offsetof(struct pt_regs, lo), K1);
	uasm_i_mfhi(&p, V1);
	UASM_i_SW(&p, V1, offsetof(struct pt_regs, hi), K1);

	/* Save host status */
	uasm_i_mfc0(&p, V0, C0_STATUS);
	UASM_i_SW(&p, V0, offsetof(struct pt_regs, cp0_status), K1);

	/* Save scratch registers, will be used to store pointer to vcpu etc */
	kvm_mips_build_save_scratch(&p, V1, K1);

	/* VCPU scratch register has pointer to vcpu */
	uasm_i_mtc0(&p, A1, scratch_vcpu[0], scratch_vcpu[1]);

	/* Offset into vcpu->arch */
	uasm_i_addiu(&p, K1, A1, offsetof(struct kvm_vcpu, arch));

	/*
	 * Save the host stack to VCPU, used for exception processing
	 * when we exit from the Guest
	 */
	UASM_i_SW(&p, SP, offsetof(struct kvm_vcpu_arch, host_stack), K1);

	/* Save the kernel gp as well */
	UASM_i_SW(&p, GP, offsetof(struct kvm_vcpu_arch, host_gp), K1);

	/*
	 * Setup status register for running the guest in UM, interrupts
	 * are disabled
	 */
	UASM_i_LA(&p, K0, ST0_EXL | KSU_USER | ST0_BEV);
	uasm_i_mtc0(&p, K0, C0_STATUS);
	uasm_i_ehb(&p);

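	/*
	 * BEV was set above so that, while EBASE is switched over, any
	 * exception would be taken via the boot exception vectors rather
	 * than through a half-configured EBASE; it is cleared again once
	 * the guest EBASE is in place.
	 */
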
	/* load up the new EBASE */
	UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, guest_ebase), K1);
	uasm_i_mtc0(&p, K0, C0_EBASE);

	/*
	 * Now that the new EBASE has been loaded, unset BEV, set
	 * interrupt mask as it was but make sure that timer interrupts
	 * are enabled
	 */
	uasm_i_addiu(&p, K0, ZERO, ST0_EXL | KSU_USER | ST0_IE);
	uasm_i_andi(&p, V0, V0, ST0_IM);
	uasm_i_or(&p, K0, K0, V0);
	uasm_i_mtc0(&p, K0, C0_STATUS);
	uasm_i_ehb(&p);

	p = kvm_mips_build_enter_guest(p);

	return p;
}

/**
 * kvm_mips_build_enter_guest() - Assemble code to resume guest execution.
 * @addr:	Address to start writing code.
 *
 * Assemble the code to resume guest execution. This code is common between the
 * initial entry into the guest from the host, and returning from the exit
 * handler back to the guest.
 *
 * Returns:	Next address after end of written function.
 */
static void *kvm_mips_build_enter_guest(void *addr)
{
	u32 *p = addr;
	unsigned int i;
	struct uasm_label labels[2];
	struct uasm_reloc relocs[2];
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	/* Set Guest EPC */
	UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, pc), K1);
	uasm_i_mtc0(&p, T0, C0_EPC);

	/* Set the ASID for the Guest Kernel */
	UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, cop0), K1);
	UASM_i_LW(&p, T0, offsetof(struct mips_coproc, reg[MIPS_CP0_STATUS][0]),
		  T0);
	uasm_i_andi(&p, T0, T0, KSU_USER | ST0_ERL | ST0_EXL);
	uasm_i_xori(&p, T0, T0, KSU_USER);
	uasm_il_bnez(&p, &r, T0, label_kernel_asid);
	uasm_i_addiu(&p, T1, K1,
		     offsetof(struct kvm_vcpu_arch, guest_kernel_asid));
	/* else user */
	uasm_i_addiu(&p, T1, K1,
		     offsetof(struct kvm_vcpu_arch, guest_user_asid));
	uasm_l_kernel_asid(&l, p);
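
	/*
	 * Note the delay-slot trick above: the addiu selecting the kernel
	 * ASID array sits in the bnez's delay slot, so it executes on both
	 * paths; on the not-taken (user) path the second addiu then
	 * overwrites t1 with the user ASID array offset.
	 */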

	/* t1: contains the base of the ASID array, need to get the cpu id */
	/* smp_processor_id */
	UASM_i_LW(&p, T2, offsetof(struct thread_info, cpu), GP);
	/* x4 */
	uasm_i_sll(&p, T2, T2, 2);
	UASM_i_ADDU(&p, T3, T1, T2);
	UASM_i_LW(&p, K0, 0, T3);
#ifdef CONFIG_MIPS_ASID_BITS_VARIABLE
	/* x sizeof(struct cpuinfo_mips)/4 */
	uasm_i_addiu(&p, T3, ZERO, sizeof(struct cpuinfo_mips)/4);
	uasm_i_mul(&p, T2, T2, T3);

	UASM_i_LA_mostly(&p, AT, (long)&cpu_data[0].asid_mask);
	UASM_i_ADDU(&p, AT, AT, T2);
	UASM_i_LW(&p, T2, uasm_rel_lo((long)&cpu_data[0].asid_mask), AT);
	uasm_i_and(&p, K0, K0, T2);
#else
	uasm_i_andi(&p, K0, K0, MIPS_ENTRYHI_ASID);
#endif
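
	/*
	 * Index arithmetic in the variable-ASID case above: t2 already holds
	 * cpu * 4 (the ASID array has 32-bit entries), so multiplying it by
	 * sizeof(struct cpuinfo_mips)/4 yields cpu * sizeof(struct
	 * cpuinfo_mips), the byte offset of cpu_data[cpu].asid_mask from
	 * cpu_data[0].asid_mask.
	 */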
	uasm_i_mtc0(&p, K0, C0_ENTRYHI);
	uasm_i_ehb(&p);

	/* Disable RDHWR access */
	uasm_i_mtc0(&p, ZERO, C0_HWRENA);

	/* load the guest context from VCPU and return */
	for (i = 1; i < 32; ++i) {
		/* Guest k0/k1 loaded later */
		if (i == K0 || i == K1)
			continue;
		UASM_i_LW(&p, i, offsetof(struct kvm_vcpu_arch, gprs[i]), K1);
	}

	/* Restore hi/lo */
	UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, hi), K1);
	uasm_i_mthi(&p, K0);

	UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, lo), K1);
	uasm_i_mtlo(&p, K0);

	/* Restore the guest's k0/k1 registers */
	UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, gprs[K0]), K1);
	UASM_i_LW(&p, K1, offsetof(struct kvm_vcpu_arch, gprs[K1]), K1);
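
	/*
	 * k1 is restored last since it is the base pointer for the guest
	 * context; once gprs[K1] is loaded into k1, no further vcpu_arch
	 * accesses are possible before the eret below.
	 */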

	/* Jump to guest */
	uasm_i_eret(&p);

	uasm_resolve_relocs(relocs, labels);

	return p;
}

/**
 * kvm_mips_build_exception() - Assemble first level guest exception handler.
 * @addr:	Address to start writing code.
 * @handler:	Address of common handler (within range of @addr).
 *
 * Assemble exception vector code for guest execution. The generated vector will
 * branch to the common exception handler generated by kvm_mips_build_exit().
 *
 * Returns:	Next address after end of written function.
 */
void *kvm_mips_build_exception(void *addr, void *handler)
{
	u32 *p = addr;
	struct uasm_label labels[2];
	struct uasm_reloc relocs[2];
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	/* Save guest k1 into scratch register */
	uasm_i_mtc0(&p, K1, scratch_tmp[0], scratch_tmp[1]);

	/* Get the VCPU pointer from the VCPU scratch register */
	uasm_i_mfc0(&p, K1, scratch_vcpu[0], scratch_vcpu[1]);
	uasm_i_addiu(&p, K1, K1, offsetof(struct kvm_vcpu, arch));

	/* Save guest k0 into VCPU structure */
	UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, gprs[K0]), K1);

	/* Branch to the common handler */
	uasm_il_b(&p, &r, label_exit_common);
	uasm_i_nop(&p);

	uasm_l_exit_common(&l, handler);
	uasm_resolve_relocs(relocs, labels);

	return p;
}

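/*
 * The vector body above is kept minimal (save k0/k1, recover the vcpu
 * pointer, branch) since exception vectors have to fit within small
 * fixed-size slots relative to EBASE; the heavy lifting is deferred to the
 * common handler generated by kvm_mips_build_exit().
 */
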
/**
 * kvm_mips_build_exit() - Assemble common guest exit handler.
 * @addr:	Address to start writing code.
 *
 * Assemble the generic guest exit handling code. This is called by the
 * exception vectors (generated by kvm_mips_build_exception()), and calls
 * kvm_mips_handle_exit(), then either resumes the guest or returns to the host
 * depending on the return value.
 *
 * Returns:	Next address after end of written function.
 */
void *kvm_mips_build_exit(void *addr)
{
	u32 *p = addr;
	unsigned int i;
	struct uasm_label labels[3];
	struct uasm_reloc relocs[3];
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	/*
	 * Generic Guest exception handler. We end up here when the guest
	 * does something that causes a trap to kernel mode.
	 *
	 * Both k0/k1 registers will have already been saved (k0 into the vcpu
	 * structure, and k1 into the scratch_tmp register).
	 *
	 * The k1 register will already contain the kvm_vcpu_arch pointer.
	 */

	/* Start saving Guest context to VCPU */
	for (i = 0; i < 32; ++i) {
		/* Guest k0/k1 saved later */
		if (i == K0 || i == K1)
			continue;
		UASM_i_SW(&p, i, offsetof(struct kvm_vcpu_arch, gprs[i]), K1);
	}

	/* We need to save hi/lo and restore them on the way out */
	uasm_i_mfhi(&p, T0);
	UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, hi), K1);

	uasm_i_mflo(&p, T0);
	UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, lo), K1);

	/* Finally save guest k1 to VCPU */
	uasm_i_ehb(&p);
	uasm_i_mfc0(&p, T0, scratch_tmp[0], scratch_tmp[1]);
	UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, gprs[K1]), K1);
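
	/*
	 * The ehb above clears the CP0 execution hazard between the vector's
	 * mtc0 (which stashed guest k1 in the scratch register) and the mfc0
	 * that reads it back here.
	 */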

	/* Now that context has been saved, we can use other registers */

	/* Restore vcpu */
	uasm_i_mfc0(&p, A1, scratch_vcpu[0], scratch_vcpu[1]);
	uasm_i_move(&p, S1, A1);

	/* Restore run (vcpu->run) */
	UASM_i_LW(&p, A0, offsetof(struct kvm_vcpu, run), A1);
	/* Save pointer to run in s0, will be saved by the compiler */
	uasm_i_move(&p, S0, A0);

	/*
	 * Save Host level EPC, BadVaddr and Cause to VCPU, useful to process
	 * the exception
	 */
	uasm_i_mfc0(&p, K0, C0_EPC);
	UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, pc), K1);

	uasm_i_mfc0(&p, K0, C0_BADVADDR);
	UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, host_cp0_badvaddr),
		  K1);

	uasm_i_mfc0(&p, K0, C0_CAUSE);
	uasm_i_sw(&p, K0, offsetof(struct kvm_vcpu_arch, host_cp0_cause), K1);
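
	/*
	 * Note the lowercase uasm_i_sw for Cause: host_cp0_cause is a 32-bit
	 * field, whereas the UASM_i_SW macro used for the others emits a
	 * native-width (sw/sd) store for long-sized fields.
	 */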

	/* Now restore the host state just enough to run the handlers */

	/* Switch EBASE to the one used by Linux */
	/* load up the host EBASE */
	uasm_i_mfc0(&p, V0, C0_STATUS);

	uasm_i_lui(&p, AT, ST0_BEV >> 16);
	uasm_i_or(&p, K0, V0, AT);

	uasm_i_mtc0(&p, K0, C0_STATUS);
	uasm_i_ehb(&p);

	UASM_i_LA_mostly(&p, K0, (long)&ebase);
	UASM_i_LW(&p, K0, uasm_rel_lo((long)&ebase), K0);
	uasm_i_mtc0(&p, K0, C0_EBASE);

	if (raw_cpu_has_fpu) {
		/*
		 * If FPU is enabled, save FCR31 and clear it so that later
		 * ctc1's don't trigger FPE for pending exceptions.
		 */
		uasm_i_lui(&p, AT, ST0_CU1 >> 16);
		uasm_i_and(&p, V1, V0, AT);
		uasm_il_beqz(&p, &r, V1, label_fpu_1);
		uasm_i_nop(&p);
		uasm_i_cfc1(&p, T0, 31);
		uasm_i_sw(&p, T0, offsetof(struct kvm_vcpu_arch, fpu.fcr31),
			  K1);
		uasm_i_ctc1(&p, ZERO, 31);
		uasm_l_fpu_1(&l, p);
	}

	if (cpu_has_msa) {
		/*
		 * If MSA is enabled, save MSACSR and clear it so that later
		 * instructions don't trigger MSAFPE for pending exceptions.
		 */
		uasm_i_mfc0(&p, T0, C0_CONFIG5);
		uasm_i_ext(&p, T0, T0, 27, 1); /* MIPS_CONF5_MSAEN */
		uasm_il_beqz(&p, &r, T0, label_msa_1);
		uasm_i_nop(&p);
		uasm_i_cfcmsa(&p, T0, MSA_CSR);
		uasm_i_sw(&p, T0, offsetof(struct kvm_vcpu_arch, fpu.msacsr),
			  K1);
		uasm_i_ctcmsa(&p, MSA_CSR, ZERO);
		uasm_l_msa_1(&l, p);
	}

	/* Now that the new EBASE has been loaded, unset BEV and KSU_USER */
	uasm_i_addiu(&p, AT, ZERO, ~(ST0_EXL | KSU_USER | ST0_IE));
	uasm_i_and(&p, V0, V0, AT);
	uasm_i_lui(&p, AT, ST0_CU0 >> 16);
	uasm_i_or(&p, V0, V0, AT);
	uasm_i_mtc0(&p, V0, C0_STATUS);
	uasm_i_ehb(&p);

	/* Load up host GP */
	UASM_i_LW(&p, GP, offsetof(struct kvm_vcpu_arch, host_gp), K1);

	/* Need a stack before we can jump to "C" */
	UASM_i_LW(&p, SP, offsetof(struct kvm_vcpu_arch, host_stack), K1);

	/* Saved host state */
	uasm_i_addiu(&p, SP, SP, -(int)sizeof(struct pt_regs));

	/*
	 * XXXKYMA do we need to load the host ASID, maybe not because the
	 * kernel entries are marked GLOBAL, need to verify
	 */

	/* Restore host scratch registers, as we'll have clobbered them */
	kvm_mips_build_restore_scratch(&p, K0, SP);

	/* Restore RDHWR access */
	UASM_i_LA_mostly(&p, K0, (long)&hwrena);
	uasm_i_lw(&p, K0, uasm_rel_lo((long)&hwrena), K0);
	uasm_i_mtc0(&p, K0, C0_HWRENA);

	/* Jump to handler */
	/*
	 * XXXKYMA: not sure if this is safe, how large is the stack??
	 * Now jump to the kvm_mips_handle_exit() to see if we can deal
	 * with this in the kernel
	 */
	UASM_i_LA(&p, T9, (unsigned long)kvm_mips_handle_exit);
	uasm_i_jalr(&p, RA, T9);
	uasm_i_addiu(&p, SP, SP, -CALLFRAME_SIZ);
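
	/*
	 * The addiu above sits in the jalr's delay slot, so the CALLFRAME_SIZ
	 * (32 byte) argument save area is carved out of the stack just as
	 * kvm_mips_handle_exit() is entered.
	 */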

	uasm_resolve_relocs(relocs, labels);

	p = kvm_mips_build_ret_from_exit(p);

	return p;
}

/**
 * kvm_mips_build_ret_from_exit() - Assemble guest exit return handler.
 * @addr:	Address to start writing code.
 *
 * Assemble the code to handle the return from kvm_mips_handle_exit(), either
 * resuming the guest or returning to the host depending on the return value.
 *
 * Returns:	Next address after end of written function.
 */
static void *kvm_mips_build_ret_from_exit(void *addr)
{
	u32 *p = addr;
	struct uasm_label labels[2];
	struct uasm_reloc relocs[2];
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	/* Return from handler, make sure interrupts are disabled */
	uasm_i_di(&p, ZERO);
	uasm_i_ehb(&p);

	/*
	 * XXXKYMA: k0/k1 could have been blown away if we processed
	 * an exception while we were handling the exception from the
	 * guest, reload k1
	 */

	uasm_i_move(&p, K1, S1);
	uasm_i_addiu(&p, K1, K1, offsetof(struct kvm_vcpu, arch));

	/*
	 * Check return value, should tell us if we are returning to the
	 * host (handle I/O etc) or resuming the guest
	 */
	uasm_i_andi(&p, T0, V0, RESUME_HOST);
	uasm_il_bnez(&p, &r, T0, label_return_to_host);
	uasm_i_nop(&p);

	p = kvm_mips_build_ret_to_guest(p);

	uasm_l_return_to_host(&l, p);
	p = kvm_mips_build_ret_to_host(p);

	uasm_resolve_relocs(relocs, labels);

	return p;
}

/**
 * kvm_mips_build_ret_to_guest() - Assemble code to return to the guest.
 * @addr:	Address to start writing code.
 *
 * Assemble the code to handle return from the guest exit handler
 * (kvm_mips_handle_exit()) back to the guest.
 *
 * Returns:	Next address after end of written function.
 */
static void *kvm_mips_build_ret_to_guest(void *addr)
{
	u32 *p = addr;

	/* Put the saved pointer to vcpu (s1) back into the scratch register */
	uasm_i_mtc0(&p, S1, scratch_vcpu[0], scratch_vcpu[1]);

	/* Load up the Guest EBASE to minimize the window where BEV is set */
	UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, guest_ebase), K1);

	/* Switch EBASE back to the one used by KVM */
	uasm_i_mfc0(&p, V1, C0_STATUS);
	uasm_i_lui(&p, AT, ST0_BEV >> 16);
	uasm_i_or(&p, K0, V1, AT);
	uasm_i_mtc0(&p, K0, C0_STATUS);
	uasm_i_ehb(&p);
	uasm_i_mtc0(&p, T0, C0_EBASE);

	/* Setup status register for running guest in UM */
	uasm_i_ori(&p, V1, V1, ST0_EXL | KSU_USER | ST0_IE);
	UASM_i_LA(&p, AT, ~(ST0_CU0 | ST0_MX));
	uasm_i_and(&p, V1, V1, AT);
	uasm_i_mtc0(&p, V1, C0_STATUS);
	uasm_i_ehb(&p);

	p = kvm_mips_build_enter_guest(p);

	return p;
}

/**
 * kvm_mips_build_ret_to_host() - Assemble code to return to the host.
 * @addr:	Address to start writing code.
 *
 * Assemble the code to handle return from the guest exit handler
 * (kvm_mips_handle_exit()) back to the host, i.e. to the caller of the vcpu_run
 * function generated by kvm_mips_build_vcpu_run().
 *
 * Returns:	Next address after end of written function.
 */
static void *kvm_mips_build_ret_to_host(void *addr)
{
	u32 *p = addr;
	unsigned int i;

	/* EBASE is already pointing to Linux */
	UASM_i_LW(&p, K1, offsetof(struct kvm_vcpu_arch, host_stack), K1);
	uasm_i_addiu(&p, K1, K1, -(int)sizeof(struct pt_regs));

	/*
	 * r2/v0 is the return code, shift it down by 2 (arithmetic)
	 * to recover the err code
	 */
	uasm_i_sra(&p, K0, V0, 2);
	uasm_i_move(&p, V0, K0);
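
	/*
	 * The low two bits of the value returned by kvm_mips_handle_exit()
	 * carry the RESUME_* flags tested in kvm_mips_build_ret_from_exit();
	 * the arithmetic shift above discards them, leaving the (possibly
	 * negative) error code to hand back to the caller of vcpu_run().
	 */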

	/* Load context saved on the host stack */
	for (i = 16; i < 31; ++i) {
		if (i == 24)
			i = 28;
		UASM_i_LW(&p, i, offsetof(struct pt_regs, regs[i]), K1);
	}

	UASM_i_LW(&p, K0, offsetof(struct pt_regs, hi), K1);
	uasm_i_mthi(&p, K0);

	UASM_i_LW(&p, K0, offsetof(struct pt_regs, lo), K1);
	uasm_i_mtlo(&p, K0);

	/* Restore RDHWR access */
	UASM_i_LA_mostly(&p, K0, (long)&hwrena);
	uasm_i_lw(&p, K0, uasm_rel_lo((long)&hwrena), K0);
	uasm_i_mtc0(&p, K0, C0_HWRENA);

	/* Restore RA, which is the address we will return to */
	UASM_i_LW(&p, RA, offsetof(struct pt_regs, regs[RA]), K1);
	uasm_i_jr(&p, RA);
	uasm_i_nop(&p);

	return p;
}