KVM: MIPS/VZ: Support guest load-linked bit
arch/mips/kvm/vz.c
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * KVM/MIPS: Support for hardware virtualization extensions
7 *
8 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
9 * Authors: Yann Le Du <ledu@kymasys.com>
10 */
11
12#include <linux/errno.h>
13#include <linux/err.h>
14#include <linux/module.h>
15#include <linux/preempt.h>
16#include <linux/vmalloc.h>
17#include <asm/cacheflush.h>
18#include <asm/cacheops.h>
19#include <asm/cmpxchg.h>
20#include <asm/fpu.h>
21#include <asm/hazards.h>
22#include <asm/inst.h>
23#include <asm/mmu_context.h>
24#include <asm/r4kcache.h>
25#include <asm/time.h>
26#include <asm/tlb.h>
27#include <asm/tlbex.h>
28
29#include <linux/kvm_host.h>
30
31#include "interrupt.h"
32
33#include "trace.h"
34
35/* Pointers to last VCPU loaded on each physical CPU */
36static struct kvm_vcpu *last_vcpu[NR_CPUS];
37/* Pointers to last VCPU executed on each physical CPU */
38static struct kvm_vcpu *last_exec_vcpu[NR_CPUS];
39
40/*
41 * Number of guest VTLB entries to use, so we can catch inconsistency between
42 * CPUs.
43 */
44static unsigned int kvm_vz_guest_vtlb_size;
45
46static inline long kvm_vz_read_gc0_ebase(void)
47{
48 if (sizeof(long) == 8 && cpu_has_ebase_wg)
49 return read_gc0_ebase_64();
50 else
51 return read_gc0_ebase();
52}
53
54static inline void kvm_vz_write_gc0_ebase(long v)
55{
56 /*
57 * First write with WG=1 to write upper bits, then write again in case
58 * WG should be left at 0.
59 * write_gc0_ebase_64() is no longer UNDEFINED since R6.
60 */
61 if (sizeof(long) == 8 &&
62 (cpu_has_mips64r6 || cpu_has_ebase_wg)) {
63 write_gc0_ebase_64(v | MIPS_EBASE_WG);
64 write_gc0_ebase_64(v);
65 } else {
66 write_gc0_ebase(v | MIPS_EBASE_WG);
67 write_gc0_ebase(v);
68 }
69}
70
71/*
72 * These Config bits may be writable by the guest:
73 * Config: [K23, KU] (!TLB), K0
74 * Config1: (none)
75 * Config2: [TU, SU] (impl)
76 * Config3: ISAOnExc
77 * Config4: FTLBPageSize
78 * Config5: K, CV, MSAEn, UFE, FRE, SBRI, UFR
79 */
80
81static inline unsigned int kvm_vz_config_guest_wrmask(struct kvm_vcpu *vcpu)
82{
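	/* Guest may modify only the Config.K0 (kseg0 cacheability) field */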
83 return CONF_CM_CMASK;
84}
85
86static inline unsigned int kvm_vz_config1_guest_wrmask(struct kvm_vcpu *vcpu)
87{
88 return 0;
89}
90
91static inline unsigned int kvm_vz_config2_guest_wrmask(struct kvm_vcpu *vcpu)
92{
93 return 0;
94}
95
96static inline unsigned int kvm_vz_config3_guest_wrmask(struct kvm_vcpu *vcpu)
97{
98 return MIPS_CONF3_ISA_OE;
99}
100
101static inline unsigned int kvm_vz_config4_guest_wrmask(struct kvm_vcpu *vcpu)
102{
103 /* no need to be exact */
104 return MIPS_CONF4_VFTLBPAGESIZE;
105}
106
107static inline unsigned int kvm_vz_config5_guest_wrmask(struct kvm_vcpu *vcpu)
108{
109 unsigned int mask = MIPS_CONF5_K | MIPS_CONF5_CV | MIPS_CONF5_SBRI;
110
111 /* Permit MSAEn changes if MSA supported and enabled */
112 if (kvm_mips_guest_has_msa(&vcpu->arch))
113 mask |= MIPS_CONF5_MSAEN;
114
115 /*
116 * Permit guest FPU mode changes if FPU is enabled and the relevant
117 * feature exists according to FIR register.
118 */
119 if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
120 if (cpu_has_ufr)
121 mask |= MIPS_CONF5_UFR;
122 if (cpu_has_fre)
123 mask |= MIPS_CONF5_FRE | MIPS_CONF5_UFE;
124 }
125
126 return mask;
127}
128
129/*
130 * VZ optionally allows these additional Config bits to be written by root:
131 * Config: M, [MT]
132 * Config1: M, [MMUSize-1, C2, MD, PC, WR, CA], FP
133 * Config2: M
 134 * Config3: M, MSAP, [BPG], ULRI, [DSP2P, DSPP], CTXTC, [ITL, LPA, VEIC,
135 * VInt, SP, CDMM, MT, SM, TL]
136 * Config4: M, [VTLBSizeExt, MMUSizeExt]
137 * Config5: [MRP]
138 */
139
140static inline unsigned int kvm_vz_config_user_wrmask(struct kvm_vcpu *vcpu)
141{
142 return kvm_vz_config_guest_wrmask(vcpu) | MIPS_CONF_M;
143}
144
145static inline unsigned int kvm_vz_config1_user_wrmask(struct kvm_vcpu *vcpu)
146{
147 unsigned int mask = kvm_vz_config1_guest_wrmask(vcpu) | MIPS_CONF_M;
148
149 /* Permit FPU to be present if FPU is supported */
150 if (kvm_mips_guest_can_have_fpu(&vcpu->arch))
151 mask |= MIPS_CONF1_FP;
152
153 return mask;
154}
155
156static inline unsigned int kvm_vz_config2_user_wrmask(struct kvm_vcpu *vcpu)
157{
158 return kvm_vz_config2_guest_wrmask(vcpu) | MIPS_CONF_M;
159}
160
161static inline unsigned int kvm_vz_config3_user_wrmask(struct kvm_vcpu *vcpu)
162{
163 unsigned int mask = kvm_vz_config3_guest_wrmask(vcpu) | MIPS_CONF_M |
 164			MIPS_CONF3_ULRI | MIPS_CONF3_CTXTC;
165
166 /* Permit MSA to be present if MSA is supported */
167 if (kvm_mips_guest_can_have_msa(&vcpu->arch))
168 mask |= MIPS_CONF3_MSA;
169
170 return mask;
171}
172
173static inline unsigned int kvm_vz_config4_user_wrmask(struct kvm_vcpu *vcpu)
174{
175 return kvm_vz_config4_guest_wrmask(vcpu) | MIPS_CONF_M;
176}
177
178static inline unsigned int kvm_vz_config5_user_wrmask(struct kvm_vcpu *vcpu)
179{
180 return kvm_vz_config5_guest_wrmask(vcpu);
181}
182
183static gpa_t kvm_vz_gva_to_gpa_cb(gva_t gva)
184{
185 /* VZ guest has already converted gva to gpa */
186 return gva;
187}
188
189static void kvm_vz_queue_irq(struct kvm_vcpu *vcpu, unsigned int priority)
190{
191 set_bit(priority, &vcpu->arch.pending_exceptions);
192 clear_bit(priority, &vcpu->arch.pending_exceptions_clr);
193}
194
195static void kvm_vz_dequeue_irq(struct kvm_vcpu *vcpu, unsigned int priority)
196{
197 clear_bit(priority, &vcpu->arch.pending_exceptions);
198 set_bit(priority, &vcpu->arch.pending_exceptions_clr);
199}
200
201static void kvm_vz_queue_timer_int_cb(struct kvm_vcpu *vcpu)
202{
203 /*
204 * timer expiry is asynchronous to vcpu execution therefore defer guest
205 * cp0 accesses
206 */
207 kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_TIMER);
208}
209
210static void kvm_vz_dequeue_timer_int_cb(struct kvm_vcpu *vcpu)
211{
212 /*
213 * timer expiry is asynchronous to vcpu execution therefore defer guest
214 * cp0 accesses
215 */
216 kvm_vz_dequeue_irq(vcpu, MIPS_EXC_INT_TIMER);
217}
218
219static void kvm_vz_queue_io_int_cb(struct kvm_vcpu *vcpu,
220 struct kvm_mips_interrupt *irq)
221{
222 int intr = (int)irq->irq;
223
224 /*
225 * interrupts are asynchronous to vcpu execution therefore defer guest
226 * cp0 accesses
227 */
228 switch (intr) {
229 case 2:
230 kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_IO);
231 break;
232
233 case 3:
234 kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_IPI_1);
235 break;
236
237 case 4:
238 kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_IPI_2);
239 break;
240
241 default:
242 break;
243 }
244
245}
246
247static void kvm_vz_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
248 struct kvm_mips_interrupt *irq)
249{
250 int intr = (int)irq->irq;
251
252 /*
253 * interrupts are asynchronous to vcpu execution therefore defer guest
254 * cp0 accesses
255 */
256 switch (intr) {
257 case -2:
258 kvm_vz_dequeue_irq(vcpu, MIPS_EXC_INT_IO);
259 break;
260
261 case -3:
262 kvm_vz_dequeue_irq(vcpu, MIPS_EXC_INT_IPI_1);
263 break;
264
265 case -4:
266 kvm_vz_dequeue_irq(vcpu, MIPS_EXC_INT_IPI_2);
267 break;
268
269 default:
270 break;
271 }
272
273}
274
275static u32 kvm_vz_priority_to_irq[MIPS_EXC_MAX] = {
276 [MIPS_EXC_INT_TIMER] = C_IRQ5,
277 [MIPS_EXC_INT_IO] = C_IRQ0,
278 [MIPS_EXC_INT_IPI_1] = C_IRQ1,
279 [MIPS_EXC_INT_IPI_2] = C_IRQ2,
280};
281
282static int kvm_vz_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
283 u32 cause)
284{
285 u32 irq = (priority < MIPS_EXC_MAX) ?
286 kvm_vz_priority_to_irq[priority] : 0;
287
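	/*
	 * Timer interrupts are asserted to the guest via Cause.TI. External
	 * interrupts use the GuestCtl2 virtual interrupt pending bits when
	 * implemented, otherwise the guest Cause.IP bits are set directly.
	 */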
288 switch (priority) {
289 case MIPS_EXC_INT_TIMER:
290 set_gc0_cause(C_TI);
291 break;
292
293 case MIPS_EXC_INT_IO:
294 case MIPS_EXC_INT_IPI_1:
295 case MIPS_EXC_INT_IPI_2:
296 if (cpu_has_guestctl2)
297 set_c0_guestctl2(irq);
298 else
299 set_gc0_cause(irq);
300 break;
301
302 default:
303 break;
304 }
305
306 clear_bit(priority, &vcpu->arch.pending_exceptions);
307 return 1;
308}
309
310static int kvm_vz_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
311 u32 cause)
312{
313 u32 irq = (priority < MIPS_EXC_MAX) ?
314 kvm_vz_priority_to_irq[priority] : 0;
315
316 switch (priority) {
317 case MIPS_EXC_INT_TIMER:
318 /*
319 * Call to kvm_write_c0_guest_compare() clears Cause.TI in
320 * kvm_mips_emulate_CP0(). Explicitly clear irq associated with
321 * Cause.IP[IPTI] if GuestCtl2 virtual interrupt register not
322 * supported or if not using GuestCtl2 Hardware Clear.
323 */
324 if (cpu_has_guestctl2) {
325 if (!(read_c0_guestctl2() & (irq << 14)))
326 clear_c0_guestctl2(irq);
327 } else {
328 clear_gc0_cause(irq);
329 }
330 break;
331
332 case MIPS_EXC_INT_IO:
333 case MIPS_EXC_INT_IPI_1:
334 case MIPS_EXC_INT_IPI_2:
335 /* Clear GuestCtl2.VIP irq if not using Hardware Clear */
336 if (cpu_has_guestctl2) {
337 if (!(read_c0_guestctl2() & (irq << 14)))
338 clear_c0_guestctl2(irq);
339 } else {
340 clear_gc0_cause(irq);
341 }
342 break;
343
344 default:
345 break;
346 }
347
348 clear_bit(priority, &vcpu->arch.pending_exceptions_clr);
349 return 1;
350}
351
352/*
353 * VZ guest timer handling.
354 */
355
356/**
357 * _kvm_vz_restore_stimer() - Restore soft timer state.
358 * @vcpu: Virtual CPU.
359 * @compare: CP0_Compare register value, restored by caller.
360 * @cause: CP0_Cause register to restore.
361 *
362 * Restore VZ state relating to the soft timer.
363 */
364static void _kvm_vz_restore_stimer(struct kvm_vcpu *vcpu, u32 compare,
365 u32 cause)
366{
367 /*
368 * Avoid spurious counter interrupts by setting Guest CP0_Count to just
369 * after Guest CP0_Compare.
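	 * (The guest reads CP0_Count as root CP0_Count + CP0_GTOffset.)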
370 */
371 write_c0_gtoffset(compare - read_c0_count());
372
373 back_to_back_c0_hazard();
374 write_gc0_cause(cause);
375}
376
377/**
378 * kvm_vz_restore_timer() - Restore guest timer state.
379 * @vcpu: Virtual CPU.
380 *
381 * Restore soft timer state from saved context.
382 */
383static void kvm_vz_restore_timer(struct kvm_vcpu *vcpu)
384{
385 struct mips_coproc *cop0 = vcpu->arch.cop0;
386 u32 cause, compare;
387
388 compare = kvm_read_sw_gc0_compare(cop0);
389 cause = kvm_read_sw_gc0_cause(cop0);
390
391 write_gc0_compare(compare);
392 _kvm_vz_restore_stimer(vcpu, compare, cause);
393}
394
395/**
396 * kvm_vz_save_timer() - Save guest timer state.
397 * @vcpu: Virtual CPU.
398 *
399 * Save VZ guest timer state.
400 */
401static void kvm_vz_save_timer(struct kvm_vcpu *vcpu)
402{
403 struct mips_coproc *cop0 = vcpu->arch.cop0;
404 u32 compare, cause;
405
406 compare = read_gc0_compare();
407 cause = read_gc0_cause();
408
409 /* save timer-related state to VCPU context */
410 kvm_write_sw_gc0_cause(cop0, cause);
411 kvm_write_sw_gc0_compare(cop0, compare);
412}
413
414/**
415 * is_eva_access() - Find whether an instruction is an EVA memory accessor.
416 * @inst: 32-bit instruction encoding.
417 *
418 * Finds whether @inst encodes an EVA memory access instruction, which would
419 * indicate that emulation of it should access the user mode address space
420 * instead of the kernel mode address space. This matters for MUSUK segments
421 * which are TLB mapped for user mode but unmapped for kernel mode.
422 *
423 * Returns: Whether @inst encodes an EVA accessor instruction.
424 */
425static bool is_eva_access(union mips_instruction inst)
426{
427 if (inst.spec3_format.opcode != spec3_op)
428 return false;
429
430 switch (inst.spec3_format.func) {
431 case lwle_op:
432 case lwre_op:
433 case cachee_op:
434 case sbe_op:
435 case she_op:
436 case sce_op:
437 case swe_op:
438 case swle_op:
439 case swre_op:
440 case prefe_op:
441 case lbue_op:
442 case lhue_op:
443 case lbe_op:
444 case lhe_op:
445 case lle_op:
446 case lwe_op:
447 return true;
448 default:
449 return false;
450 }
451}
452
453/**
454 * is_eva_am_mapped() - Find whether an access mode is mapped.
455 * @vcpu: KVM VCPU state.
456 * @am: 3-bit encoded access mode.
457 * @eu: Segment becomes unmapped and uncached when Status.ERL=1.
458 *
459 * Decode @am to find whether it encodes a mapped segment for the current VCPU
460 * state. Where necessary @eu and the actual instruction causing the fault are
461 * taken into account to make the decision.
462 *
463 * Returns: Whether the VCPU faulted on a TLB mapped address.
464 */
465static bool is_eva_am_mapped(struct kvm_vcpu *vcpu, unsigned int am, bool eu)
466{
467 u32 am_lookup;
468 int err;
469
470 /*
471 * Interpret access control mode. We assume address errors will already
472 * have been caught by the guest, leaving us with:
473 * AM UM SM KM 31..24 23..16
474 * UK 0 000 Unm 0 0
475 * MK 1 001 TLB 1
476 * MSK 2 010 TLB TLB 1
477 * MUSK 3 011 TLB TLB TLB 1
478 * MUSUK 4 100 TLB TLB Unm 0 1
479 * USK 5 101 Unm Unm 0 0
480 * - 6 110 0 0
481 * UUSK 7 111 Unm Unm Unm 0 0
482 *
483 * We shift a magic value by AM across the sign bit to find if always
484 * TLB mapped, and if not shift by 8 again to find if it depends on KM.
485 */
486 am_lookup = 0x70080000 << am;
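	/*
	 * For example, AM=1 (MK): 0x70080000 << 1 = 0xe0100000, bit 31 set,
	 * so always TLB mapped. AM=4 (MUSUK): 0x70080000 << 4 = 0x00800000,
	 * bit 31 clear; the further << 8 below gives 0x80000000, so mapped
	 * depending on kernel mode.
	 */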
487 if ((s32)am_lookup < 0) {
488 /*
489 * MK, MSK, MUSK
490 * Always TLB mapped, unless SegCtl.EU && ERL
491 */
492 if (!eu || !(read_gc0_status() & ST0_ERL))
493 return true;
494 } else {
495 am_lookup <<= 8;
496 if ((s32)am_lookup < 0) {
497 union mips_instruction inst;
498 unsigned int status;
499 u32 *opc;
500
501 /*
502 * MUSUK
503 * TLB mapped if not in kernel mode
504 */
505 status = read_gc0_status();
506 if (!(status & (ST0_EXL | ST0_ERL)) &&
507 (status & ST0_KSU))
508 return true;
509 /*
510 * EVA access instructions in kernel
511 * mode access user address space.
512 */
513 opc = (u32 *)vcpu->arch.pc;
514 if (vcpu->arch.host_cp0_cause & CAUSEF_BD)
515 opc += 1;
516 err = kvm_get_badinstr(opc, vcpu, &inst.word);
517 if (!err && is_eva_access(inst))
518 return true;
519 }
520 }
521
522 return false;
523}
524
525/**
526 * kvm_vz_gva_to_gpa() - Convert valid GVA to GPA.
527 * @vcpu: KVM VCPU state.
528 * @gva: Guest virtual address to convert.
529 * @gpa: Output guest physical address.
530 *
531 * Convert a guest virtual address (GVA) which is valid according to the guest
532 * context, to a guest physical address (GPA).
533 *
534 * Returns: 0 on success.
535 * -errno on failure.
536 */
537static int kvm_vz_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
538 unsigned long *gpa)
539{
540 u32 gva32 = gva;
 541	unsigned long segctl;
542
543 if ((long)gva == (s32)gva32) {
544 /* Handle canonical 32-bit virtual address */
545 if (cpu_guest_has_segments) {
546 unsigned long mask, pa;
547
548 switch (gva32 >> 29) {
549 case 0:
550 case 1: /* CFG5 (1GB) */
551 segctl = read_gc0_segctl2() >> 16;
552 mask = (unsigned long)0xfc0000000ull;
553 break;
554 case 2:
555 case 3: /* CFG4 (1GB) */
556 segctl = read_gc0_segctl2();
557 mask = (unsigned long)0xfc0000000ull;
558 break;
559 case 4: /* CFG3 (512MB) */
560 segctl = read_gc0_segctl1() >> 16;
561 mask = (unsigned long)0xfe0000000ull;
562 break;
563 case 5: /* CFG2 (512MB) */
564 segctl = read_gc0_segctl1();
565 mask = (unsigned long)0xfe0000000ull;
566 break;
567 case 6: /* CFG1 (512MB) */
568 segctl = read_gc0_segctl0() >> 16;
569 mask = (unsigned long)0xfe0000000ull;
570 break;
571 case 7: /* CFG0 (512MB) */
572 segctl = read_gc0_segctl0();
573 mask = (unsigned long)0xfe0000000ull;
574 break;
575 default:
576 /*
577 * GCC 4.9 isn't smart enough to figure out that
578 * segctl and mask are always initialised.
579 */
580 unreachable();
581 }
582
583 if (is_eva_am_mapped(vcpu, (segctl >> 4) & 0x7,
584 segctl & 0x0008))
585 goto tlb_mapped;
586
587 /* Unmapped, find guest physical address */
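			/*
			 * The segment's PA field (bits 15:9 of the CFG value)
			 * supplies physical address bits 35:29 of the segment
			 * base.
			 */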
588 pa = (segctl << 20) & mask;
589 pa |= gva32 & ~mask;
590 *gpa = pa;
591 return 0;
592 } else if ((s32)gva32 < (s32)0xc0000000) {
593 /* legacy unmapped KSeg0 or KSeg1 */
594 *gpa = gva32 & 0x1fffffff;
595 return 0;
596 }
597#ifdef CONFIG_64BIT
598 } else if ((gva & 0xc000000000000000) == 0x8000000000000000) {
599 /* XKPHYS */
600 if (cpu_guest_has_segments) {
601 /*
602 * Each of the 8 regions can be overridden by SegCtl2.XR
603 * to use SegCtl1.XAM.
604 */
605 segctl = read_gc0_segctl2();
606 if (segctl & (1ull << (56 + ((gva >> 59) & 0x7)))) {
607 segctl = read_gc0_segctl1();
608 if (is_eva_am_mapped(vcpu, (segctl >> 59) & 0x7,
609 0))
610 goto tlb_mapped;
611 }
612
613 }
614 /*
615 * Traditionally fully unmapped.
616 * Bits 61:59 specify the CCA, which we can just mask off here.
617 * Bits 58:PABITS should be zero, but we shouldn't have got here
618 * if it wasn't.
619 */
620 *gpa = gva & 0x07ffffffffffffff;
621 return 0;
622#endif
623 }
624
 625tlb_mapped:
626 return kvm_vz_guest_tlb_lookup(vcpu, gva, gpa);
627}
628
629/**
630 * kvm_vz_badvaddr_to_gpa() - Convert GVA BadVAddr from root exception to GPA.
631 * @vcpu: KVM VCPU state.
632 * @badvaddr: Root BadVAddr.
633 * @gpa: Output guest physical address.
634 *
635 * VZ implementations are permitted to report guest virtual addresses (GVA) in
636 * BadVAddr on a root exception during guest execution, instead of the more
637 * convenient guest physical addresses (GPA). When we get a GVA, this function
638 * converts it to a GPA, taking into account guest segmentation and guest TLB
639 * state.
640 *
641 * Returns: 0 on success.
642 * -errno on failure.
643 */
644static int kvm_vz_badvaddr_to_gpa(struct kvm_vcpu *vcpu, unsigned long badvaddr,
645 unsigned long *gpa)
646{
647 unsigned int gexccode = (vcpu->arch.host_cp0_guestctl0 &
648 MIPS_GCTL0_GEXC) >> MIPS_GCTL0_GEXC_SHIFT;
649
650 /* If BadVAddr is GPA, then all is well in the world */
651 if (likely(gexccode == MIPS_GCTL0_GEXC_GPA)) {
652 *gpa = badvaddr;
653 return 0;
654 }
655
656 /* Otherwise we'd expect it to be GVA ... */
657 if (WARN(gexccode != MIPS_GCTL0_GEXC_GVA,
658 "Unexpected gexccode %#x\n", gexccode))
659 return -EINVAL;
660
661 /* ... and we need to perform the GVA->GPA translation in software */
662 return kvm_vz_gva_to_gpa(vcpu, badvaddr, gpa);
663}
664
665static int kvm_trap_vz_no_handler(struct kvm_vcpu *vcpu)
666{
667 u32 *opc = (u32 *) vcpu->arch.pc;
668 u32 cause = vcpu->arch.host_cp0_cause;
669 u32 exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
670 unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
671 u32 inst = 0;
672
673 /*
674 * Fetch the instruction.
675 */
676 if (cause & CAUSEF_BD)
677 opc += 1;
678 kvm_get_badinstr(opc, vcpu, &inst);
679
680 kvm_err("Exception Code: %d not handled @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#x\n",
681 exccode, opc, inst, badvaddr,
682 read_gc0_status());
683 kvm_arch_vcpu_dump_regs(vcpu);
684 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
685 return RESUME_HOST;
686}
687
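/*
 * Emulate a trapped CP0 access (WAIT, MFC0/DMFC0, MTC0/DMTC0) that caused a
 * Guest Privileged Sensitive Instruction (GPSI) exit. Only the registers that
 * VZ cannot virtualize directly (Count, Compare, LLAddr and a few read-only
 * identification registers) are handled here; anything else fails emulation
 * and the PC is rolled back.
 */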
688static enum emulation_result kvm_vz_gpsi_cop0(union mips_instruction inst,
689 u32 *opc, u32 cause,
690 struct kvm_run *run,
691 struct kvm_vcpu *vcpu)
692{
693 struct mips_coproc *cop0 = vcpu->arch.cop0;
694 enum emulation_result er = EMULATE_DONE;
695 u32 rt, rd, sel;
696 unsigned long curr_pc;
697 unsigned long val;
698
699 /*
700 * Update PC and hold onto current PC in case there is
701 * an error and we want to rollback the PC
702 */
703 curr_pc = vcpu->arch.pc;
704 er = update_pc(vcpu, cause);
705 if (er == EMULATE_FAIL)
706 return er;
707
708 if (inst.co_format.co) {
709 switch (inst.co_format.func) {
710 case wait_op:
711 er = kvm_mips_emul_wait(vcpu);
712 break;
713 default:
714 er = EMULATE_FAIL;
715 }
716 } else {
717 rt = inst.c0r_format.rt;
718 rd = inst.c0r_format.rd;
719 sel = inst.c0r_format.sel;
720
721 switch (inst.c0r_format.rs) {
722 case dmfc_op:
723 case mfc_op:
724#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
725 cop0->stat[rd][sel]++;
726#endif
727 if (rd == MIPS_CP0_COUNT &&
728 sel == 0) { /* Count */
729 val = kvm_mips_read_count(vcpu);
730 } else if (rd == MIPS_CP0_COMPARE &&
731 sel == 0) { /* Compare */
732 val = read_gc0_compare();
733 } else if (rd == MIPS_CP0_LLADDR &&
734 sel == 0) { /* LLAddr */
735 if (cpu_guest_has_rw_llb)
736 val = read_gc0_lladdr() &
737 MIPS_LLADDR_LLB;
738 else
739 val = 0;
740 } else if ((rd == MIPS_CP0_PRID &&
741 (sel == 0 || /* PRid */
742 sel == 2 || /* CDMMBase */
743 sel == 3)) || /* CMGCRBase */
744 (rd == MIPS_CP0_STATUS &&
745 (sel == 2 || /* SRSCtl */
746 sel == 3)) || /* SRSMap */
747 (rd == MIPS_CP0_CONFIG &&
748 (sel == 7)) || /* Config7 */
749 (rd == MIPS_CP0_ERRCTL &&
750 (sel == 0))) { /* ErrCtl */
751 val = cop0->reg[rd][sel];
752 } else {
753 val = 0;
754 er = EMULATE_FAIL;
755 }
756
757 if (er != EMULATE_FAIL) {
758 /* Sign extend */
759 if (inst.c0r_format.rs == mfc_op)
760 val = (int)val;
761 vcpu->arch.gprs[rt] = val;
762 }
763
764 trace_kvm_hwr(vcpu, (inst.c0r_format.rs == mfc_op) ?
765 KVM_TRACE_MFC0 : KVM_TRACE_DMFC0,
766 KVM_TRACE_COP0(rd, sel), val);
767 break;
768
769 case dmtc_op:
770 case mtc_op:
771#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
772 cop0->stat[rd][sel]++;
773#endif
774 val = vcpu->arch.gprs[rt];
775 trace_kvm_hwr(vcpu, (inst.c0r_format.rs == mtc_op) ?
776 KVM_TRACE_MTC0 : KVM_TRACE_DMTC0,
777 KVM_TRACE_COP0(rd, sel), val);
778
779 if (rd == MIPS_CP0_COUNT &&
780 sel == 0) { /* Count */
781 kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]);
782 } else if (rd == MIPS_CP0_COMPARE &&
783 sel == 0) { /* Compare */
784 kvm_mips_write_compare(vcpu,
785 vcpu->arch.gprs[rt],
786 true);
787 } else if (rd == MIPS_CP0_LLADDR &&
788 sel == 0) { /* LLAddr */
789 /*
790 * P5600 generates GPSI on guest MTC0 LLAddr.
791 * Only allow the guest to clear LLB.
792 */
793 if (cpu_guest_has_rw_llb &&
794 !(val & MIPS_LLADDR_LLB))
795 write_gc0_lladdr(0);
796 } else if (rd == MIPS_CP0_ERRCTL &&
797 (sel == 0)) { /* ErrCtl */
798 /* ignore the written value */
799 } else {
800 er = EMULATE_FAIL;
801 }
802 break;
803
804 default:
805 er = EMULATE_FAIL;
806 break;
807 }
808 }
809 /* Rollback PC only if emulation was unsuccessful */
810 if (er == EMULATE_FAIL) {
811 kvm_err("[%#lx]%s: unsupported cop0 instruction 0x%08x\n",
812 curr_pc, __func__, inst.word);
813
814 vcpu->arch.pc = curr_pc;
815 }
816
817 return er;
818}
819
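/*
 * Emulate a trapped CACHE instruction. Only index-type invalidate/writeback
 * operations on the primary I and D caches are performed (against the host
 * caches); secondary/tertiary cache operations are ignored and anything else
 * fails emulation.
 */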
820static enum emulation_result kvm_vz_gpsi_cache(union mips_instruction inst,
821 u32 *opc, u32 cause,
822 struct kvm_run *run,
823 struct kvm_vcpu *vcpu)
824{
825 enum emulation_result er = EMULATE_DONE;
826 u32 cache, op_inst, op, base;
827 s16 offset;
828 struct kvm_vcpu_arch *arch = &vcpu->arch;
829 unsigned long va, curr_pc;
830
831 /*
832 * Update PC and hold onto current PC in case there is
833 * an error and we want to rollback the PC
834 */
835 curr_pc = vcpu->arch.pc;
836 er = update_pc(vcpu, cause);
837 if (er == EMULATE_FAIL)
838 return er;
839
840 base = inst.i_format.rs;
841 op_inst = inst.i_format.rt;
842 if (cpu_has_mips_r6)
843 offset = inst.spec3_format.simmediate;
844 else
845 offset = inst.i_format.simmediate;
846 cache = op_inst & CacheOp_Cache;
847 op = op_inst & CacheOp_Op;
848
849 va = arch->gprs[base] + offset;
850
851 kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
852 cache, op, base, arch->gprs[base], offset);
853
 854	/* Secondary or tertiary cache ops ignored */
855 if (cache != Cache_I && cache != Cache_D)
856 return EMULATE_DONE;
857
858 switch (op_inst) {
859 case Index_Invalidate_I:
860 flush_icache_line_indexed(va);
861 return EMULATE_DONE;
862 case Index_Writeback_Inv_D:
863 flush_dcache_line_indexed(va);
864 return EMULATE_DONE;
865 default:
866 break;
867 };
868
869 kvm_err("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
870 curr_pc, vcpu->arch.gprs[31], cache, op, base, arch->gprs[base],
871 offset);
872 /* Rollback PC */
873 vcpu->arch.pc = curr_pc;
874
875 return EMULATE_FAIL;
876}
877
878static enum emulation_result kvm_trap_vz_handle_gpsi(u32 cause, u32 *opc,
879 struct kvm_vcpu *vcpu)
880{
881 enum emulation_result er = EMULATE_DONE;
882 struct kvm_vcpu_arch *arch = &vcpu->arch;
883 struct kvm_run *run = vcpu->run;
884 union mips_instruction inst;
885 int rd, rt, sel;
886 int err;
887
888 /*
889 * Fetch the instruction.
890 */
891 if (cause & CAUSEF_BD)
892 opc += 1;
893 err = kvm_get_badinstr(opc, vcpu, &inst.word);
894 if (err)
895 return EMULATE_FAIL;
896
897 switch (inst.r_format.opcode) {
898 case cop0_op:
899 er = kvm_vz_gpsi_cop0(inst, opc, cause, run, vcpu);
900 break;
901#ifndef CONFIG_CPU_MIPSR6
902 case cache_op:
903 trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
904 er = kvm_vz_gpsi_cache(inst, opc, cause, run, vcpu);
905 break;
906#endif
907 case spec3_op:
908 switch (inst.spec3_format.func) {
909#ifdef CONFIG_CPU_MIPSR6
910 case cache6_op:
911 trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
912 er = kvm_vz_gpsi_cache(inst, opc, cause, run, vcpu);
913 break;
914#endif
915 case rdhwr_op:
916 if (inst.r_format.rs || (inst.r_format.re >> 3))
917 goto unknown;
918
919 rd = inst.r_format.rd;
920 rt = inst.r_format.rt;
921 sel = inst.r_format.re & 0x7;
922
923 switch (rd) {
924 case MIPS_HWR_CC: /* Read count register */
925 arch->gprs[rt] =
926 (long)(int)kvm_mips_read_count(vcpu);
927 break;
928 default:
929 trace_kvm_hwr(vcpu, KVM_TRACE_RDHWR,
930 KVM_TRACE_HWR(rd, sel), 0);
931 goto unknown;
932 };
933
934 trace_kvm_hwr(vcpu, KVM_TRACE_RDHWR,
935 KVM_TRACE_HWR(rd, sel), arch->gprs[rt]);
936
937 er = update_pc(vcpu, cause);
938 break;
939 default:
940 goto unknown;
941 };
942 break;
943unknown:
944
945 default:
946 kvm_err("GPSI exception not supported (%p/%#x)\n",
947 opc, inst.word);
948 kvm_arch_vcpu_dump_regs(vcpu);
949 er = EMULATE_FAIL;
950 break;
951 }
952
953 return er;
954}
955
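/*
 * Handle a Guest Software Field Change (GSFC) exit: the guest wrote a
 * sensitive field of Status, Cause, IntCtl or Config5 via MTC0, so complete
 * the write here, applying the appropriate writable masks, and advance the
 * guest PC.
 */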
956static enum emulation_result kvm_trap_vz_handle_gsfc(u32 cause, u32 *opc,
957 struct kvm_vcpu *vcpu)
958{
959 enum emulation_result er = EMULATE_DONE;
960 struct kvm_vcpu_arch *arch = &vcpu->arch;
961 union mips_instruction inst;
962 int err;
963
964 /*
965 * Fetch the instruction.
966 */
967 if (cause & CAUSEF_BD)
968 opc += 1;
969 err = kvm_get_badinstr(opc, vcpu, &inst.word);
970 if (err)
971 return EMULATE_FAIL;
972
973 /* complete MTC0 on behalf of guest and advance EPC */
974 if (inst.c0r_format.opcode == cop0_op &&
975 inst.c0r_format.rs == mtc_op &&
976 inst.c0r_format.z == 0) {
977 int rt = inst.c0r_format.rt;
978 int rd = inst.c0r_format.rd;
979 int sel = inst.c0r_format.sel;
980 unsigned int val = arch->gprs[rt];
981 unsigned int old_val, change;
982
983 trace_kvm_hwr(vcpu, KVM_TRACE_MTC0, KVM_TRACE_COP0(rd, sel),
984 val);
985
986 if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
987 /* FR bit should read as zero if no FPU */
988 if (!kvm_mips_guest_has_fpu(&vcpu->arch))
989 val &= ~(ST0_CU1 | ST0_FR);
990
991 /*
992 * Also don't allow FR to be set if host doesn't support
993 * it.
994 */
995 if (!(boot_cpu_data.fpu_id & MIPS_FPIR_F64))
996 val &= ~ST0_FR;
997
998 old_val = read_gc0_status();
999 change = val ^ old_val;
1000
1001 if (change & ST0_FR) {
1002 /*
1003 * FPU and Vector register state is made
1004 * UNPREDICTABLE by a change of FR, so don't
1005 * even bother saving it.
1006 */
1007 kvm_drop_fpu(vcpu);
1008 }
1009
1010 /*
1011 * If MSA state is already live, it is undefined how it
1012 * interacts with FR=0 FPU state, and we don't want to
1013 * hit reserved instruction exceptions trying to save
1014 * the MSA state later when CU=1 && FR=1, so play it
1015 * safe and save it first.
1016 */
1017 if (change & ST0_CU1 && !(val & ST0_FR) &&
1018 vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
1019 kvm_lose_fpu(vcpu);
1020
1021 write_gc0_status(val);
1022 } else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) {
1023 u32 old_cause = read_gc0_cause();
1024 u32 change = old_cause ^ val;
1025
1026 /* DC bit enabling/disabling timer? */
1027 if (change & CAUSEF_DC) {
1028 if (val & CAUSEF_DC)
1029 kvm_mips_count_disable_cause(vcpu);
1030 else
1031 kvm_mips_count_enable_cause(vcpu);
1032 }
1033
1034 /* Only certain bits are RW to the guest */
1035 change &= (CAUSEF_DC | CAUSEF_IV | CAUSEF_WP |
1036 CAUSEF_IP0 | CAUSEF_IP1);
1037
1038 /* WP can only be cleared */
1039 change &= ~CAUSEF_WP | old_cause;
1040
1041 write_gc0_cause(old_cause ^ change);
1042 } else if ((rd == MIPS_CP0_STATUS) && (sel == 1)) { /* IntCtl */
1043 write_gc0_intctl(val);
1044 } else if ((rd == MIPS_CP0_CONFIG) && (sel == 5)) {
1045 old_val = read_gc0_config5();
1046 change = val ^ old_val;
1047 /* Handle changes in FPU/MSA modes */
1048 preempt_disable();
1049
1050 /*
1051 * Propagate FRE changes immediately if the FPU
1052 * context is already loaded.
1053 */
1054 if (change & MIPS_CONF5_FRE &&
1055 vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)
1056 change_c0_config5(MIPS_CONF5_FRE, val);
1057
1058 preempt_enable();
1059
1060 val = old_val ^
1061 (change & kvm_vz_config5_guest_wrmask(vcpu));
1062 write_gc0_config5(val);
1063 } else {
1064 kvm_err("Handle GSFC, unsupported field change @ %p: %#x\n",
1065 opc, inst.word);
1066 er = EMULATE_FAIL;
1067 }
1068
1069 if (er != EMULATE_FAIL)
1070 er = update_pc(vcpu, cause);
1071 } else {
1072 kvm_err("Handle GSFC, unrecognized instruction @ %p: %#x\n",
1073 opc, inst.word);
1074 er = EMULATE_FAIL;
1075 }
1076
1077 return er;
1078}
1079
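/*
 * Handle a Hypercall (HC) guest exit: fetch the HYPCALL instruction, advance
 * the PC, and hand off to the hypercall emulator, rolling the PC back if
 * emulation fails.
 */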
1080static enum emulation_result kvm_trap_vz_handle_hc(u32 cause, u32 *opc,
1081 struct kvm_vcpu *vcpu)
1082{
1083 enum emulation_result er;
1084 union mips_instruction inst;
1085 unsigned long curr_pc;
1086 int err;
1087
1088 if (cause & CAUSEF_BD)
1089 opc += 1;
1090 err = kvm_get_badinstr(opc, vcpu, &inst.word);
1091 if (err)
1092 return EMULATE_FAIL;
1093
1094 /*
1095 * Update PC and hold onto current PC in case there is
1096 * an error and we want to rollback the PC
1097 */
1098 curr_pc = vcpu->arch.pc;
1099 er = update_pc(vcpu, cause);
1100 if (er == EMULATE_FAIL)
1101 return er;
1102
1103 er = kvm_mips_emul_hypcall(vcpu, inst);
1104 if (er == EMULATE_FAIL)
1105 vcpu->arch.pc = curr_pc;
1106
1107 return er;
1108}
1109
1110static enum emulation_result kvm_trap_vz_no_handler_guest_exit(u32 gexccode,
1111 u32 cause,
1112 u32 *opc,
1113 struct kvm_vcpu *vcpu)
1114{
1115 u32 inst;
1116
1117 /*
1118 * Fetch the instruction.
1119 */
1120 if (cause & CAUSEF_BD)
1121 opc += 1;
1122 kvm_get_badinstr(opc, vcpu, &inst);
1123
1124 kvm_err("Guest Exception Code: %d not yet handled @ PC: %p, inst: 0x%08x Status: %#x\n",
1125 gexccode, opc, inst, read_gc0_status());
1126
1127 return EMULATE_FAIL;
1128}
1129
1130static int kvm_trap_vz_handle_guest_exit(struct kvm_vcpu *vcpu)
1131{
1132 u32 *opc = (u32 *) vcpu->arch.pc;
1133 u32 cause = vcpu->arch.host_cp0_cause;
1134 enum emulation_result er = EMULATE_DONE;
1135 u32 gexccode = (vcpu->arch.host_cp0_guestctl0 &
1136 MIPS_GCTL0_GEXC) >> MIPS_GCTL0_GEXC_SHIFT;
1137 int ret = RESUME_GUEST;
1138
1139 trace_kvm_exit(vcpu, KVM_TRACE_EXIT_GEXCCODE_BASE + gexccode);
1140 switch (gexccode) {
1141 case MIPS_GCTL0_GEXC_GPSI:
1142 ++vcpu->stat.vz_gpsi_exits;
1143 er = kvm_trap_vz_handle_gpsi(cause, opc, vcpu);
1144 break;
1145 case MIPS_GCTL0_GEXC_GSFC:
1146 ++vcpu->stat.vz_gsfc_exits;
1147 er = kvm_trap_vz_handle_gsfc(cause, opc, vcpu);
1148 break;
1149 case MIPS_GCTL0_GEXC_HC:
1150 ++vcpu->stat.vz_hc_exits;
1151 er = kvm_trap_vz_handle_hc(cause, opc, vcpu);
1152 break;
1153 case MIPS_GCTL0_GEXC_GRR:
1154 ++vcpu->stat.vz_grr_exits;
1155 er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
1156 vcpu);
1157 break;
1158 case MIPS_GCTL0_GEXC_GVA:
1159 ++vcpu->stat.vz_gva_exits;
1160 er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
1161 vcpu);
1162 break;
1163 case MIPS_GCTL0_GEXC_GHFC:
1164 ++vcpu->stat.vz_ghfc_exits;
1165 er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
1166 vcpu);
1167 break;
1168 case MIPS_GCTL0_GEXC_GPA:
1169 ++vcpu->stat.vz_gpa_exits;
1170 er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
1171 vcpu);
1172 break;
1173 default:
1174 ++vcpu->stat.vz_resvd_exits;
1175 er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
1176 vcpu);
1177 break;
1178
1179 }
1180
1181 if (er == EMULATE_DONE) {
1182 ret = RESUME_GUEST;
1183 } else if (er == EMULATE_HYPERCALL) {
1184 ret = kvm_mips_handle_hypcall(vcpu);
1185 } else {
1186 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1187 ret = RESUME_HOST;
1188 }
1189 return ret;
1190}
1191
1192/**
 1193 * kvm_trap_vz_handle_cop_unusable() - Guest used unusable coprocessor.
1194 * @vcpu: Virtual CPU context.
1195 *
1196 * Handle when the guest attempts to use a coprocessor which hasn't been allowed
1197 * by the root context.
1198 */
1199static int kvm_trap_vz_handle_cop_unusable(struct kvm_vcpu *vcpu)
1200{
1201 struct kvm_run *run = vcpu->run;
1202 u32 cause = vcpu->arch.host_cp0_cause;
1203 enum emulation_result er = EMULATE_FAIL;
1204 int ret = RESUME_GUEST;
1205
1206 if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) {
1207 /*
1208 * If guest FPU not present, the FPU operation should have been
1209 * treated as a reserved instruction!
1210 * If FPU already in use, we shouldn't get this at all.
1211 */
1212 if (WARN_ON(!kvm_mips_guest_has_fpu(&vcpu->arch) ||
1213 vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)) {
1214 preempt_enable();
1215 return EMULATE_FAIL;
1216 }
1217
1218 kvm_own_fpu(vcpu);
1219 er = EMULATE_DONE;
1220 }
1221 /* other coprocessors not handled */
1222
1223 switch (er) {
1224 case EMULATE_DONE:
1225 ret = RESUME_GUEST;
1226 break;
1227
1228 case EMULATE_FAIL:
1229 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1230 ret = RESUME_HOST;
1231 break;
1232
1233 default:
1234 BUG();
1235 }
1236 return ret;
1237}
1238
1239/**
1240 * kvm_trap_vz_handle_msa_disabled() - Guest used MSA while disabled in root.
1241 * @vcpu: Virtual CPU context.
1242 *
1243 * Handle when the guest attempts to use MSA when it is disabled in the root
1244 * context.
1245 */
1246static int kvm_trap_vz_handle_msa_disabled(struct kvm_vcpu *vcpu)
1247{
1248 struct kvm_run *run = vcpu->run;
1249
1250 /*
1251 * If MSA not present or not exposed to guest or FR=0, the MSA operation
1252 * should have been treated as a reserved instruction!
1253 * Same if CU1=1, FR=0.
1254 * If MSA already in use, we shouldn't get this at all.
1255 */
1256 if (!kvm_mips_guest_has_msa(&vcpu->arch) ||
1257 (read_gc0_status() & (ST0_CU1 | ST0_FR)) == ST0_CU1 ||
1258 !(read_gc0_config5() & MIPS_CONF5_MSAEN) ||
1259 vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
1260 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1261 return RESUME_HOST;
1262 }
1263
1264 kvm_own_msa(vcpu);
1265
1266 return RESUME_GUEST;
1267}
1268
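/*
 * Handle a root TLB load/fetch miss on a guest physical address: map the GPA
 * into the root TLB if it is backed by memory, otherwise emulate the faulting
 * load as MMIO.
 */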
1269static int kvm_trap_vz_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
1270{
1271 struct kvm_run *run = vcpu->run;
1272 u32 *opc = (u32 *) vcpu->arch.pc;
1273 u32 cause = vcpu->arch.host_cp0_cause;
1274 ulong badvaddr = vcpu->arch.host_cp0_badvaddr;
1275 union mips_instruction inst;
1276 enum emulation_result er = EMULATE_DONE;
1277 int err, ret = RESUME_GUEST;
1278
1279 if (kvm_mips_handle_vz_root_tlb_fault(badvaddr, vcpu, false)) {
1280 /* A code fetch fault doesn't count as an MMIO */
1281 if (kvm_is_ifetch_fault(&vcpu->arch)) {
1282 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1283 return RESUME_HOST;
1284 }
1285
1286 /* Fetch the instruction */
1287 if (cause & CAUSEF_BD)
1288 opc += 1;
1289 err = kvm_get_badinstr(opc, vcpu, &inst.word);
1290 if (err) {
1291 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1292 return RESUME_HOST;
1293 }
1294
1295 /* Treat as MMIO */
1296 er = kvm_mips_emulate_load(inst, cause, run, vcpu);
1297 if (er == EMULATE_FAIL) {
1298 kvm_err("Guest Emulate Load from MMIO space failed: PC: %p, BadVaddr: %#lx\n",
1299 opc, badvaddr);
1300 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1301 }
1302 }
1303
1304 if (er == EMULATE_DONE) {
1305 ret = RESUME_GUEST;
1306 } else if (er == EMULATE_DO_MMIO) {
1307 run->exit_reason = KVM_EXIT_MMIO;
1308 ret = RESUME_HOST;
1309 } else {
1310 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1311 ret = RESUME_HOST;
1312 }
1313 return ret;
1314}
1315
1316static int kvm_trap_vz_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
1317{
1318 struct kvm_run *run = vcpu->run;
1319 u32 *opc = (u32 *) vcpu->arch.pc;
1320 u32 cause = vcpu->arch.host_cp0_cause;
1321 ulong badvaddr = vcpu->arch.host_cp0_badvaddr;
1322 union mips_instruction inst;
1323 enum emulation_result er = EMULATE_DONE;
1324 int err;
1325 int ret = RESUME_GUEST;
1326
1327 /* Just try the access again if we couldn't do the translation */
1328 if (kvm_vz_badvaddr_to_gpa(vcpu, badvaddr, &badvaddr))
1329 return RESUME_GUEST;
1330 vcpu->arch.host_cp0_badvaddr = badvaddr;
1331
1332 if (kvm_mips_handle_vz_root_tlb_fault(badvaddr, vcpu, true)) {
1333 /* Fetch the instruction */
1334 if (cause & CAUSEF_BD)
1335 opc += 1;
1336 err = kvm_get_badinstr(opc, vcpu, &inst.word);
1337 if (err) {
1338 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1339 return RESUME_HOST;
1340 }
1341
1342 /* Treat as MMIO */
1343 er = kvm_mips_emulate_store(inst, cause, run, vcpu);
1344 if (er == EMULATE_FAIL) {
1345 kvm_err("Guest Emulate Store to MMIO space failed: PC: %p, BadVaddr: %#lx\n",
1346 opc, badvaddr);
1347 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1348 }
1349 }
1350
1351 if (er == EMULATE_DONE) {
1352 ret = RESUME_GUEST;
1353 } else if (er == EMULATE_DO_MMIO) {
1354 run->exit_reason = KVM_EXIT_MMIO;
1355 ret = RESUME_HOST;
1356 } else {
1357 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1358 ret = RESUME_HOST;
1359 }
1360 return ret;
1361}
1362
1363static u64 kvm_vz_get_one_regs[] = {
1364 KVM_REG_MIPS_CP0_INDEX,
1365 KVM_REG_MIPS_CP0_ENTRYLO0,
1366 KVM_REG_MIPS_CP0_ENTRYLO1,
1367 KVM_REG_MIPS_CP0_CONTEXT,
1368 KVM_REG_MIPS_CP0_PAGEMASK,
1369 KVM_REG_MIPS_CP0_PAGEGRAIN,
1370 KVM_REG_MIPS_CP0_WIRED,
1371 KVM_REG_MIPS_CP0_HWRENA,
1372 KVM_REG_MIPS_CP0_BADVADDR,
1373 KVM_REG_MIPS_CP0_COUNT,
1374 KVM_REG_MIPS_CP0_ENTRYHI,
1375 KVM_REG_MIPS_CP0_COMPARE,
1376 KVM_REG_MIPS_CP0_STATUS,
1377 KVM_REG_MIPS_CP0_INTCTL,
1378 KVM_REG_MIPS_CP0_CAUSE,
1379 KVM_REG_MIPS_CP0_EPC,
1380 KVM_REG_MIPS_CP0_PRID,
1381 KVM_REG_MIPS_CP0_EBASE,
1382 KVM_REG_MIPS_CP0_CONFIG,
1383 KVM_REG_MIPS_CP0_CONFIG1,
1384 KVM_REG_MIPS_CP0_CONFIG2,
1385 KVM_REG_MIPS_CP0_CONFIG3,
1386 KVM_REG_MIPS_CP0_CONFIG4,
1387 KVM_REG_MIPS_CP0_CONFIG5,
1388#ifdef CONFIG_64BIT
1389 KVM_REG_MIPS_CP0_XCONTEXT,
1390#endif
1391 KVM_REG_MIPS_CP0_ERROREPC,
1392
1393 KVM_REG_MIPS_COUNT_CTL,
1394 KVM_REG_MIPS_COUNT_RESUME,
1395 KVM_REG_MIPS_COUNT_HZ,
1396};
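/*
 * Illustrative sketch (not part of this file): userspace reads any of the
 * registers listed above with the KVM_GET_ONE_REG vcpu ioctl, e.g.
 * (vcpu_fd being the VCPU file descriptor, and KVM_REG_MIPS_CP0_COMPARE
 * standing for the corresponding 64-bit register ID):
 *
 *	__u64 val;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_MIPS_CP0_COMPARE,
 *		.addr = (__u64)&val,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 */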
1397
1398static u64 kvm_vz_get_one_regs_contextconfig[] = {
1399 KVM_REG_MIPS_CP0_CONTEXTCONFIG,
1400#ifdef CONFIG_64BIT
1401 KVM_REG_MIPS_CP0_XCONTEXTCONFIG,
1402#endif
1403};
1404
1405static u64 kvm_vz_get_one_regs_segments[] = {
1406 KVM_REG_MIPS_CP0_SEGCTL0,
1407 KVM_REG_MIPS_CP0_SEGCTL1,
1408 KVM_REG_MIPS_CP0_SEGCTL2,
1409};
1410
1411static u64 kvm_vz_get_one_regs_htw[] = {
1412 KVM_REG_MIPS_CP0_PWBASE,
1413 KVM_REG_MIPS_CP0_PWFIELD,
1414 KVM_REG_MIPS_CP0_PWSIZE,
1415 KVM_REG_MIPS_CP0_PWCTL,
1416};
1417
1418static u64 kvm_vz_get_one_regs_kscratch[] = {
1419 KVM_REG_MIPS_CP0_KSCRATCH1,
1420 KVM_REG_MIPS_CP0_KSCRATCH2,
1421 KVM_REG_MIPS_CP0_KSCRATCH3,
1422 KVM_REG_MIPS_CP0_KSCRATCH4,
1423 KVM_REG_MIPS_CP0_KSCRATCH5,
1424 KVM_REG_MIPS_CP0_KSCRATCH6,
1425};
1426
1427static unsigned long kvm_vz_num_regs(struct kvm_vcpu *vcpu)
1428{
1429 unsigned long ret;
1430
1431 ret = ARRAY_SIZE(kvm_vz_get_one_regs);
1432 if (cpu_guest_has_userlocal)
1433 ++ret;
1434 if (cpu_guest_has_badinstr)
1435 ++ret;
1436 if (cpu_guest_has_badinstrp)
1437 ++ret;
1438 if (cpu_guest_has_contextconfig)
1439 ret += ARRAY_SIZE(kvm_vz_get_one_regs_contextconfig);
1440 if (cpu_guest_has_segments)
1441 ret += ARRAY_SIZE(kvm_vz_get_one_regs_segments);
1442 if (cpu_guest_has_htw)
1443 ret += ARRAY_SIZE(kvm_vz_get_one_regs_htw);
1444 ret += __arch_hweight8(cpu_data[0].guest.kscratch_mask);
1445
1446 return ret;
1447}
1448
1449static int kvm_vz_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices)
1450{
1451 u64 index;
1452 unsigned int i;
1453
1454 if (copy_to_user(indices, kvm_vz_get_one_regs,
1455 sizeof(kvm_vz_get_one_regs)))
1456 return -EFAULT;
1457 indices += ARRAY_SIZE(kvm_vz_get_one_regs);
1458
1459 if (cpu_guest_has_userlocal) {
1460 index = KVM_REG_MIPS_CP0_USERLOCAL;
1461 if (copy_to_user(indices, &index, sizeof(index)))
1462 return -EFAULT;
1463 ++indices;
1464 }
1465 if (cpu_guest_has_badinstr) {
1466 index = KVM_REG_MIPS_CP0_BADINSTR;
1467 if (copy_to_user(indices, &index, sizeof(index)))
1468 return -EFAULT;
1469 ++indices;
1470 }
1471 if (cpu_guest_has_badinstrp) {
1472 index = KVM_REG_MIPS_CP0_BADINSTRP;
1473 if (copy_to_user(indices, &index, sizeof(index)))
1474 return -EFAULT;
1475 ++indices;
1476 }
1477 if (cpu_guest_has_contextconfig) {
1478 if (copy_to_user(indices, kvm_vz_get_one_regs_contextconfig,
1479 sizeof(kvm_vz_get_one_regs_contextconfig)))
1480 return -EFAULT;
1481 indices += ARRAY_SIZE(kvm_vz_get_one_regs_contextconfig);
1482 }
1483 if (cpu_guest_has_segments) {
1484 if (copy_to_user(indices, kvm_vz_get_one_regs_segments,
1485 sizeof(kvm_vz_get_one_regs_segments)))
1486 return -EFAULT;
1487 indices += ARRAY_SIZE(kvm_vz_get_one_regs_segments);
1488 }
1489 if (cpu_guest_has_htw) {
1490 if (copy_to_user(indices, kvm_vz_get_one_regs_htw,
1491 sizeof(kvm_vz_get_one_regs_htw)))
1492 return -EFAULT;
1493 indices += ARRAY_SIZE(kvm_vz_get_one_regs_htw);
1494 }
1495 for (i = 0; i < 6; ++i) {
1496 if (!cpu_guest_has_kscr(i + 2))
1497 continue;
1498
1499 if (copy_to_user(indices, &kvm_vz_get_one_regs_kscratch[i],
1500 sizeof(kvm_vz_get_one_regs_kscratch[i])))
1501 return -EFAULT;
1502 ++indices;
1503 }
1504
1505 return 0;
1506}
1507
1508static inline s64 entrylo_kvm_to_user(unsigned long v)
1509{
1510 s64 mask, ret = v;
1511
1512 if (BITS_PER_LONG == 32) {
1513 /*
1514 * KVM API exposes 64-bit version of the register, so move the
1515 * RI/XI bits up into place.
1516 */
1517 mask = MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI;
1518 ret &= ~mask;
1519 ret |= ((s64)v & mask) << 32;
1520 }
1521 return ret;
1522}
1523
1524static inline unsigned long entrylo_user_to_kvm(s64 v)
1525{
1526 unsigned long mask, ret = v;
1527
1528 if (BITS_PER_LONG == 32) {
1529 /*
 1530	 * KVM API exposes 64-bit version of the register, so move the
1531 * RI/XI bits down into place.
1532 */
1533 mask = MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI;
1534 ret &= ~mask;
1535 ret |= (v >> 32) & mask;
1536 }
1537 return ret;
1538}
1539
1540static int kvm_vz_get_one_reg(struct kvm_vcpu *vcpu,
1541 const struct kvm_one_reg *reg,
1542 s64 *v)
1543{
1544 struct mips_coproc *cop0 = vcpu->arch.cop0;
1545 unsigned int idx;
1546
1547 switch (reg->id) {
1548 case KVM_REG_MIPS_CP0_INDEX:
1549 *v = (long)read_gc0_index();
1550 break;
1551 case KVM_REG_MIPS_CP0_ENTRYLO0:
1552 *v = entrylo_kvm_to_user(read_gc0_entrylo0());
1553 break;
1554 case KVM_REG_MIPS_CP0_ENTRYLO1:
1555 *v = entrylo_kvm_to_user(read_gc0_entrylo1());
1556 break;
1557 case KVM_REG_MIPS_CP0_CONTEXT:
1558 *v = (long)read_gc0_context();
1559 break;
1560 case KVM_REG_MIPS_CP0_CONTEXTCONFIG:
1561 if (!cpu_guest_has_contextconfig)
1562 return -EINVAL;
1563 *v = read_gc0_contextconfig();
1564 break;
1565 case KVM_REG_MIPS_CP0_USERLOCAL:
1566 if (!cpu_guest_has_userlocal)
1567 return -EINVAL;
1568 *v = read_gc0_userlocal();
1569 break;
1570#ifdef CONFIG_64BIT
1571 case KVM_REG_MIPS_CP0_XCONTEXTCONFIG:
1572 if (!cpu_guest_has_contextconfig)
1573 return -EINVAL;
1574 *v = read_gc0_xcontextconfig();
1575 break;
1576#endif
1577 case KVM_REG_MIPS_CP0_PAGEMASK:
1578 *v = (long)read_gc0_pagemask();
1579 break;
1580 case KVM_REG_MIPS_CP0_PAGEGRAIN:
1581 *v = (long)read_gc0_pagegrain();
1582 break;
1583 case KVM_REG_MIPS_CP0_SEGCTL0:
1584 if (!cpu_guest_has_segments)
1585 return -EINVAL;
1586 *v = read_gc0_segctl0();
1587 break;
1588 case KVM_REG_MIPS_CP0_SEGCTL1:
1589 if (!cpu_guest_has_segments)
1590 return -EINVAL;
1591 *v = read_gc0_segctl1();
1592 break;
1593 case KVM_REG_MIPS_CP0_SEGCTL2:
1594 if (!cpu_guest_has_segments)
1595 return -EINVAL;
1596 *v = read_gc0_segctl2();
1597 break;
1598 case KVM_REG_MIPS_CP0_PWBASE:
1599 if (!cpu_guest_has_htw)
1600 return -EINVAL;
1601 *v = read_gc0_pwbase();
1602 break;
1603 case KVM_REG_MIPS_CP0_PWFIELD:
1604 if (!cpu_guest_has_htw)
1605 return -EINVAL;
1606 *v = read_gc0_pwfield();
1607 break;
1608 case KVM_REG_MIPS_CP0_PWSIZE:
1609 if (!cpu_guest_has_htw)
1610 return -EINVAL;
1611 *v = read_gc0_pwsize();
1612 break;
1613 case KVM_REG_MIPS_CP0_WIRED:
1614 *v = (long)read_gc0_wired();
1615 break;
1616 case KVM_REG_MIPS_CP0_PWCTL:
1617 if (!cpu_guest_has_htw)
1618 return -EINVAL;
1619 *v = read_gc0_pwctl();
1620 break;
1621 case KVM_REG_MIPS_CP0_HWRENA:
1622 *v = (long)read_gc0_hwrena();
1623 break;
1624 case KVM_REG_MIPS_CP0_BADVADDR:
1625 *v = (long)read_gc0_badvaddr();
1626 break;
1627 case KVM_REG_MIPS_CP0_BADINSTR:
1628 if (!cpu_guest_has_badinstr)
1629 return -EINVAL;
1630 *v = read_gc0_badinstr();
1631 break;
1632 case KVM_REG_MIPS_CP0_BADINSTRP:
1633 if (!cpu_guest_has_badinstrp)
1634 return -EINVAL;
1635 *v = read_gc0_badinstrp();
1636 break;
1637 case KVM_REG_MIPS_CP0_COUNT:
1638 *v = kvm_mips_read_count(vcpu);
1639 break;
1640 case KVM_REG_MIPS_CP0_ENTRYHI:
1641 *v = (long)read_gc0_entryhi();
1642 break;
1643 case KVM_REG_MIPS_CP0_COMPARE:
1644 *v = (long)read_gc0_compare();
1645 break;
1646 case KVM_REG_MIPS_CP0_STATUS:
1647 *v = (long)read_gc0_status();
1648 break;
1649 case KVM_REG_MIPS_CP0_INTCTL:
1650 *v = read_gc0_intctl();
1651 break;
1652 case KVM_REG_MIPS_CP0_CAUSE:
1653 *v = (long)read_gc0_cause();
1654 break;
1655 case KVM_REG_MIPS_CP0_EPC:
1656 *v = (long)read_gc0_epc();
1657 break;
1658 case KVM_REG_MIPS_CP0_PRID:
1659 *v = (long)kvm_read_c0_guest_prid(cop0);
1660 break;
1661 case KVM_REG_MIPS_CP0_EBASE:
1662 *v = kvm_vz_read_gc0_ebase();
1663 break;
1664 case KVM_REG_MIPS_CP0_CONFIG:
1665 *v = read_gc0_config();
1666 break;
1667 case KVM_REG_MIPS_CP0_CONFIG1:
1668 if (!cpu_guest_has_conf1)
1669 return -EINVAL;
1670 *v = read_gc0_config1();
1671 break;
1672 case KVM_REG_MIPS_CP0_CONFIG2:
1673 if (!cpu_guest_has_conf2)
1674 return -EINVAL;
1675 *v = read_gc0_config2();
1676 break;
1677 case KVM_REG_MIPS_CP0_CONFIG3:
1678 if (!cpu_guest_has_conf3)
1679 return -EINVAL;
1680 *v = read_gc0_config3();
1681 break;
1682 case KVM_REG_MIPS_CP0_CONFIG4:
1683 if (!cpu_guest_has_conf4)
1684 return -EINVAL;
1685 *v = read_gc0_config4();
1686 break;
1687 case KVM_REG_MIPS_CP0_CONFIG5:
1688 if (!cpu_guest_has_conf5)
1689 return -EINVAL;
1690 *v = read_gc0_config5();
1691 break;
1692#ifdef CONFIG_64BIT
1693 case KVM_REG_MIPS_CP0_XCONTEXT:
1694 *v = read_gc0_xcontext();
1695 break;
1696#endif
1697 case KVM_REG_MIPS_CP0_ERROREPC:
1698 *v = (long)read_gc0_errorepc();
1699 break;
1700 case KVM_REG_MIPS_CP0_KSCRATCH1 ... KVM_REG_MIPS_CP0_KSCRATCH6:
1701 idx = reg->id - KVM_REG_MIPS_CP0_KSCRATCH1 + 2;
1702 if (!cpu_guest_has_kscr(idx))
1703 return -EINVAL;
1704 switch (idx) {
1705 case 2:
1706 *v = (long)read_gc0_kscratch1();
1707 break;
1708 case 3:
1709 *v = (long)read_gc0_kscratch2();
1710 break;
1711 case 4:
1712 *v = (long)read_gc0_kscratch3();
1713 break;
1714 case 5:
1715 *v = (long)read_gc0_kscratch4();
1716 break;
1717 case 6:
1718 *v = (long)read_gc0_kscratch5();
1719 break;
1720 case 7:
1721 *v = (long)read_gc0_kscratch6();
1722 break;
1723 }
1724 break;
1725 case KVM_REG_MIPS_COUNT_CTL:
1726 *v = vcpu->arch.count_ctl;
1727 break;
1728 case KVM_REG_MIPS_COUNT_RESUME:
1729 *v = ktime_to_ns(vcpu->arch.count_resume);
1730 break;
1731 case KVM_REG_MIPS_COUNT_HZ:
1732 *v = vcpu->arch.count_hz;
1733 break;
1734 default:
1735 return -EINVAL;
1736 }
1737 return 0;
1738}
1739
1740static int kvm_vz_set_one_reg(struct kvm_vcpu *vcpu,
1741 const struct kvm_one_reg *reg,
1742 s64 v)
1743{
1744 struct mips_coproc *cop0 = vcpu->arch.cop0;
1745 unsigned int idx;
1746 int ret = 0;
1747 unsigned int cur, change;
1748
1749 switch (reg->id) {
1750 case KVM_REG_MIPS_CP0_INDEX:
1751 write_gc0_index(v);
1752 break;
1753 case KVM_REG_MIPS_CP0_ENTRYLO0:
1754 write_gc0_entrylo0(entrylo_user_to_kvm(v));
1755 break;
1756 case KVM_REG_MIPS_CP0_ENTRYLO1:
1757 write_gc0_entrylo1(entrylo_user_to_kvm(v));
1758 break;
1759 case KVM_REG_MIPS_CP0_CONTEXT:
1760 write_gc0_context(v);
1761 break;
1762 case KVM_REG_MIPS_CP0_CONTEXTCONFIG:
1763 if (!cpu_guest_has_contextconfig)
1764 return -EINVAL;
1765 write_gc0_contextconfig(v);
1766 break;
1767 case KVM_REG_MIPS_CP0_USERLOCAL:
1768 if (!cpu_guest_has_userlocal)
1769 return -EINVAL;
1770 write_gc0_userlocal(v);
1771 break;
1772#ifdef CONFIG_64BIT
1773 case KVM_REG_MIPS_CP0_XCONTEXTCONFIG:
1774 if (!cpu_guest_has_contextconfig)
1775 return -EINVAL;
1776 write_gc0_xcontextconfig(v);
1777 break;
1778#endif
c992a4f6
JH
1779 case KVM_REG_MIPS_CP0_PAGEMASK:
1780 write_gc0_pagemask(v);
1781 break;
1782 case KVM_REG_MIPS_CP0_PAGEGRAIN:
1783 write_gc0_pagegrain(v);
1784 break;
1785 case KVM_REG_MIPS_CP0_SEGCTL0:
1786 if (!cpu_guest_has_segments)
1787 return -EINVAL;
1788 write_gc0_segctl0(v);
1789 break;
1790 case KVM_REG_MIPS_CP0_SEGCTL1:
1791 if (!cpu_guest_has_segments)
1792 return -EINVAL;
1793 write_gc0_segctl1(v);
1794 break;
1795 case KVM_REG_MIPS_CP0_SEGCTL2:
1796 if (!cpu_guest_has_segments)
1797 return -EINVAL;
1798 write_gc0_segctl2(v);
1799 break;
1800 case KVM_REG_MIPS_CP0_PWBASE:
1801 if (!cpu_guest_has_htw)
1802 return -EINVAL;
1803 write_gc0_pwbase(v);
1804 break;
1805 case KVM_REG_MIPS_CP0_PWFIELD:
1806 if (!cpu_guest_has_htw)
1807 return -EINVAL;
1808 write_gc0_pwfield(v);
1809 break;
1810 case KVM_REG_MIPS_CP0_PWSIZE:
1811 if (!cpu_guest_has_htw)
1812 return -EINVAL;
1813 write_gc0_pwsize(v);
1814 break;
1815 case KVM_REG_MIPS_CP0_WIRED:
1816 change_gc0_wired(MIPSR6_WIRED_WIRED, v);
1817 break;
1818 case KVM_REG_MIPS_CP0_PWCTL:
1819 if (!cpu_guest_has_htw)
1820 return -EINVAL;
1821 write_gc0_pwctl(v);
1822 break;
1823 case KVM_REG_MIPS_CP0_HWRENA:
1824 write_gc0_hwrena(v);
1825 break;
1826 case KVM_REG_MIPS_CP0_BADVADDR:
1827 write_gc0_badvaddr(v);
1828 break;
1829 case KVM_REG_MIPS_CP0_BADINSTR:
1830 if (!cpu_guest_has_badinstr)
1831 return -EINVAL;
1832 write_gc0_badinstr(v);
1833 break;
1834 case KVM_REG_MIPS_CP0_BADINSTRP:
1835 if (!cpu_guest_has_badinstrp)
1836 return -EINVAL;
1837 write_gc0_badinstrp(v);
1838 break;
1839 case KVM_REG_MIPS_CP0_COUNT:
1840 kvm_mips_write_count(vcpu, v);
1841 break;
1842 case KVM_REG_MIPS_CP0_ENTRYHI:
1843 write_gc0_entryhi(v);
1844 break;
1845 case KVM_REG_MIPS_CP0_COMPARE:
1846 kvm_mips_write_compare(vcpu, v, false);
1847 break;
1848 case KVM_REG_MIPS_CP0_STATUS:
1849 write_gc0_status(v);
1850 break;
1851 case KVM_REG_MIPS_CP0_INTCTL:
1852 write_gc0_intctl(v);
1853 break;
1854 case KVM_REG_MIPS_CP0_CAUSE:
1855 /*
1856 * If the timer is stopped or started (DC bit) it must look
1857 * atomic with changes to the timer interrupt pending bit (TI).
1858 * A timer interrupt should not happen in between.
1859 */
1860 if ((read_gc0_cause() ^ v) & CAUSEF_DC) {
1861 if (v & CAUSEF_DC) {
1862 /* disable timer first */
1863 kvm_mips_count_disable_cause(vcpu);
1864 change_gc0_cause((u32)~CAUSEF_DC, v);
1865 } else {
1866 /* enable timer last */
1867 change_gc0_cause((u32)~CAUSEF_DC, v);
1868 kvm_mips_count_enable_cause(vcpu);
1869 }
1870 } else {
1871 write_gc0_cause(v);
1872 }
1873 break;
1874 case KVM_REG_MIPS_CP0_EPC:
1875 write_gc0_epc(v);
1876 break;
1877 case KVM_REG_MIPS_CP0_PRID:
1878 kvm_write_c0_guest_prid(cop0, v);
1879 break;
1880 case KVM_REG_MIPS_CP0_EBASE:
1881 kvm_vz_write_gc0_ebase(v);
1882 break;
1883 case KVM_REG_MIPS_CP0_CONFIG:
1884 cur = read_gc0_config();
1885 change = (cur ^ v) & kvm_vz_config_user_wrmask(vcpu);
1886 if (change) {
1887 v = cur ^ change;
1888 write_gc0_config(v);
1889 }
1890 break;
1891 case KVM_REG_MIPS_CP0_CONFIG1:
1892 if (!cpu_guest_has_conf1)
1893 break;
1894 cur = read_gc0_config1();
1895 change = (cur ^ v) & kvm_vz_config1_user_wrmask(vcpu);
1896 if (change) {
1897 v = cur ^ change;
1898 write_gc0_config1(v);
1899 }
1900 break;
1901 case KVM_REG_MIPS_CP0_CONFIG2:
1902 if (!cpu_guest_has_conf2)
1903 break;
1904 cur = read_gc0_config2();
1905 change = (cur ^ v) & kvm_vz_config2_user_wrmask(vcpu);
1906 if (change) {
1907 v = cur ^ change;
1908 write_gc0_config2(v);
1909 }
1910 break;
1911 case KVM_REG_MIPS_CP0_CONFIG3:
1912 if (!cpu_guest_has_conf3)
1913 break;
1914 cur = read_gc0_config3();
1915 change = (cur ^ v) & kvm_vz_config3_user_wrmask(vcpu);
1916 if (change) {
1917 v = cur ^ change;
1918 write_gc0_config3(v);
1919 }
1920 break;
1921 case KVM_REG_MIPS_CP0_CONFIG4:
1922 if (!cpu_guest_has_conf4)
1923 break;
1924 cur = read_gc0_config4();
1925 change = (cur ^ v) & kvm_vz_config4_user_wrmask(vcpu);
1926 if (change) {
1927 v = cur ^ change;
1928 write_gc0_config4(v);
1929 }
1930 break;
1931 case KVM_REG_MIPS_CP0_CONFIG5:
1932 if (!cpu_guest_has_conf5)
1933 break;
1934 cur = read_gc0_config5();
1935 change = (cur ^ v) & kvm_vz_config5_user_wrmask(vcpu);
1936 if (change) {
1937 v = cur ^ change;
1938 write_gc0_config5(v);
1939 }
1940 break;
1941#ifdef CONFIG_64BIT
1942 case KVM_REG_MIPS_CP0_XCONTEXT:
1943 write_gc0_xcontext(v);
1944 break;
1945#endif
1946 case KVM_REG_MIPS_CP0_ERROREPC:
1947 write_gc0_errorepc(v);
1948 break;
1949 case KVM_REG_MIPS_CP0_KSCRATCH1 ... KVM_REG_MIPS_CP0_KSCRATCH6:
1950 idx = reg->id - KVM_REG_MIPS_CP0_KSCRATCH1 + 2;
1951 if (!cpu_guest_has_kscr(idx))
1952 return -EINVAL;
1953 switch (idx) {
1954 case 2:
1955 write_gc0_kscratch1(v);
1956 break;
1957 case 3:
1958 write_gc0_kscratch2(v);
1959 break;
1960 case 4:
1961 write_gc0_kscratch3(v);
1962 break;
1963 case 5:
1964 write_gc0_kscratch4(v);
1965 break;
1966 case 6:
1967 write_gc0_kscratch5(v);
1968 break;
1969 case 7:
1970 write_gc0_kscratch6(v);
1971 break;
1972 }
1973 break;
1974 case KVM_REG_MIPS_COUNT_CTL:
1975 ret = kvm_mips_set_count_ctl(vcpu, v);
1976 break;
1977 case KVM_REG_MIPS_COUNT_RESUME:
1978 ret = kvm_mips_set_count_resume(vcpu, v);
1979 break;
1980 case KVM_REG_MIPS_COUNT_HZ:
1981 ret = kvm_mips_set_count_hz(vcpu, v);
1982 break;
1983 default:
1984 return -EINVAL;
1985 }
1986 return ret;
1987}
1988
1989#define guestid_cache(cpu) (cpu_data[cpu].guestid_cache)
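/*
 * Allocate a fresh GuestID for this physical CPU, in the same style as the
 * ASID allocator: the low GUESTID_MASK bits are written to GuestCtl1.ID,
 * while the upper bits act as a version number so that all previously
 * allocated GuestIDs can be invalidated cheaply when the ID space wraps.
 */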
1990static void kvm_vz_get_new_guestid(unsigned long cpu, struct kvm_vcpu *vcpu)
1991{
1992 unsigned long guestid = guestid_cache(cpu);
1993
1994 if (!(++guestid & GUESTID_MASK)) {
1995 if (cpu_has_vtag_icache)
1996 flush_icache_all();
1997
1998 if (!guestid) /* fix version if needed */
1999 guestid = GUESTID_FIRST_VERSION;
2000
2001 ++guestid; /* guestid 0 reserved for root */
2002
2003 /* start new guestid cycle */
2004 kvm_vz_local_flush_roottlb_all_guests();
2005 kvm_vz_local_flush_guesttlb_all();
2006 }
2007
2008 guestid_cache(cpu) = guestid;
2009}
2010
2011/* Returns 1 if the guest TLB may be clobbered */
2012static int kvm_vz_check_requests(struct kvm_vcpu *vcpu, int cpu)
2013{
2014 int ret = 0;
2015 int i;
2016
2017 if (!vcpu->requests)
2018 return 0;
2019
2020 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
2021 if (cpu_has_guestid) {
2022 /* Drop all GuestIDs for this VCPU */
2023 for_each_possible_cpu(i)
2024 vcpu->arch.vzguestid[i] = 0;
2025 /* This will clobber guest TLB contents too */
2026 ret = 1;
2027 }
2028 /*
2029 * For Root ASID Dealias (RAD) we don't do anything here, but we
2030 * still need the request to ensure we recheck asid_flush_mask.
2031 * We can still return 0 as only the root TLB will be affected
2032 * by a root ASID flush.
2033 */
2034 }
2035
2036 return ret;
2037}
2038
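/*
 * Save the wired guest TLB entries into the VCPU context so they can be
 * reloaded the next time this VCPU runs, growing the backing array if the
 * guest has raised CP0_Wired, and invalidating entries dropped since the
 * last save.
 */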
2039static void kvm_vz_vcpu_save_wired(struct kvm_vcpu *vcpu)
2040{
2041 unsigned int wired = read_gc0_wired();
2042 struct kvm_mips_tlb *tlbs;
2043 int i;
2044
2045 /* Expand the wired TLB array if necessary */
2046 wired &= MIPSR6_WIRED_WIRED;
2047 if (wired > vcpu->arch.wired_tlb_limit) {
2048 tlbs = krealloc(vcpu->arch.wired_tlb, wired *
2049 sizeof(*vcpu->arch.wired_tlb), GFP_ATOMIC);
2050 if (WARN_ON(!tlbs)) {
2051 /* Save whatever we can */
2052 wired = vcpu->arch.wired_tlb_limit;
2053 } else {
2054 vcpu->arch.wired_tlb = tlbs;
2055 vcpu->arch.wired_tlb_limit = wired;
2056 }
2057 }
2058
2059 if (wired)
2060 /* Save wired entries from the guest TLB */
2061 kvm_vz_save_guesttlb(vcpu->arch.wired_tlb, 0, wired);
2062 /* Invalidate any dropped entries since last time */
2063 for (i = wired; i < vcpu->arch.wired_tlb_used; ++i) {
2064 vcpu->arch.wired_tlb[i].tlb_hi = UNIQUE_GUEST_ENTRYHI(i);
2065 vcpu->arch.wired_tlb[i].tlb_lo[0] = 0;
2066 vcpu->arch.wired_tlb[i].tlb_lo[1] = 0;
2067 vcpu->arch.wired_tlb[i].tlb_mask = 0;
2068 }
2069 vcpu->arch.wired_tlb_used = wired;
2070}
2071
2072static void kvm_vz_vcpu_load_wired(struct kvm_vcpu *vcpu)
2073{
2074 /* Load wired entries into the guest TLB */
2075 if (vcpu->arch.wired_tlb)
2076 kvm_vz_load_guesttlb(vcpu->arch.wired_tlb, 0,
2077 vcpu->arch.wired_tlb_used);
2078}
2079
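/**
 * kvm_vz_vcpu_load_tlb() - Prepare guest TLB/GuestID state for this CPU.
 * @vcpu: VCPU about to run guest code.
 * @cpu:  Physical CPU the guest will run on.
 *
 * With GuestID support, revalidate (and if necessary reallocate) this VCPU's
 * GuestID for @cpu and program it into GuestCtl1. Without GuestID, flush the
 * shared guest TLB if another VCPU ran here last or this VCPU last ran
 * elsewhere, and allocate a fresh root ASID for the GPA mappings when the
 * current one is stale or a flush was requested via asid_flush_mask.
 */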
2080static void kvm_vz_vcpu_load_tlb(struct kvm_vcpu *vcpu, int cpu)
2081{
2082 struct kvm *kvm = vcpu->kvm;
2083 struct mm_struct *gpa_mm = &kvm->arch.gpa_mm;
2084 bool migrated;
2085
2086 /*
2087 * Are we entering guest context on a different CPU to last time?
2088 * If so, the VCPU's guest TLB state on this CPU may be stale.
2089 */
2090 migrated = (vcpu->arch.last_exec_cpu != cpu);
2091 vcpu->arch.last_exec_cpu = cpu;
2092
2093 /*
2094 * A vcpu's GuestID is set in GuestCtl1.ID when the vcpu is loaded and
2095 * remains set until another vcpu is loaded in its place. As a rule,
2096 * GuestRID remains zeroed while in root context unless the kernel is
2097 * busy manipulating guest TLB entries.
2098 */
2099 if (cpu_has_guestid) {
2100 /*
2101 * Check if our GuestID is of an older version and thus invalid.
2102 *
2103 * We also discard the stored GuestID if we've executed on
2104 * another CPU, as the guest mappings may have changed without
2105 * hypervisor knowledge.
2106 */
2107 if (migrated ||
2108 (vcpu->arch.vzguestid[cpu] ^ guestid_cache(cpu)) &
2109 GUESTID_VERSION_MASK) {
2110 kvm_vz_get_new_guestid(cpu, vcpu);
2111 vcpu->arch.vzguestid[cpu] = guestid_cache(cpu);
2112 trace_kvm_guestid_change(vcpu,
2113 vcpu->arch.vzguestid[cpu]);
2114 }
2115
2116 /* Restore GuestID */
2117 change_c0_guestctl1(GUESTID_MASK, vcpu->arch.vzguestid[cpu]);
2118 } else {
2119 /*
2120 * The Guest TLB only stores a single guest's TLB state, so
2121 * flush it if another VCPU has executed on this CPU.
2122 *
2123 * We also flush if we've executed on another CPU, as the guest
2124 * mappings may have changed without hypervisor knowledge.
2125 */
2126 if (migrated || last_exec_vcpu[cpu] != vcpu)
2127 kvm_vz_local_flush_guesttlb_all();
2128 last_exec_vcpu[cpu] = vcpu;
2129
2130 /*
2131 * Root ASID dealiases guest GPA mappings in the root TLB.
2132 * Allocate new root ASID if needed.
2133 */
2134 if (cpumask_test_and_clear_cpu(cpu, &kvm->arch.asid_flush_mask)
2135 || (cpu_context(cpu, gpa_mm) ^ asid_cache(cpu)) &
2136 asid_version_mask(cpu))
2137 get_new_mmu_context(gpa_mm, cpu);
2138 }
2139}
2140
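/**
 * kvm_vz_vcpu_load() - Load VCPU state onto this physical CPU.
 * @vcpu: VCPU being loaded.
 * @cpu:  Physical CPU being loaded onto.
 *
 * Restore CP0_Wired and, when the current task has PF_VCPU set (i.e. we are
 * being scheduled back in mid guest execution), the guest TLB and wired
 * entries. Restore the guest timer, and if another VCPU has run on this CPU
 * since we were last loaded, restore the full guest CP0 state from the
 * software copies in vcpu->arch.cop0.
 */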
2141static int kvm_vz_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
2142{
2143 struct mips_coproc *cop0 = vcpu->arch.cop0;
2144 bool migrated, all;
2145
2146 /*
2147 * Have we migrated to a different CPU?
2148 * If so, any old guest TLB state may be stale.
2149 */
2150 migrated = (vcpu->arch.last_sched_cpu != cpu);
2151
2152 /*
2153 * Was this the last VCPU to run on this CPU?
2154 * If not, any old guest state from this VCPU will have been clobbered.
2155 */
2156 all = migrated || (last_vcpu[cpu] != vcpu);
2157 last_vcpu[cpu] = vcpu;
2158
2159 /*
2160 * Restore CP0_Wired unconditionally as we clear it after use, and
2161 * restore wired guest TLB entries (while in guest context).
2162 */
2163 kvm_restore_gc0_wired(cop0);
2164 if (current->flags & PF_VCPU) {
2165 tlbw_use_hazard();
2166 kvm_vz_vcpu_load_tlb(vcpu, cpu);
2167 kvm_vz_vcpu_load_wired(vcpu);
2168 }
2169
2170 /*
2171 * Restore timer state regardless, as e.g. Cause.TI can change over time
2172 * if left unmaintained.
2173 */
2174 kvm_vz_restore_timer(vcpu);
2175
2176 /* Don't bother restoring registers multiple times unless necessary */
2177 if (!all)
2178 return 0;
2179
2180 /*
2181 * Restore config registers first, as some implementations restrict
2182 * writes to other registers when the corresponding feature bits aren't
2183 * set. For example Status.CU1 cannot be set unless Config1.FP is set.
2184 */
2185 kvm_restore_gc0_config(cop0);
2186 if (cpu_guest_has_conf1)
2187 kvm_restore_gc0_config1(cop0);
2188 if (cpu_guest_has_conf2)
2189 kvm_restore_gc0_config2(cop0);
2190 if (cpu_guest_has_conf3)
2191 kvm_restore_gc0_config3(cop0);
2192 if (cpu_guest_has_conf4)
2193 kvm_restore_gc0_config4(cop0);
2194 if (cpu_guest_has_conf5)
2195 kvm_restore_gc0_config5(cop0);
2196 if (cpu_guest_has_conf6)
2197 kvm_restore_gc0_config6(cop0);
2198 if (cpu_guest_has_conf7)
2199 kvm_restore_gc0_config7(cop0);
2200
2201 kvm_restore_gc0_index(cop0);
2202 kvm_restore_gc0_entrylo0(cop0);
2203 kvm_restore_gc0_entrylo1(cop0);
2204 kvm_restore_gc0_context(cop0);
2205 if (cpu_guest_has_contextconfig)
2206 kvm_restore_gc0_contextconfig(cop0);
2207#ifdef CONFIG_64BIT
2208 kvm_restore_gc0_xcontext(cop0);
2209 if (cpu_guest_has_contextconfig)
2210 kvm_restore_gc0_xcontextconfig(cop0);
2211#endif
2212 kvm_restore_gc0_pagemask(cop0);
2213 kvm_restore_gc0_pagegrain(cop0);
2214 kvm_restore_gc0_hwrena(cop0);
2215 kvm_restore_gc0_badvaddr(cop0);
2216 kvm_restore_gc0_entryhi(cop0);
2217 kvm_restore_gc0_status(cop0);
2218 kvm_restore_gc0_intctl(cop0);
2219 kvm_restore_gc0_epc(cop0);
2220 kvm_vz_write_gc0_ebase(kvm_read_sw_gc0_ebase(cop0));
2221 if (cpu_guest_has_userlocal)
2222 kvm_restore_gc0_userlocal(cop0);
2223
2224 kvm_restore_gc0_errorepc(cop0);
2225
2226 /* restore KScratch registers if enabled in guest */
2227 if (cpu_guest_has_conf4) {
2228 if (cpu_guest_has_kscr(2))
2229 kvm_restore_gc0_kscratch1(cop0);
2230 if (cpu_guest_has_kscr(3))
2231 kvm_restore_gc0_kscratch2(cop0);
2232 if (cpu_guest_has_kscr(4))
2233 kvm_restore_gc0_kscratch3(cop0);
2234 if (cpu_guest_has_kscr(5))
2235 kvm_restore_gc0_kscratch4(cop0);
2236 if (cpu_guest_has_kscr(6))
2237 kvm_restore_gc0_kscratch5(cop0);
2238 if (cpu_guest_has_kscr(7))
2239 kvm_restore_gc0_kscratch6(cop0);
2240 }
2241
2242 if (cpu_guest_has_badinstr)
2243 kvm_restore_gc0_badinstr(cop0);
2244 if (cpu_guest_has_badinstrp)
2245 kvm_restore_gc0_badinstrp(cop0);
2246
2247 if (cpu_guest_has_segments) {
2248 kvm_restore_gc0_segctl0(cop0);
2249 kvm_restore_gc0_segctl1(cop0);
2250 kvm_restore_gc0_segctl2(cop0);
2251 }
2252
2253 /* restore HTW registers */
2254 if (cpu_guest_has_htw) {
2255 kvm_restore_gc0_pwbase(cop0);
2256 kvm_restore_gc0_pwfield(cop0);
2257 kvm_restore_gc0_pwsize(cop0);
2258 kvm_restore_gc0_pwctl(cop0);
2259 }
2260
2261 /* restore Root.GuestCtl2 from unused Guest guestctl2 register */
2262 if (cpu_has_guestctl2)
2263 write_c0_guestctl2(
2264 cop0->reg[MIPS_CP0_GUESTCTL2][MIPS_CP0_GUESTCTL2_SEL]);
2265
2266 /*
2267 * Clear the linked load bit to break interrupted atomics. This prevents
2268 * an SC on the next VCPU from succeeding by matching an LL performed by
2269 * the previous VCPU.
2270 */
2271 if (cpu_guest_has_rw_llb)
2272 write_gc0_lladdr(0);
2273
2274 return 0;
2275}
2276
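/**
 * kvm_vz_vcpu_put() - Save VCPU state when leaving this physical CPU.
 * @vcpu: VCPU being put.
 * @cpu:  Physical CPU being vacated.
 *
 * Counterpart to kvm_vz_vcpu_load(): save the wired guest TLB entries (when
 * PF_VCPU is set), release the guest FPU/MSA context, and copy the live guest
 * CP0 registers back into the software copies in vcpu->arch.cop0, clearing
 * Guest.Wired so those TLB entries may be overwritten in the meantime.
 */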
2277static int kvm_vz_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
2278{
2279 struct mips_coproc *cop0 = vcpu->arch.cop0;
2280
2281 if (current->flags & PF_VCPU)
2282 kvm_vz_vcpu_save_wired(vcpu);
2283
2284 kvm_lose_fpu(vcpu);
2285
2286 kvm_save_gc0_index(cop0);
2287 kvm_save_gc0_entrylo0(cop0);
2288 kvm_save_gc0_entrylo1(cop0);
2289 kvm_save_gc0_context(cop0);
2290 if (cpu_guest_has_contextconfig)
2291 kvm_save_gc0_contextconfig(cop0);
2292#ifdef CONFIG_64BIT
2293 kvm_save_gc0_xcontext(cop0);
2294 if (cpu_guest_has_contextconfig)
2295 kvm_save_gc0_xcontextconfig(cop0);
2296#endif
2297 kvm_save_gc0_pagemask(cop0);
2298 kvm_save_gc0_pagegrain(cop0);
2299 kvm_save_gc0_wired(cop0);
2300 /* allow wired TLB entries to be overwritten */
2301 clear_gc0_wired(MIPSR6_WIRED_WIRED);
2302 kvm_save_gc0_hwrena(cop0);
2303 kvm_save_gc0_badvaddr(cop0);
2304 kvm_save_gc0_entryhi(cop0);
2305 kvm_save_gc0_status(cop0);
2306 kvm_save_gc0_intctl(cop0);
2307 kvm_save_gc0_epc(cop0);
2308 kvm_write_sw_gc0_ebase(cop0, kvm_vz_read_gc0_ebase());
2309 if (cpu_guest_has_userlocal)
2310 kvm_save_gc0_userlocal(cop0);
2311
2312 /* only save implemented config registers */
2313 kvm_save_gc0_config(cop0);
2314 if (cpu_guest_has_conf1)
2315 kvm_save_gc0_config1(cop0);
2316 if (cpu_guest_has_conf2)
2317 kvm_save_gc0_config2(cop0);
2318 if (cpu_guest_has_conf3)
2319 kvm_save_gc0_config3(cop0);
2320 if (cpu_guest_has_conf4)
2321 kvm_save_gc0_config4(cop0);
2322 if (cpu_guest_has_conf5)
2323 kvm_save_gc0_config5(cop0);
2324 if (cpu_guest_has_conf6)
2325 kvm_save_gc0_config6(cop0);
2326 if (cpu_guest_has_conf7)
2327 kvm_save_gc0_config7(cop0);
2328
2329 kvm_save_gc0_errorepc(cop0);
2330
2331 /* save KScratch registers if enabled in guest */
2332 if (cpu_guest_has_conf4) {
2333 if (cpu_guest_has_kscr(2))
2334 kvm_save_gc0_kscratch1(cop0);
2335 if (cpu_guest_has_kscr(3))
2336 kvm_save_gc0_kscratch2(cop0);
2337 if (cpu_guest_has_kscr(4))
2338 kvm_save_gc0_kscratch3(cop0);
2339 if (cpu_guest_has_kscr(5))
2340 kvm_save_gc0_kscratch4(cop0);
2341 if (cpu_guest_has_kscr(6))
2342 kvm_save_gc0_kscratch5(cop0);
2343 if (cpu_guest_has_kscr(7))
2344 kvm_save_gc0_kscratch6(cop0);
2345 }
2346
2347 if (cpu_guest_has_badinstr)
2348 kvm_save_gc0_badinstr(cop0);
2349 if (cpu_guest_has_badinstrp)
2350 kvm_save_gc0_badinstrp(cop0);
2351
2352 if (cpu_guest_has_segments) {
2353 kvm_save_gc0_segctl0(cop0);
2354 kvm_save_gc0_segctl1(cop0);
2355 kvm_save_gc0_segctl2(cop0);
2356 }
2357
2358 /* save HTW registers if enabled in guest */
2359 if (cpu_guest_has_htw &&
2360 kvm_read_sw_gc0_config3(cop0) & MIPS_CONF3_PW) {
2361 kvm_save_gc0_pwbase(cop0);
2362 kvm_save_gc0_pwfield(cop0);
2363 kvm_save_gc0_pwsize(cop0);
2364 kvm_save_gc0_pwctl(cop0);
2365 }
2366
2367 kvm_vz_save_timer(vcpu);
2368
2369 /* save Root.GuestCtl2 in unused Guest guestctl2 register */
2370 if (cpu_has_guestctl2)
2371 cop0->reg[MIPS_CP0_GUESTCTL2][MIPS_CP0_GUESTCTL2_SEL] =
2372 read_c0_guestctl2();
2373
2374 return 0;
2375}
2376
2377/**
2378 * kvm_vz_resize_guest_vtlb() - Attempt to resize guest VTLB.
2379 * @size: Number of guest VTLB entries (0 < @size <= root VTLB entries).
2380 *
2381 * Attempt to resize the guest VTLB by writing guest Config registers. This is
2382 * necessary for cores with a shared root/guest TLB to avoid overlap with wired
2383 * entries in the root VTLB.
2384 *
2385 * Returns: The resulting guest VTLB size.
2386 */
2387static unsigned int kvm_vz_resize_guest_vtlb(unsigned int size)
2388{
2389 unsigned int config4 = 0, ret = 0, limit;
2390
2391 /* Write MMUSize - 1 into guest Config registers */
2392 if (cpu_guest_has_conf1)
2393 change_gc0_config1(MIPS_CONF1_TLBS,
2394 (size - 1) << MIPS_CONF1_TLBS_SHIFT);
2395 if (cpu_guest_has_conf4) {
2396 config4 = read_gc0_config4();
2397 if (cpu_has_mips_r6 || (config4 & MIPS_CONF4_MMUEXTDEF) ==
2398 MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT) {
2399 config4 &= ~MIPS_CONF4_VTLBSIZEEXT;
2400 config4 |= ((size - 1) >> MIPS_CONF1_TLBS_SIZE) <<
2401 MIPS_CONF4_VTLBSIZEEXT_SHIFT;
2402 } else if ((config4 & MIPS_CONF4_MMUEXTDEF) ==
2403 MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT) {
2404 config4 &= ~MIPS_CONF4_MMUSIZEEXT;
2405 config4 |= ((size - 1) >> MIPS_CONF1_TLBS_SIZE) <<
2406 MIPS_CONF4_MMUSIZEEXT_SHIFT;
2407 }
2408 write_gc0_config4(config4);
2409 }
2410
2411 /*
2412 * Set Guest.Wired.Limit = 0 (i.e. no limit, up to Guest.MMUSize-1), unless
2413 * that would exceed Root.Wired.Limit, in which case use Root.Wired.Limit.
2414 * Guest.Wired.Wired is cleared at the same time so the write isn't dropped.
2415 */
2416 if (cpu_has_mips_r6) {
2417 limit = (read_c0_wired() & MIPSR6_WIRED_LIMIT) >>
2418 MIPSR6_WIRED_LIMIT_SHIFT;
2419 if (size - 1 <= limit)
2420 limit = 0;
2421 write_gc0_wired(limit << MIPSR6_WIRED_LIMIT_SHIFT);
2422 }
2423
2424 /* Read back MMUSize - 1 */
2425 back_to_back_c0_hazard();
2426 if (cpu_guest_has_conf1)
2427 ret = (read_gc0_config1() & MIPS_CONF1_TLBS) >>
2428 MIPS_CONF1_TLBS_SHIFT;
2429 if (config4) {
2430 if (cpu_has_mips_r6 || (config4 & MIPS_CONF4_MMUEXTDEF) ==
2431 MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT)
2432 ret |= ((config4 & MIPS_CONF4_VTLBSIZEEXT) >>
2433 MIPS_CONF4_VTLBSIZEEXT_SHIFT) <<
2434 MIPS_CONF1_TLBS_SIZE;
2435 else if ((config4 & MIPS_CONF4_MMUEXTDEF) ==
2436 MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT)
2437 ret |= ((config4 & MIPS_CONF4_MMUSIZEEXT) >>
2438 MIPS_CONF4_MMUSIZEEXT_SHIFT) <<
2439 MIPS_CONF1_TLBS_SIZE;
2440 }
2441 return ret + 1;
2442}
2443
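/**
 * kvm_vz_hardware_enable() - Enable VZ guest support on the current CPU.
 *
 * Size the guest VTLB so it doesn't overlap root wired entries, flush all
 * guest TLB and GuestID state, and configure GuestCtl0 (and GuestCtl0Ext if
 * present) to give the guest direct control of its CP0 context, MMU, Config
 * registers and CACHE operations.
 *
 * Returns: 0 on success, or -EINVAL if the usable guest VTLB size differs
 *	    between CPUs.
 */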
2444static int kvm_vz_hardware_enable(void)
2445{
2446 unsigned int mmu_size, guest_mmu_size, ftlb_size;
2447
2448 /*
2449 * ImgTec cores tend to use a shared root/guest TLB. To avoid overlap of
2450 * root wired and guest entries, the guest TLB may need resizing.
2451 */
2452 mmu_size = current_cpu_data.tlbsizevtlb;
2453 ftlb_size = current_cpu_data.tlbsize - mmu_size;
2454
2455 /* Try switching to maximum guest VTLB size for flush */
2456 guest_mmu_size = kvm_vz_resize_guest_vtlb(mmu_size);
2457 current_cpu_data.guest.tlbsize = guest_mmu_size + ftlb_size;
2458 kvm_vz_local_flush_guesttlb_all();
2459
2460 /*
2461 * Reduce to make space for root wired entries and at least 2 root
2462 * non-wired entries. This does assume that long-term wired entries
2463 * won't be added later.
2464 */
2465 guest_mmu_size = mmu_size - num_wired_entries() - 2;
2466 guest_mmu_size = kvm_vz_resize_guest_vtlb(guest_mmu_size);
2467 current_cpu_data.guest.tlbsize = guest_mmu_size + ftlb_size;
2468
2469 /*
2470 * Record the usable guest VTLB size; if another CPU has already recorded
2471 * one, check that it matches or we can't give the guest a consistent view.
2472 * A mismatch suggests an asymmetric number of root wired entries.
2473 */
2474 if (cmpxchg(&kvm_vz_guest_vtlb_size, 0, guest_mmu_size) &&
2475 WARN(guest_mmu_size != kvm_vz_guest_vtlb_size,
2476 "Available guest VTLB size mismatch"))
2477 return -EINVAL;
2478
2479 /*
2480 * Enable virtualization features granting guest direct control of
2481 * certain features:
2482 * CP0=1: Guest coprocessor 0 context.
2483 * AT=Guest: Guest MMU.
2484 * CG=1: Hit (virtual address) CACHE operations (optional).
2485 * CF=1: Guest Config registers.
2486 * CGI=1: Indexed flush CACHE operations (optional).
2487 */
2488 write_c0_guestctl0(MIPS_GCTL0_CP0 |
2489 (MIPS_GCTL0_AT_GUEST << MIPS_GCTL0_AT_SHIFT) |
2490 MIPS_GCTL0_CG | MIPS_GCTL0_CF);
2491 if (cpu_has_guestctl0ext)
2492 set_c0_guestctl0ext(MIPS_GCTL0EXT_CGI);
2493
2494 if (cpu_has_guestid) {
2495 write_c0_guestctl1(0);
2496 kvm_vz_local_flush_roottlb_all_guests();
2497
2498 GUESTID_MASK = current_cpu_data.guestid_mask;
2499 GUESTID_FIRST_VERSION = GUESTID_MASK + 1;
2500 GUESTID_VERSION_MASK = ~GUESTID_MASK;
2501
2502 current_cpu_data.guestid_cache = GUESTID_FIRST_VERSION;
2503 }
2504
2505 /* clear any pending injected virtual guest interrupts */
2506 if (cpu_has_guestctl2)
2507 clear_c0_guestctl2(0x3f << 10);
2508
2509 return 0;
2510}
2511
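/* Undo kvm_vz_hardware_enable(): flush guest TLB state and all GuestIDs. */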
2512static void kvm_vz_hardware_disable(void)
2513{
2514 kvm_vz_local_flush_guesttlb_all();
2515
2516 if (cpu_has_guestid) {
2517 write_c0_guestctl1(0);
2518 kvm_vz_local_flush_roottlb_all_guests();
2519 }
2520}
2521
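/* Report VZ-specific capabilities for KVM_CHECK_EXTENSION. */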
2522static int kvm_vz_check_extension(struct kvm *kvm, long ext)
2523{
2524 int r;
2525
2526 switch (ext) {
2527 case KVM_CAP_MIPS_VZ:
2528 /* we wouldn't be here unless cpu_has_vz */
2529 r = 1;
2530 break;
2531#ifdef CONFIG_64BIT
2532 case KVM_CAP_MIPS_64BIT:
2533 /* We support 64-bit registers/operations and addresses */
2534 r = 2;
2535 break;
2536#endif
2537 default:
2538 r = 0;
2539 break;
2540 }
2541
2542 return r;
2543}
2544
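/* Start this VCPU with no valid GuestID allocated on any CPU. */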
2545static int kvm_vz_vcpu_init(struct kvm_vcpu *vcpu)
2546{
2547 int i;
2548
2549 for_each_possible_cpu(i)
2550 vcpu->arch.vzguestid[i] = 0;
2551
2552 return 0;
2553}
2554
2555static void kvm_vz_vcpu_uninit(struct kvm_vcpu *vcpu)
2556{
2557 int cpu;
2558
2559 /*
2560 * If the VCPU is freed and reused as another VCPU, we don't want the
2561 * matching pointer wrongly hanging around in last_vcpu[] or
2562 * last_exec_vcpu[].
2563 */
2564 for_each_possible_cpu(cpu) {
2565 if (last_vcpu[cpu] == vcpu)
2566 last_vcpu[cpu] = NULL;
2567 if (last_exec_vcpu[cpu] == vcpu)
2568 last_exec_vcpu[cpu] = NULL;
2569 }
2570}
2571
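/**
 * kvm_vz_vcpu_setup() - Initialise a VCPU to architectural reset state.
 * @vcpu: VCPU to initialise.
 *
 * Set up the emulated count/compare timer (falling back to 100 MHz if the
 * host timer frequency is unknown or above 1GHz), seed the software CP0
 * copies with power-on values (PageGrain, Wired, Status, IntCtl, PRId,
 * EBase, the Config registers with unsupported feature bits masked out,
 * ContextConfig, SegCtl and HTW registers where implemented), and place the
 * PC at the reset vector.
 */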
2572static int kvm_vz_vcpu_setup(struct kvm_vcpu *vcpu)
2573{
2574 struct mips_coproc *cop0 = vcpu->arch.cop0;
2575 unsigned long count_hz = 100*1000*1000; /* default to 100 MHz */
2576
2577 /*
2578 * Start off the timer at the same frequency as the host timer, but the
2579 * soft timer doesn't handle frequencies greater than 1GHz yet.
2580 */
2581 if (mips_hpt_frequency && mips_hpt_frequency <= NSEC_PER_SEC)
2582 count_hz = mips_hpt_frequency;
2583 kvm_mips_init_count(vcpu, count_hz);
2584
2585 /*
2586 * Initialize guest register state to valid architectural reset state.
2587 */
2588
2589 /* PageGrain */
2590 if (cpu_has_mips_r6)
2591 kvm_write_sw_gc0_pagegrain(cop0, PG_RIE | PG_XIE | PG_IEC);
2592 /* Wired */
2593 if (cpu_has_mips_r6)
2594 kvm_write_sw_gc0_wired(cop0,
2595 read_gc0_wired() & MIPSR6_WIRED_LIMIT);
2596 /* Status */
2597 kvm_write_sw_gc0_status(cop0, ST0_BEV | ST0_ERL);
2598 if (cpu_has_mips_r6)
2599 kvm_change_sw_gc0_status(cop0, ST0_FR, read_gc0_status());
2600 /* IntCtl */
2601 kvm_write_sw_gc0_intctl(cop0, read_gc0_intctl() &
2602 (INTCTLF_IPFDC | INTCTLF_IPPCI | INTCTLF_IPTI));
2603 /* PRId */
2604 kvm_write_sw_gc0_prid(cop0, boot_cpu_data.processor_id);
2605 /* EBase */
2606 kvm_write_sw_gc0_ebase(cop0, (s32)0x80000000 | vcpu->vcpu_id);
2607 /* Config */
2608 kvm_save_gc0_config(cop0);
2609 /* architecturally writable (e.g. from guest) */
2610 kvm_change_sw_gc0_config(cop0, CONF_CM_CMASK,
2611 _page_cachable_default >> _CACHE_SHIFT);
2612 /* architecturally read only, but may be writable from root */
2613 kvm_change_sw_gc0_config(cop0, MIPS_CONF_MT, read_c0_config());
2614 if (cpu_guest_has_conf1) {
2615 kvm_set_sw_gc0_config(cop0, MIPS_CONF_M);
2616 /* Config1 */
2617 kvm_save_gc0_config1(cop0);
2618 /* architecturally read only, but may be writable from root */
2619 kvm_clear_sw_gc0_config1(cop0, MIPS_CONF1_C2 |
2620 MIPS_CONF1_MD |
2621 MIPS_CONF1_PC |
2622 MIPS_CONF1_WR |
2623 MIPS_CONF1_CA |
2624 MIPS_CONF1_FP);
2625 }
2626 if (cpu_guest_has_conf2) {
2627 kvm_set_sw_gc0_config1(cop0, MIPS_CONF_M);
2628 /* Config2 */
2629 kvm_save_gc0_config2(cop0);
2630 }
2631 if (cpu_guest_has_conf3) {
2632 kvm_set_sw_gc0_config2(cop0, MIPS_CONF_M);
2633 /* Config3 */
2634 kvm_save_gc0_config3(cop0);
2635 /* architecturally writable (e.g. from guest) */
2636 kvm_clear_sw_gc0_config3(cop0, MIPS_CONF3_ISA_OE);
2637 /* architecturally read only, but may be writable from root */
2638 kvm_clear_sw_gc0_config3(cop0, MIPS_CONF3_MSA |
2639 MIPS_CONF3_BPG |
2640 MIPS_CONF3_ULRI |
2641 MIPS_CONF3_DSP |
2642 MIPS_CONF3_CTXTC |
2643 MIPS_CONF3_ITL |
2644 MIPS_CONF3_LPA |
2645 MIPS_CONF3_VEIC |
2646 MIPS_CONF3_VINT |
2647 MIPS_CONF3_SP |
2648 MIPS_CONF3_CDMM |
2649 MIPS_CONF3_MT |
2650 MIPS_CONF3_SM |
2651 MIPS_CONF3_TL);
2652 }
2653 if (cpu_guest_has_conf4) {
2654 kvm_set_sw_gc0_config3(cop0, MIPS_CONF_M);
2655 /* Config4 */
2656 kvm_save_gc0_config4(cop0);
2657 }
2658 if (cpu_guest_has_conf5) {
2659 kvm_set_sw_gc0_config4(cop0, MIPS_CONF_M);
2660 /* Config5 */
2661 kvm_save_gc0_config5(cop0);
2662 /* architecturally writable (e.g. from guest) */
2663 kvm_clear_sw_gc0_config5(cop0, MIPS_CONF5_K |
2664 MIPS_CONF5_CV |
2665 MIPS_CONF5_MSAEN |
2666 MIPS_CONF5_UFE |
2667 MIPS_CONF5_FRE |
2668 MIPS_CONF5_SBRI |
2669 MIPS_CONF5_UFR);
2670 /* architecturally read only, but may be writable from root */
2671 kvm_clear_sw_gc0_config5(cop0, MIPS_CONF5_MRP);
2672 }
2673
2674 if (cpu_guest_has_contextconfig) {
2675 /* ContextConfig */
2676 kvm_write_sw_gc0_contextconfig(cop0, 0x007ffff0);
2677#ifdef CONFIG_64BIT
2678 /* XContextConfig */
2679 /* bits SEGBITS-13+3:4 set */
2680 kvm_write_sw_gc0_xcontextconfig(cop0,
2681 ((1ull << (cpu_vmbits - 13)) - 1) << 4);
2682#endif
2683 }
2684
2685 /* Implementation dependent, use the legacy layout */
2686 if (cpu_guest_has_segments) {
2687 /* SegCtl0, SegCtl1, SegCtl2 */
2688 kvm_write_sw_gc0_segctl0(cop0, 0x00200010);
2689 kvm_write_sw_gc0_segctl1(cop0, 0x00000002 |
2690 (_page_cachable_default >> _CACHE_SHIFT) <<
2691 (16 + MIPS_SEGCFG_C_SHIFT));
2692 kvm_write_sw_gc0_segctl2(cop0, 0x00380438);
2693 }
2694
2695 /* reset HTW registers */
2696 if (cpu_guest_has_htw && cpu_has_mips_r6) {
2697 /* PWField */
2698 kvm_write_sw_gc0_pwfield(cop0, 0x0c30c302);
2699 /* PWSize */
2700 kvm_write_sw_gc0_pwsize(cop0, 1 << MIPS_PWSIZE_PTW_SHIFT);
2701 }
2702
2703 /* start with no pending virtual guest interrupts */
2704 if (cpu_has_guestctl2)
2705 cop0->reg[MIPS_CP0_GUESTCTL2][MIPS_CP0_GUESTCTL2_SEL] = 0;
2706
2707 /* Put PC at reset vector */
2708 vcpu->arch.pc = CKSEG1ADDR(0x1fc00000);
2709
2710 return 0;
2711}
2712
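/*
 * Invalidate all GPA mappings for the VM. With GuestID, each VCPU drops its
 * GuestIDs via a TLB flush request; without GuestID, every CPU is marked in
 * asid_flush_mask so the shared root ASID is regenerated on next use.
 */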
2713static void kvm_vz_flush_shadow_all(struct kvm *kvm)
2714{
2715 if (cpu_has_guestid) {
2716 /* Flush GuestID for each VCPU individually */
2717 kvm_flush_remote_tlbs(kvm);
2718 } else {
2719 /*
2720 * For each CPU there is a single GPA ASID used by all VCPUs in
2721 * the VM, so it doesn't make sense for the VCPUs to handle
2722 * invalidation of these ASIDs individually.
2723 *
2724 * Instead mark all CPUs as needing ASID invalidation in
2725 * asid_flush_mask, and just use kvm_flush_remote_tlbs(kvm) to
2726 * kick any running VCPUs so they check asid_flush_mask.
2727 */
2728 cpumask_setall(&kvm->arch.asid_flush_mask);
2729 kvm_flush_remote_tlbs(kvm);
2730 }
2731}
2732
2733static void kvm_vz_flush_shadow_memslot(struct kvm *kvm,
2734 const struct kvm_memory_slot *slot)
2735{
2736 kvm_vz_flush_shadow_all(kvm);
2737}
2738
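/*
 * Called before resuming the guest after handling an exit: re-check pending
 * requests and reload guest TLB/GuestID state for this CPU, saving and
 * reloading the wired guest entries around any flush the requests trigger.
 */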
2739static void kvm_vz_vcpu_reenter(struct kvm_run *run, struct kvm_vcpu *vcpu)
2740{
2741 int cpu = smp_processor_id();
2742 int preserve_guest_tlb;
2743
2744 preserve_guest_tlb = kvm_vz_check_requests(vcpu, cpu);
2745
2746 if (preserve_guest_tlb)
2747 kvm_vz_vcpu_save_wired(vcpu);
2748
2749 kvm_vz_vcpu_load_tlb(vcpu, cpu);
2750
2751 if (preserve_guest_tlb)
2752 kvm_vz_vcpu_load_wired(vcpu);
2753}
2754
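/*
 * Deliver any pending guest interrupts, bring TLB/GuestID state up to date
 * for this CPU, run the guest via the generated entry code in
 * vcpu->arch.vcpu_run, and save the wired guest TLB entries on the way out.
 */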
2755static int kvm_vz_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
2756{
2757 int cpu = smp_processor_id();
2758 int r;
2759
2760 /* Check if we have any exceptions/interrupts pending */
2761 kvm_mips_deliver_interrupts(vcpu, read_gc0_cause());
2762
2763 kvm_vz_check_requests(vcpu, cpu);
2764 kvm_vz_vcpu_load_tlb(vcpu, cpu);
2765 kvm_vz_vcpu_load_wired(vcpu);
2766
2767 r = vcpu->arch.vcpu_run(run, vcpu);
2768
2769 kvm_vz_vcpu_save_wired(vcpu);
2770
2771 return r;
2772}
2773
2774static struct kvm_mips_callbacks kvm_vz_callbacks = {
2775 .handle_cop_unusable = kvm_trap_vz_handle_cop_unusable,
2776 .handle_tlb_mod = kvm_trap_vz_handle_tlb_st_miss,
2777 .handle_tlb_ld_miss = kvm_trap_vz_handle_tlb_ld_miss,
2778 .handle_tlb_st_miss = kvm_trap_vz_handle_tlb_st_miss,
2779 .handle_addr_err_st = kvm_trap_vz_no_handler,
2780 .handle_addr_err_ld = kvm_trap_vz_no_handler,
2781 .handle_syscall = kvm_trap_vz_no_handler,
2782 .handle_res_inst = kvm_trap_vz_no_handler,
2783 .handle_break = kvm_trap_vz_no_handler,
2784 .handle_msa_disabled = kvm_trap_vz_handle_msa_disabled,
2785 .handle_guest_exit = kvm_trap_vz_handle_guest_exit,
2786
2787 .hardware_enable = kvm_vz_hardware_enable,
2788 .hardware_disable = kvm_vz_hardware_disable,
2789 .check_extension = kvm_vz_check_extension,
2790 .vcpu_init = kvm_vz_vcpu_init,
2791 .vcpu_uninit = kvm_vz_vcpu_uninit,
2792 .vcpu_setup = kvm_vz_vcpu_setup,
2793 .flush_shadow_all = kvm_vz_flush_shadow_all,
2794 .flush_shadow_memslot = kvm_vz_flush_shadow_memslot,
2795 .gva_to_gpa = kvm_vz_gva_to_gpa_cb,
2796 .queue_timer_int = kvm_vz_queue_timer_int_cb,
2797 .dequeue_timer_int = kvm_vz_dequeue_timer_int_cb,
2798 .queue_io_int = kvm_vz_queue_io_int_cb,
2799 .dequeue_io_int = kvm_vz_dequeue_io_int_cb,
2800 .irq_deliver = kvm_vz_irq_deliver_cb,
2801 .irq_clear = kvm_vz_irq_clear_cb,
2802 .num_regs = kvm_vz_num_regs,
2803 .copy_reg_indices = kvm_vz_copy_reg_indices,
2804 .get_one_reg = kvm_vz_get_one_reg,
2805 .set_one_reg = kvm_vz_set_one_reg,
2806 .vcpu_load = kvm_vz_vcpu_load,
2807 .vcpu_put = kvm_vz_vcpu_put,
2808 .vcpu_run = kvm_vz_vcpu_run,
2809 .vcpu_reenter = kvm_vz_vcpu_reenter,
2810};
2811
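/*
 * Entry point for the VZ implementation: check that the CPU supports VZ and
 * that a KScratch register was allocated for the pgd pointer, then hand the
 * VZ callback table to the core MIPS KVM code.
 */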
2812int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
2813{
2814 if (!cpu_has_vz)
2815 return -ENODEV;
2816
2817 /*
2818 * VZ requires at least 2 KScratch registers, so it should have been
2819 * possible to allocate pgd_reg.
2820 */
2821 if (WARN(pgd_reg == -1,
2822 "pgd_reg not allocated even though cpu_has_vz\n"))
2823 return -ENODEV;
2824
2825 pr_info("Starting KVM with MIPS VZ extensions\n");
2826
2827 *install_callbacks = &kvm_vz_callbacks;
2828 return 0;
2829}