/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS TLB handling, this file is part of the Linux host kernel so that
 * TLB handlers run from KSEG0
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/kvm_host.h>
#include <linux/srcu.h>

#include <asm/cpu.h>
#include <asm/bootinfo.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlb.h>

/* Pull in the single-core (non-MT) variants of the r4kcache ops */
#undef CONFIG_MIPS_MT
#include <asm/r4kcache.h>
#define CONFIG_MIPS_MT
#define KVM_GUEST_PC_TLB    0
#define KVM_GUEST_SP_TLB    1

/* The kernel has no <inttypes.h>; printk formats below rely on this */
#define PRIx64 "llx"

atomic_t kvm_mips_instance;
EXPORT_SYMBOL(kvm_mips_instance);
/* These function pointers are initialized once the KVM module is loaded */
pfn_t (*kvm_mips_gfn_to_pfn)(struct kvm *kvm, gfn_t gfn);
EXPORT_SYMBOL(kvm_mips_gfn_to_pfn);

void (*kvm_mips_release_pfn_clean)(pfn_t pfn);
EXPORT_SYMBOL(kvm_mips_release_pfn_clean);

bool (*kvm_mips_is_error_pfn)(pfn_t pfn);
EXPORT_SYMBOL(kvm_mips_is_error_pfn);
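/*
 * Guest kernel and guest user contexts run under separate host ASIDs.
 * These helpers return the host ASID currently shadowing each guest
 * context on this CPU (callers are expected to run with preemption
 * disabled, so smp_processor_id() is stable).
 */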
uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.guest_kernel_asid[smp_processor_id()] & ASID_MASK;
}

uint32_t kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.guest_user_asid[smp_processor_id()] & ASID_MASK;
}

inline uint32_t kvm_mips_get_commpage_asid(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.commpage_tlb;
}
/*
 * Structure defining a TLB entry data set.
 */
void kvm_mips_dump_host_tlbs(void)
{
	unsigned long old_entryhi;
	unsigned long old_pagemask;
	struct kvm_mips_tlb tlb;
	unsigned long flags;
	int i;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();

	printk("HOST TLBs:\n");
	printk("ASID: %#lx\n", read_c0_entryhi() & ASID_MASK);

	for (i = 0; i < current_cpu_data.tlbsize; i++) {
		write_c0_index(i);
		mtc0_tlbw_hazard();

		tlb_read();
		tlbw_use_hazard();

		tlb.tlb_hi = read_c0_entryhi();
		tlb.tlb_lo0 = read_c0_entrylo0();
		tlb.tlb_lo1 = read_c0_entrylo1();
		tlb.tlb_mask = read_c0_pagemask();

		printk("TLB%c%3d Hi 0x%08lx ",
		       (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
		       i, tlb.tlb_hi);
		printk("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
		       (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
		       (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
		       (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
		       (tlb.tlb_lo0 >> 3) & 7);
		printk("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
		       (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
		       (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
		       (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
		       (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
	}

	write_c0_entryhi(old_entryhi);
	write_c0_pagemask(old_pagemask);

	local_irq_restore(flags);
}
void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_mips_tlb tlb;
	int i;

	printk("Guest TLBs:\n");
	printk("Guest EntryHi: %#lx\n", kvm_read_c0_guest_entryhi(cop0));

	for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
		tlb = vcpu->arch.guest_tlb[i];
		printk("TLB%c%3d Hi 0x%08lx ",
		       (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
		       i, tlb.tlb_hi);
		printk("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
		       (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
		       (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
		       (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
		       (tlb.tlb_lo0 >> 3) & 7);
		printk("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
		       (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
		       (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
		       (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
		       (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
	}
}
static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
{
	int srcu_idx, err = 0;
	pfn_t pfn;

	if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE)
		return 0;

	srcu_idx = srcu_read_lock(&kvm->srcu);
	pfn = kvm_mips_gfn_to_pfn(kvm, gfn);

	if (kvm_mips_is_error_pfn(pfn)) {
		kvm_err("Couldn't get pfn for gfn %#" PRIx64 "!\n", gfn);
		err = -EFAULT;
		goto out;
	}

	kvm->arch.guest_pmap[gfn] = pfn;
out:
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	return err;
}
/* Translate guest KSEG0 addresses to Host PA */
unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
						    unsigned long gva)
{
	gfn_t gfn;
	uint32_t offset = gva & ~PAGE_MASK;
	struct kvm *kvm = vcpu->kvm;

	if (KVM_GUEST_KSEGX(gva) != KVM_GUEST_KSEG0) {
		kvm_err("%s/%p: Invalid gva: %#lx\n", __func__,
			__builtin_return_address(0), gva);
		return KVM_INVALID_PAGE;
	}

	gfn = (KVM_GUEST_CPHYSADDR(gva) >> PAGE_SHIFT);

	if (gfn >= kvm->arch.guest_pmap_npages) {
		kvm_err("%s: Invalid gfn: %#llx, GVA: %#lx\n", __func__, gfn,
			gva);
		return KVM_INVALID_PAGE;
	}

	if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
		return KVM_INVALID_ADDR;

	return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset;
}
/* XXXKYMA: Must be called with interrupts disabled */
/* set flush_dcache_mask == 0 if no dcache flush required */
int kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
			    unsigned long entrylo0, unsigned long entrylo1,
			    int flush_dcache_mask)
{
	unsigned long flags;
	unsigned long old_entryhi;
	volatile int idx;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	write_c0_entryhi(entryhi);
	mtc0_tlbw_hazard();

	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();

	if (idx > current_cpu_data.tlbsize) {
		kvm_err("%s: Invalid Index: %d\n", __func__, idx);
		kvm_mips_dump_host_tlbs();
		write_c0_entryhi(old_entryhi);
		mtc0_tlbw_hazard();
		local_irq_restore(flags);
		return -1;
	}

	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();

	/* No matching entry: write a random slot, else update in place */
	if (idx < 0)
		tlb_write_random();
	else
		tlb_write_indexed();
	tlbw_use_hazard();

	kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] "
		  "entrylo0(R): 0x%08lx, entrylo1(R): 0x%08lx\n",
		  vcpu->arch.pc, idx, read_c0_entryhi(),
		  read_c0_entrylo0(), read_c0_entrylo1());

	/* Flush D-cache */
	if (flush_dcache_mask) {
		if (entrylo0 & MIPS3_PG_V) {
			++vcpu->stat.flush_dcache_exits;
			flush_data_cache_page((entryhi & VPN2_MASK) &
					      ~flush_dcache_mask);
		}
		if (entrylo1 & MIPS3_PG_V) {
			++vcpu->stat.flush_dcache_exits;
			flush_data_cache_page(((entryhi & VPN2_MASK) &
					       ~flush_dcache_mask) |
					      (0x1 << PAGE_SHIFT));
		}
	}

	/* Restore old ASID */
	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();
	local_irq_restore(flags);
	return 0;
}
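/*
 * Fault in a guest KSEG0 page: the guest physical pages are faulted in as
 * an even/odd pair so that both halves of the host TLB entry (lo0/lo1)
 * can be filled with a single write.
 */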
/* XXXKYMA: Must be called with interrupts disabled */
int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
				    struct kvm_vcpu *vcpu)
{
	gfn_t gfn;
	pfn_t pfn0, pfn1;
	unsigned long vaddr = 0;
	unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
	int even;
	struct kvm *kvm = vcpu->kvm;
	const int flush_dcache_mask = 0;

	if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) {
		kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr);
		kvm_mips_dump_host_tlbs();
		return -1;
	}

	gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT);
	if (gfn >= kvm->arch.guest_pmap_npages) {
		kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__,
			gfn, badvaddr);
		kvm_mips_dump_host_tlbs();
		return -1;
	}

	even = !(gfn & 0x1);
	vaddr = badvaddr & (PAGE_MASK << 1);

	if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
		return -1;
	if (kvm_mips_map_page(vcpu->kvm, gfn ^ 0x1) < 0)
		return -1;

	if (even) {
		pfn0 = kvm->arch.guest_pmap[gfn];
		pfn1 = kvm->arch.guest_pmap[gfn ^ 0x1];
	} else {
		pfn0 = kvm->arch.guest_pmap[gfn ^ 0x1];
		pfn1 = kvm->arch.guest_pmap[gfn];
	}

	entryhi = (vaddr | kvm_mips_get_kernel_asid(vcpu));
	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
		   (1 << 2) | (0x1 << 1);
	entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
		   (1 << 2) | (0x1 << 1);

	return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
				       flush_dcache_mask);
}
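/*
 * Map the guest commpage. Note that kvm_mips_get_commpage_asid() actually
 * returns a TLB *index* (kvm->arch.commpage_tlb) despite its name; the
 * commpage is pinned into that reserved slot via an indexed TLB write.
 */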
int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
				       struct kvm_vcpu *vcpu)
{
	pfn_t pfn0, pfn1;
	unsigned long flags, old_entryhi = 0, vaddr = 0;
	unsigned long entrylo0 = 0, entrylo1 = 0;

	pfn0 = CPHYSADDR(vcpu->arch.kseg0_commpage) >> PAGE_SHIFT;
	pfn1 = 0;
	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
		   (1 << 2) | (0x1 << 1);
	entrylo1 = 0;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	vaddr = badvaddr & (PAGE_MASK << 1);
	write_c0_entryhi(vaddr | kvm_mips_get_kernel_asid(vcpu));
	mtc0_tlbw_hazard();
	write_c0_entrylo0(entrylo0);
	mtc0_tlbw_hazard();
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	write_c0_index(kvm_mips_get_commpage_asid(vcpu));
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0(R): 0x%08lx, entrylo1(R): 0x%08lx\n",
		  vcpu->arch.pc, read_c0_index(), read_c0_entryhi(),
		  read_c0_entrylo0(), read_c0_entrylo1());

	/* Restore old ASID */
	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();
	local_irq_restore(flags);

	return 0;
}
int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
					 struct kvm_mips_tlb *tlb,
					 unsigned long *hpa0,
					 unsigned long *hpa1)
{
	unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
	struct kvm *kvm = vcpu->kvm;
	pfn_t pfn0, pfn1;

	if ((tlb->tlb_hi & VPN2_MASK) == 0) {
		pfn0 = 0;
		pfn1 = 0;
	} else {
		if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT) < 0)
			return -1;
		if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT) < 0)
			return -1;

		pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT];
		pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT];
	}

	if (hpa0)
		*hpa0 = pfn0 << PAGE_SHIFT;
	if (hpa1)
		*hpa1 = pfn1 << PAGE_SHIFT;

	/* Get attributes from the Guest TLB */
	entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ?
		   kvm_mips_get_kernel_asid(vcpu) :
		   kvm_mips_get_user_asid(vcpu));
	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
		   (tlb->tlb_lo0 & MIPS3_PG_D) | (tlb->tlb_lo0 & MIPS3_PG_V);
	entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
		   (tlb->tlb_lo1 & MIPS3_PG_D) | (tlb->tlb_lo1 & MIPS3_PG_V);

	kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
		  tlb->tlb_lo0, tlb->tlb_lo1);

	return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
				       tlb->tlb_mask);
}
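/*
 * Look up @entryhi (VPN2 | ASID) in the guest's TLB. Returns the matching
 * guest TLB index, or -1 if no entry matches. A global entry matches
 * regardless of ASID.
 */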
int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
{
	int i;
	int index = -1;
	struct kvm_mips_tlb *tlb = vcpu->arch.guest_tlb;

	for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
		if (((TLB_VPN2(tlb[i]) & ~tlb[i].tlb_mask) ==
		     ((entryhi & VPN2_MASK) & ~tlb[i].tlb_mask)) &&
		    (TLB_IS_GLOBAL(tlb[i]) ||
		     (TLB_ASID(tlb[i]) == (entryhi & ASID_MASK)))) {
			index = i;
			break;
		}
	}

	/* Only dereference tlb[] on a hit; i is out of range on a miss */
	if (index >= 0)
		kvm_debug("%s: entryhi: %#lx, index: %d lo0: %#lx, lo1: %#lx\n",
			  __func__, entryhi, index,
			  tlb[index].tlb_lo0, tlb[index].tlb_lo1);

	return index;
}
int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr)
{
	unsigned long old_entryhi, flags;
	volatile int idx;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();

	if (KVM_GUEST_KERNEL_MODE(vcpu))
		write_c0_entryhi((vaddr & VPN2_MASK) | kvm_mips_get_kernel_asid(vcpu));
	else
		write_c0_entryhi((vaddr & VPN2_MASK) | kvm_mips_get_user_asid(vcpu));
	mtc0_tlbw_hazard();

	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();

	/* Restore old ASID */
	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	local_irq_restore(flags);

	kvm_debug("Host TLB lookup, %#lx, idx: %2d\n", vaddr, idx);

	return idx;
}
int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va)
{
	int idx;
	unsigned long flags, old_entryhi;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();

	write_c0_entryhi((va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu));
	mtc0_tlbw_hazard();

	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();

	if (idx >= current_cpu_data.tlbsize)
		BUG();

	/* idx < 0 means no match; index 0 is a valid entry */
	if (idx >= 0) {
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		mtc0_tlbw_hazard();
		write_c0_entrylo0(0);
		mtc0_tlbw_hazard();
		write_c0_entrylo1(0);
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		mtc0_tlbw_hazard();
	}

	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	local_irq_restore(flags);

	if (idx >= 0)
		kvm_debug("%s: Invalidated entryhi %#lx @ idx %d\n", __func__,
			  (va & VPN2_MASK) | (vcpu->arch.asid_map[va & ASID_MASK] & ASID_MASK),
			  idx);

	return 0;
}
/* XXXKYMA: Fix Guest USER/KERNEL no longer share the same ASID */
int kvm_mips_host_tlb_inv_index(struct kvm_vcpu *vcpu, int index)
{
	unsigned long flags, old_entryhi;

	if (index >= current_cpu_data.tlbsize)
		BUG();

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();

	write_c0_entryhi(UNIQUE_ENTRYHI(index));
	mtc0_tlbw_hazard();

	write_c0_index(index);
	mtc0_tlbw_hazard();

	write_c0_entrylo0(0);
	mtc0_tlbw_hazard();

	write_c0_entrylo1(0);
	mtc0_tlbw_hazard();

	tlb_write_indexed();
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	local_irq_restore(flags);

	return 0;
}
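/*
 * Flush the host TLB. If @skip_kseg0 is set, entries whose VPN2 falls in
 * guest KSEG0 are preserved so that guest kernel mappings survive the
 * flush.
 */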
void kvm_mips_flush_host_tlb(int skip_kseg0)
{
	unsigned long flags;
	unsigned long old_entryhi, entryhi;
	unsigned long old_pagemask;
	int entry = 0;
	int maxentry = current_cpu_data.tlbsize;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();

	/* Blast 'em all away. */
	for (entry = 0; entry < maxentry; entry++) {
		write_c0_index(entry);
		mtc0_tlbw_hazard();

		if (skip_kseg0) {
			tlb_read();
			tlbw_use_hazard();

			entryhi = read_c0_entryhi();

			/* Don't blow away guest kernel entries */
			if (KVM_GUEST_KSEGX(entryhi) == KVM_GUEST_KSEG0)
				continue;
		}

		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
		mtc0_tlbw_hazard();
		write_c0_entrylo0(0);
		mtc0_tlbw_hazard();
		write_c0_entrylo1(0);
		mtc0_tlbw_hazard();

		tlb_write_indexed();
		mtc0_tlbw_hazard();
	}

	write_c0_entryhi(old_entryhi);
	write_c0_pagemask(old_pagemask);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	local_irq_restore(flags);
}
void
kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
			struct kvm_vcpu *vcpu)
{
	unsigned long asid = asid_cache(cpu);

	if (!((asid += ASID_INC) & ASID_MASK)) {
		if (cpu_has_vtag_icache)
			flush_icache_all();

		kvm_local_flush_tlb_all();	/* start new asid cycle */

		if (!asid)	/* fix version if needed */
			asid = ASID_FIRST_VERSION;
	}

	cpu_context(cpu, mm) = asid_cache(cpu) = asid;
}
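/*
 * Local counterpart of flush_tlb_all(): invalidate every entry on this
 * CPU by writing a unique (impossible) VPN2 into each slot.
 */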
void kvm_local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry = 0;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);

	/* Blast 'em all away. */
	while (entry < current_cpu_data.tlbsize) {
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
		write_c0_index(entry);
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		entry++;
	}
	tlbw_use_hazard();

	write_c0_entryhi(old_ctx);
	mtc0_tlbw_hazard();

	local_irq_restore(flags);
}
/**
 * kvm_mips_migrate_count() - Migrate timer.
 * @vcpu:	Virtual CPU.
 *
 * Migrate CP0_Count hrtimer to the current CPU by cancelling and restarting it
 * if it was running prior to being cancelled.
 *
 * Must be called when the VCPU is migrated to a different CPU to ensure that
 * timer expiry during guest execution interrupts the guest and causes the
 * interrupt to be delivered in a timely manner.
 */
static void kvm_mips_migrate_count(struct kvm_vcpu *vcpu)
{
	if (hrtimer_cancel(&vcpu->arch.comparecount_timer))
		hrtimer_restart(&vcpu->arch.comparecount_timer);
}
/* Restore ASID once we are scheduled back after preemption */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	unsigned long flags;
	int newasid = 0;

	kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);

	/* Allocate new kernel and user ASIDs if needed */

	local_irq_save(flags);

	if (((vcpu->arch.guest_kernel_asid[cpu] ^ asid_cache(cpu)) &
	     ASID_VERSION_MASK)) {
		kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu);
		vcpu->arch.guest_kernel_asid[cpu] =
		    vcpu->arch.guest_kernel_mm.context.asid[cpu];
		kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu);
		vcpu->arch.guest_user_asid[cpu] =
		    vcpu->arch.guest_user_mm.context.asid[cpu];
		newasid++;

		kvm_info("[%d]: cpu_context: %#lx\n", cpu,
			 cpu_context(cpu, current->mm));
		kvm_info("[%d]: Allocated new ASID for Guest Kernel: %#x\n",
			 cpu, vcpu->arch.guest_kernel_asid[cpu]);
		kvm_info("[%d]: Allocated new ASID for Guest User: %#x\n", cpu,
			 vcpu->arch.guest_user_asid[cpu]);
	}

	if (vcpu->arch.last_sched_cpu != cpu) {
		kvm_info("[%d->%d]KVM VCPU[%d] switch\n",
			 vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
		/*
		 * Migrate the timer interrupt to the current CPU so that it
		 * always interrupts the guest and synchronously triggers a
		 * guest timer interrupt.
		 */
		kvm_mips_migrate_count(vcpu);
	}

	if (!newasid) {
		/*
		 * If we preempted while the guest was executing, then reload
		 * the pre-empted ASID
		 */
		if (current->flags & PF_VCPU) {
			write_c0_entryhi(vcpu->arch.preempt_entryhi &
					 ASID_MASK);
			ehb();
		}
	} else {
		/* New ASIDs were allocated for the VM */

		/*
		 * Were we in guest context? If so then the pre-empted ASID is
		 * no longer valid, we need to set it to what it should be
		 * based on the mode of the Guest (Kernel/User)
		 */
		if (current->flags & PF_VCPU) {
			if (KVM_GUEST_KERNEL_MODE(vcpu))
				write_c0_entryhi(vcpu->arch.guest_kernel_asid[cpu] &
						 ASID_MASK);
			else
				write_c0_entryhi(vcpu->arch.guest_user_asid[cpu] &
						 ASID_MASK);
			ehb();
		}
	}

	local_irq_restore(flags);
}
/* ASID can change if another task is scheduled during preemption */
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	unsigned long flags;
	uint32_t cpu;

	local_irq_save(flags);

	cpu = smp_processor_id();

	vcpu->arch.preempt_entryhi = read_c0_entryhi();
	vcpu->arch.last_sched_cpu = cpu;

	if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
	     ASID_VERSION_MASK)) {
		kvm_debug("%s: Dropping MMU Context: %#lx\n", __func__,
			  cpu_context(cpu, current->mm));
		drop_mmu_context(current->mm, cpu);
	}
	write_c0_entryhi(cpu_asid(cpu, current->mm));
	ehb();

	local_irq_restore(flags);
}
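/*
 * Fetch a guest instruction at @opc. For mapped segments the host TLB is
 * probed first and, on a miss, the mapping is faulted in from the guest
 * TLB before the read; guest KSEG0 is translated straight to a host
 * physical address and read through CKSEG0.
 */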
uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned long paddr, flags;
	uint32_t inst;
	int index;

	if (KVM_GUEST_KSEGX((unsigned long) opc) < KVM_GUEST_KSEG0 ||
	    KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
		local_irq_save(flags);
		index = kvm_mips_host_tlb_lookup(vcpu, (unsigned long) opc);
		if (index >= 0) {
			inst = *(opc);
		} else {
			index = kvm_mips_guest_tlb_lookup(vcpu,
					((unsigned long) opc & VPN2_MASK) |
					(kvm_read_c0_guest_entryhi(cop0) &
					 ASID_MASK));
			if (index < 0) {
				kvm_err("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n",
					__func__, opc, vcpu, read_c0_entryhi());
				kvm_mips_dump_host_tlbs();
				local_irq_restore(flags);
				return KVM_INVALID_INST;
			}
			kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
					&vcpu->arch.guest_tlb[index],
					NULL, NULL);
			inst = *(opc);
		}
		local_irq_restore(flags);
	} else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG0) {
		paddr = kvm_mips_translate_guest_kseg0_to_hpa(vcpu,
					(unsigned long) opc);
		inst = *(uint32_t *) CKSEG0ADDR(paddr);
	} else {
		kvm_err("%s: illegal address: %p\n", __func__, opc);
		return KVM_INVALID_INST;
	}

	return inst;
}
EXPORT_SYMBOL(kvm_local_flush_tlb_all);
EXPORT_SYMBOL(kvm_mips_handle_mapped_seg_tlb_fault);
EXPORT_SYMBOL(kvm_mips_handle_commpage_tlb_fault);
EXPORT_SYMBOL(kvm_mips_dump_host_tlbs);
EXPORT_SYMBOL(kvm_mips_handle_kseg0_tlb_fault);
EXPORT_SYMBOL(kvm_mips_host_tlb_lookup);
EXPORT_SYMBOL(kvm_mips_flush_host_tlb);
EXPORT_SYMBOL(kvm_mips_guest_tlb_lookup);
EXPORT_SYMBOL(kvm_mips_host_tlb_inv);
EXPORT_SYMBOL(kvm_mips_translate_guest_kseg0_to_hpa);
EXPORT_SYMBOL(kvm_mips_dump_guest_tlbs);
EXPORT_SYMBOL(kvm_get_inst);
EXPORT_SYMBOL(kvm_arch_vcpu_load);
EXPORT_SYMBOL(kvm_arch_vcpu_put);