/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS TLB handling, this file is part of the Linux host kernel so that
 * TLB handlers run from KSEG0
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/kvm_host.h>
#include <linux/srcu.h>

#include <asm/cpu.h>
#include <asm/bootinfo.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlb.h>

#undef CONFIG_MIPS_MT
#include <asm/r4kcache.h>
#define CONFIG_MIPS_MT

#define KVM_GUEST_PC_TLB	0
#define KVM_GUEST_SP_TLB	1

#define PRIx64 "llx"

atomic_t kvm_mips_instance;
EXPORT_SYMBOL_GPL(kvm_mips_instance);

/* These function pointers are initialized once the KVM module is loaded */
kvm_pfn_t (*kvm_mips_gfn_to_pfn)(struct kvm *kvm, gfn_t gfn);
EXPORT_SYMBOL_GPL(kvm_mips_gfn_to_pfn);

void (*kvm_mips_release_pfn_clean)(kvm_pfn_t pfn);
EXPORT_SYMBOL_GPL(kvm_mips_release_pfn_clean);

bool (*kvm_mips_is_error_pfn)(kvm_pfn_t pfn);
EXPORT_SYMBOL_GPL(kvm_mips_is_error_pfn);
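
/*
 * Each VCPU keeps separate host ASIDs for guest kernel mode and guest user
 * mode, tracked per host CPU. The accessors below return the ASID currently
 * assigned on this CPU; note that the smp_processor_id() use assumes the
 * caller has preemption disabled.
 */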
uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.guest_kernel_asid[smp_processor_id()] & ASID_MASK;
}

uint32_t kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.guest_user_asid[smp_processor_id()] & ASID_MASK;
}

inline uint32_t kvm_mips_get_commpage_asid(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.commpage_tlb;
}

/* Structure defining a TLB entry data set. */

void kvm_mips_dump_host_tlbs(void)
{
	unsigned long old_entryhi;
	unsigned long old_pagemask;
	struct kvm_mips_tlb tlb;
	unsigned long flags;
	int i;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();

	kvm_info("HOST TLBs:\n");
	kvm_info("ASID: %#lx\n", read_c0_entryhi() & ASID_MASK);

	for (i = 0; i < current_cpu_data.tlbsize; i++) {
		write_c0_index(i);
		mtc0_tlbw_hazard();

		tlb_read();
		tlbw_use_hazard();

		tlb.tlb_hi = read_c0_entryhi();
		tlb.tlb_lo0 = read_c0_entrylo0();
		tlb.tlb_lo1 = read_c0_entrylo1();
		tlb.tlb_mask = read_c0_pagemask();

		kvm_info("TLB%c%3d Hi 0x%08lx ",
			 (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
			 i, tlb.tlb_hi);
		kvm_info("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
			 (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
			 (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
			 (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
			 (tlb.tlb_lo0 >> 3) & 7);
		kvm_info("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
			 (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
			 (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
			 (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
			 (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
	}
	write_c0_entryhi(old_entryhi);
	write_c0_pagemask(old_pagemask);
	mtc0_tlbw_hazard();
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_mips_dump_host_tlbs);

void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_mips_tlb tlb;
	int i;

	kvm_info("Guest TLBs:\n");
	kvm_info("Guest EntryHi: %#lx\n", kvm_read_c0_guest_entryhi(cop0));

	for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
		tlb = vcpu->arch.guest_tlb[i];
		kvm_info("TLB%c%3d Hi 0x%08lx ",
			 (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
			 i, tlb.tlb_hi);
		kvm_info("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
			 (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
			 (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
			 (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
			 (tlb.tlb_lo0 >> 3) & 7);
		kvm_info("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
			 (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
			 (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
			 (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
			 (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
	}
}
EXPORT_SYMBOL_GPL(kvm_mips_dump_guest_tlbs);
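
/*
 * Lazily populate the guest physical page map: guest_pmap[] caches the host
 * pfn backing each guest frame, and an entry is only filled in on first
 * access, via the gfn_to_pfn hook installed when the KVM module was loaded.
 */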
static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
{
	int srcu_idx, err = 0;
	kvm_pfn_t pfn;

	if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE)
		return 0;

	srcu_idx = srcu_read_lock(&kvm->srcu);
	pfn = kvm_mips_gfn_to_pfn(kvm, gfn);

	if (kvm_mips_is_error_pfn(pfn)) {
		kvm_err("Couldn't get pfn for gfn %#" PRIx64 "!\n", gfn);
		err = -EFAULT;
		goto out;
	}

	kvm->arch.guest_pmap[gfn] = pfn;
out:
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	return err;
}

/* Translate guest KSEG0 addresses to Host PA */
unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
						    unsigned long gva)
{
	gfn_t gfn;
	uint32_t offset = gva & ~PAGE_MASK;
	struct kvm *kvm = vcpu->kvm;

	if (KVM_GUEST_KSEGX(gva) != KVM_GUEST_KSEG0) {
		kvm_err("%s/%p: Invalid gva: %#lx\n", __func__,
			__builtin_return_address(0), gva);
		return KVM_INVALID_PAGE;
	}

	gfn = (KVM_GUEST_CPHYSADDR(gva) >> PAGE_SHIFT);

	if (gfn >= kvm->arch.guest_pmap_npages) {
		kvm_err("%s: Invalid gfn: %#llx, GVA: %#lx\n", __func__, gfn,
			gva);
		return KVM_INVALID_PAGE;
	}

	if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
		return KVM_INVALID_ADDR;

	return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset;
}
EXPORT_SYMBOL_GPL(kvm_mips_translate_guest_kseg0_to_hpa);
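
/*
 * Writing a host TLB entry is a probe-then-write sequence: EntryHi is
 * loaded with the target VPN2/ASID and probed, so an existing match is
 * overwritten in place (tlb_write_indexed) rather than duplicated, while a
 * miss falls back to a random slot (tlb_write_random). The probe is not
 * optional: duplicate VPN2 matches raise a machine check on MIPS.
 */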
/* XXXKYMA: Must be called with interrupts disabled */
/* set flush_dcache_mask == 0 if no dcache flush required */
int kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
			    unsigned long entrylo0, unsigned long entrylo1,
			    int flush_dcache_mask)
{
	unsigned long flags;
	unsigned long old_entryhi;
	int idx;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	write_c0_entryhi(entryhi);
	mtc0_tlbw_hazard();

	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();

	if (idx > current_cpu_data.tlbsize) {
		kvm_err("%s: Invalid Index: %d\n", __func__, idx);
		kvm_mips_dump_host_tlbs();
		local_irq_restore(flags);
		return -1;
	}

	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();

	if (idx < 0)
		tlb_write_random();
	else
		tlb_write_indexed();
	tlbw_use_hazard();

	kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0(R): 0x%08lx, entrylo1(R): 0x%08lx\n",
		  vcpu->arch.pc, idx, read_c0_entryhi(),
		  read_c0_entrylo0(), read_c0_entrylo1());

	/* Flush the data cache for each valid half of the new mapping */
	if (flush_dcache_mask) {
		if (entrylo0 & MIPS3_PG_V) {
			++vcpu->stat.flush_dcache_exits;
			flush_data_cache_page((entryhi & VPN2_MASK) &
					      ~flush_dcache_mask);
		}
		if (entrylo1 & MIPS3_PG_V) {
			++vcpu->stat.flush_dcache_exits;
			flush_data_cache_page(((entryhi & VPN2_MASK) &
					       ~flush_dcache_mask) |
					      (0x1 << PAGE_SHIFT));
		}
	}

	/* Restore old ASID */
	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();
	local_irq_restore(flags);
	return 0;
}
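
/*
 * A host TLB entry covers an even/odd pair of virtual pages, so a KSEG0
 * fault maps both the faulting guest frame and its neighbour (gfn ^ 1),
 * placing them in EntryLo0/EntryLo1 according to which half of the pair
 * the faulting address falls in.
 */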
/* XXXKYMA: Must be called with interrupts disabled */
int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
				    struct kvm_vcpu *vcpu)
{
	gfn_t gfn;
	kvm_pfn_t pfn0, pfn1;
	unsigned long vaddr = 0;
	unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
	int even;
	struct kvm *kvm = vcpu->kvm;
	const int flush_dcache_mask = 0;
	int ret;

	if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) {
		kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr);
		kvm_mips_dump_host_tlbs();
		return -1;
	}

	gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT);
	if (gfn >= kvm->arch.guest_pmap_npages) {
		kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__,
			gfn, badvaddr);
		kvm_mips_dump_host_tlbs();
		return -1;
	}
	even = !(gfn & 0x1);
	vaddr = badvaddr & (PAGE_MASK << 1);

	if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
		return -1;

	if (kvm_mips_map_page(vcpu->kvm, gfn ^ 0x1) < 0)
		return -1;

	if (even) {
		pfn0 = kvm->arch.guest_pmap[gfn];
		pfn1 = kvm->arch.guest_pmap[gfn ^ 0x1];
	} else {
		pfn0 = kvm->arch.guest_pmap[gfn ^ 0x1];
		pfn1 = kvm->arch.guest_pmap[gfn];
	}

	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
		   (1 << 2) | (0x1 << 1);
	entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
		   (1 << 2) | (0x1 << 1);

	preempt_disable();
	entryhi = (vaddr | kvm_mips_get_kernel_asid(vcpu));
	ret = kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
				      flush_dcache_mask);
	preempt_enable();

	return ret;
}
EXPORT_SYMBOL_GPL(kvm_mips_handle_kseg0_tlb_fault);
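
/*
 * The commpage gets a dedicated TLB slot: the entry is written with
 * tlb_write_indexed() at the reserved index returned by
 * kvm_mips_get_commpage_asid() (which, despite its name, returns the
 * arch.commpage_tlb index), with the odd-page half left invalid
 * (entrylo1 == 0).
 */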
int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
				       struct kvm_vcpu *vcpu)
{
	kvm_pfn_t pfn0, pfn1;
	unsigned long flags, old_entryhi = 0, vaddr = 0;
	unsigned long entrylo0 = 0, entrylo1 = 0;

	pfn0 = CPHYSADDR(vcpu->arch.kseg0_commpage) >> PAGE_SHIFT;
	pfn1 = 0;
	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
		   (1 << 2) | (0x1 << 1);
	entrylo1 = 0;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	vaddr = badvaddr & (PAGE_MASK << 1);
	write_c0_entryhi(vaddr | kvm_mips_get_kernel_asid(vcpu));
	mtc0_tlbw_hazard();
	write_c0_entrylo0(entrylo0);
	mtc0_tlbw_hazard();
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	write_c0_index(kvm_mips_get_commpage_asid(vcpu));
	mtc0_tlbw_hazard();

	/* Now write the entry into the reserved TLB slot */
	tlb_write_indexed();
	tlbw_use_hazard();

	kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0(R): 0x%08lx, entrylo1(R): 0x%08lx\n",
		  vcpu->arch.pc, read_c0_index(), read_c0_entryhi(),
		  read_c0_entrylo0(), read_c0_entrylo1());

	/* Restore old ASID */
	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();
	local_irq_restore(flags);

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_mips_handle_commpage_tlb_fault);
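
/*
 * Shadow a guest TLB entry into the host TLB: both halves of the guest
 * mapping are backed with host pfns, while the D and V attribute bits are
 * carried over from the guest entry so guest-visible permissions are
 * preserved. The resulting host physical addresses are optionally reported
 * back through hpa0/hpa1.
 */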
int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
					 struct kvm_mips_tlb *tlb,
					 unsigned long *hpa0,
					 unsigned long *hpa1)
{
	unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
	struct kvm *kvm = vcpu->kvm;
	kvm_pfn_t pfn0, pfn1;
	int ret;

	if ((tlb->tlb_hi & VPN2_MASK) == 0) {
		pfn0 = 0;
		pfn1 = 0;
	} else {
		if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0)
					   >> PAGE_SHIFT) < 0)
			return -1;

		if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1)
					   >> PAGE_SHIFT) < 0)
			return -1;

		pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo0)
					    >> PAGE_SHIFT];
		pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo1)
					    >> PAGE_SHIFT];
	}

	if (hpa0)
		*hpa0 = pfn0 << PAGE_SHIFT;

	if (hpa1)
		*hpa1 = pfn1 << PAGE_SHIFT;

	/* Get attributes from the Guest TLB */
	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
		   (tlb->tlb_lo0 & MIPS3_PG_D) | (tlb->tlb_lo0 & MIPS3_PG_V);
	entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
		   (tlb->tlb_lo1 & MIPS3_PG_D) | (tlb->tlb_lo1 & MIPS3_PG_V);

	kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
		  tlb->tlb_lo0, tlb->tlb_lo1);

	preempt_disable();
	entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ?
					       kvm_mips_get_kernel_asid(vcpu) :
					       kvm_mips_get_user_asid(vcpu));
	ret = kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
				      tlb->tlb_mask);
	preempt_enable();

	return ret;
}
EXPORT_SYMBOL_GPL(kvm_mips_handle_mapped_seg_tlb_fault);
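
/*
 * Linear scan of the software-maintained guest TLB, matching on VPN2 and
 * ASID exactly as the hardware would. Returns the matching index, or -1 if
 * the guest holds no translation for this address.
 */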
int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
{
	int index = -1;
	int i;
	struct kvm_mips_tlb *tlb = vcpu->arch.guest_tlb;

	for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
		if (TLB_HI_VPN2_HIT(tlb[i], entryhi) &&
		    TLB_HI_ASID_HIT(tlb[i], entryhi)) {
			index = i;
			break;
		}
	}

	/* Only dereference the entry when a match was actually found */
	if (index >= 0)
		kvm_debug("%s: entryhi: %#lx, index: %d lo0: %#lx, lo1: %#lx\n",
			  __func__, entryhi, index,
			  tlb[index].tlb_lo0, tlb[index].tlb_lo1);

	return index;
}
EXPORT_SYMBOL_GPL(kvm_mips_guest_tlb_lookup);

int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr)
{
	unsigned long old_entryhi, flags;
	int idx;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();

	if (KVM_GUEST_KERNEL_MODE(vcpu))
		write_c0_entryhi((vaddr & VPN2_MASK) |
				 kvm_mips_get_kernel_asid(vcpu));
	else
		write_c0_entryhi((vaddr & VPN2_MASK) |
				 kvm_mips_get_user_asid(vcpu));
	mtc0_tlbw_hazard();

	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();

	/* Restore old ASID */
	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	local_irq_restore(flags);

	kvm_debug("Host TLB lookup, %#lx, idx: %2d\n", vaddr, idx);

	return idx;
}
EXPORT_SYMBOL_GPL(kvm_mips_host_tlb_lookup);
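
/*
 * Note that the probe below always uses the guest user ASID, so this helper
 * invalidates user-mode mappings for @va. A hit is rewritten with
 * UNIQUE_ENTRYHI(idx), an impossible VPN2 derived from the index, so
 * invalidated slots stay distinct from one another.
 */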
int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va)
{
	unsigned long flags, old_entryhi;
	int idx;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();

	write_c0_entryhi((va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu));
	mtc0_tlbw_hazard();

	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();

	if (idx >= current_cpu_data.tlbsize)
		BUG();

	if (idx > 0) {
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		mtc0_tlbw_hazard();

		write_c0_entrylo0(0);
		mtc0_tlbw_hazard();

		write_c0_entrylo1(0);
		mtc0_tlbw_hazard();

		tlb_write_indexed();
		mtc0_tlbw_hazard();
	}

	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	local_irq_restore(flags);

	if (idx > 0)
		kvm_debug("%s: Invalidated entryhi %#lx @ idx %d\n", __func__,
			  (va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu), idx);

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_mips_host_tlb_inv);
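
/*
 * Flush the entire host TLB. When skip_kseg0 is set, entries whose EntryHi
 * falls in guest KSEG0 are left intact so guest kernel mappings survive the
 * flush.
 */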
void kvm_mips_flush_host_tlb(int skip_kseg0)
{
	unsigned long flags;
	unsigned long old_entryhi, entryhi;
	unsigned long old_pagemask;
	int entry = 0;
	int maxentry = current_cpu_data.tlbsize;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();

	/* Blast 'em all away. */
	for (entry = 0; entry < maxentry; entry++) {
		write_c0_index(entry);
		mtc0_tlbw_hazard();

		if (skip_kseg0) {
			tlb_read();
			tlbw_use_hazard();

			entryhi = read_c0_entryhi();

			/* Don't blow away guest kernel entries */
			if (KVM_GUEST_KSEGX(entryhi) == KVM_GUEST_KSEG0)
				continue;
		}

		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
		mtc0_tlbw_hazard();
		write_c0_entrylo0(0);
		mtc0_tlbw_hazard();
		write_c0_entrylo1(0);
		mtc0_tlbw_hazard();

		tlb_write_indexed();
		mtc0_tlbw_hazard();
	}

	write_c0_entryhi(old_entryhi);
	write_c0_pagemask(old_pagemask);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_mips_flush_host_tlb);
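
/*
 * ASID allocation follows the host's usual scheme: the per-CPU asid_cache
 * is bumped, and when the ASID field wraps, the whole TLB is flushed and a
 * new version is started so stale translations can never match.
 */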
void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
			     struct kvm_vcpu *vcpu)
{
	unsigned long asid = asid_cache(cpu);

	asid += ASID_INC;
	if (!(asid & ASID_MASK)) {
		if (cpu_has_vtag_icache)
			flush_icache_all();

		kvm_local_flush_tlb_all();	/* start new asid cycle */

		if (!asid)	/* fix version if needed */
			asid = ASID_FIRST_VERSION;
	}

	cpu_context(cpu, mm) = asid_cache(cpu) = asid;
}

void kvm_local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry = 0;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);

	/* Blast 'em all away. */
	while (entry < current_cpu_data.tlbsize) {
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
		write_c0_index(entry);
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		entry++;
	}
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	mtc0_tlbw_hazard();

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_local_flush_tlb_all);

/**
 * kvm_mips_migrate_count() - Migrate timer.
 * @vcpu:	Virtual CPU.
 *
 * Migrate CP0_Count hrtimer to the current CPU by cancelling and restarting it
 * if it was running prior to being cancelled.
 *
 * Must be called when the VCPU is migrated to a different CPU to ensure that
 * timer expiry during guest execution interrupts the guest and causes the
 * interrupt to be delivered in a timely manner.
 */
static void kvm_mips_migrate_count(struct kvm_vcpu *vcpu)
{
	if (hrtimer_cancel(&vcpu->arch.comparecount_timer))
		hrtimer_restart(&vcpu->arch.comparecount_timer);
}
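
/*
 * Two cases are handled on vcpu_load: if the ASID version on this CPU has
 * moved on since this VCPU last ran here, fresh kernel and user ASIDs are
 * allocated (newasid); otherwise the EntryHi saved at preemption time is
 * still valid and is simply reloaded.
 */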
/* Restore ASID once we are scheduled back after preemption */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	unsigned long flags;
	int newasid = 0;

	kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);

	/* Allocate new kernel and user ASIDs if needed */

	local_irq_save(flags);

	if ((vcpu->arch.guest_kernel_asid[cpu] ^ asid_cache(cpu)) &
						ASID_VERSION_MASK) {
		kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu);
		vcpu->arch.guest_kernel_asid[cpu] =
		    vcpu->arch.guest_kernel_mm.context.asid[cpu];
		kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu);
		vcpu->arch.guest_user_asid[cpu] =
		    vcpu->arch.guest_user_mm.context.asid[cpu];
		newasid++;

		kvm_debug("[%d]: cpu_context: %#lx\n", cpu,
			  cpu_context(cpu, current->mm));
		kvm_debug("[%d]: Allocated new ASID for Guest Kernel: %#x\n",
			  cpu, vcpu->arch.guest_kernel_asid[cpu]);
		kvm_debug("[%d]: Allocated new ASID for Guest User: %#x\n", cpu,
			  vcpu->arch.guest_user_asid[cpu]);
	}

	if (vcpu->arch.last_sched_cpu != cpu) {
		kvm_debug("[%d->%d]KVM VCPU[%d] switch\n",
			  vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
		/*
		 * Migrate the timer interrupt to the current CPU so that it
		 * always interrupts the guest and synchronously triggers a
		 * guest timer interrupt.
		 */
		kvm_mips_migrate_count(vcpu);
	}

	if (!newasid) {
		/*
		 * If we preempted while the guest was executing, then reload
		 * the pre-empted ASID.
		 */
		if (current->flags & PF_VCPU) {
			write_c0_entryhi(vcpu->arch.preempt_entryhi &
					 ASID_MASK);
			ehb();
		}
	} else {
		/* New ASIDs were allocated for the VM */

		/*
		 * Were we in guest context? If so, then the pre-empted ASID is
		 * no longer valid; we need to set it to what it should be
		 * based on the mode of the Guest (Kernel/User).
		 */
		if (current->flags & PF_VCPU) {
			if (KVM_GUEST_KERNEL_MODE(vcpu))
				write_c0_entryhi(vcpu->arch.
						 guest_kernel_asid[cpu] &
						 ASID_MASK);
			else
				write_c0_entryhi(vcpu->arch.
						 guest_user_asid[cpu] &
						 ASID_MASK);
			ehb();
		}
	}

	/* restore guest state to registers */
	kvm_mips_callbacks->vcpu_set_regs(vcpu);

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_arch_vcpu_load);

/* ASID can change if another task is scheduled during preemption */
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	unsigned long flags;
	uint32_t cpu;

	local_irq_save(flags);

	cpu = smp_processor_id();

	vcpu->arch.preempt_entryhi = read_c0_entryhi();
	vcpu->arch.last_sched_cpu = cpu;

	/* save guest state in registers */
	kvm_mips_callbacks->vcpu_get_regs(vcpu);

	if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
	     ASID_VERSION_MASK)) {
		kvm_debug("%s: Dropping MMU Context: %#lx\n", __func__,
			  cpu_context(cpu, current->mm));
		drop_mmu_context(current->mm, cpu);
	}
	write_c0_entryhi(cpu_asid(cpu, current->mm));
	ehb();

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_arch_vcpu_put);
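
/*
 * Fetch a guest instruction. For mapped segments, the host TLB must already
 * hold (or be made to hold) a translation before *opc can be dereferenced,
 * so a miss is repaired by looking up the guest TLB and shadowing the entry
 * into the host TLB first. KSEG0 addresses bypass the TLB and are read
 * through the host's unmapped CKSEG0 window instead.
 */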
uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned long paddr, flags, vpn2, asid;
	uint32_t inst;
	int index;

	if (KVM_GUEST_KSEGX((unsigned long) opc) < KVM_GUEST_KSEG0 ||
	    KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
		local_irq_save(flags);
		index = kvm_mips_host_tlb_lookup(vcpu, (unsigned long) opc);
		if (index >= 0) {
			inst = *(opc);
		} else {
			vpn2 = (unsigned long) opc & VPN2_MASK;
			asid = kvm_read_c0_guest_entryhi(cop0) & ASID_MASK;
			index = kvm_mips_guest_tlb_lookup(vcpu, vpn2 | asid);
			if (index < 0) {
				kvm_err("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n",
					__func__, opc, vcpu, read_c0_entryhi());
				kvm_mips_dump_host_tlbs();
				local_irq_restore(flags);
				return KVM_INVALID_INST;
			}
			kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
							     &vcpu->arch.
							     guest_tlb[index],
							     NULL, NULL);
			inst = *(opc);
		}
		local_irq_restore(flags);
	} else if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) {
		paddr = kvm_mips_translate_guest_kseg0_to_hpa(vcpu,
							      (unsigned long) opc);
		inst = *(uint32_t *) CKSEG0ADDR(paddr);
	} else {
		kvm_err("%s: illegal address: %p\n", __func__, opc);
		return KVM_INVALID_INST;
	}

	return inst;
}
EXPORT_SYMBOL_GPL(kvm_get_inst);