X-Git-Url: https://git.kernel.dk/?a=blobdiff_plain;f=arch%2Fmips%2Fkvm%2Ftlb.c;h=385fbd34e77dee6748caccda20f4b4a50ab219f8;hb=42aa12e74e91;hp=60e4ad0016e7595ec2032a681381e15ae4dc4063;hpb=44bcc922381e24c4f38dc5dfd8d34d60b2ede898;p=linux-2.6-block.git

diff --git a/arch/mips/kvm/tlb.c b/arch/mips/kvm/tlb.c
index 60e4ad0016e7..385fbd34e77d 100644
--- a/arch/mips/kvm/tlb.c
+++ b/arch/mips/kvm/tlb.c
@@ -14,7 +14,7 @@
 #include <linux/smp.h>
 #include <linux/mm.h>
 #include <linux/delay.h>
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/kvm_host.h>
 #include <linux/srcu.h>
 
@@ -24,6 +24,7 @@
 #include <asm/pgtable.h>
 #include <asm/cacheflush.h>
 #include <asm/tlb.h>
+#include <asm/tlbdebug.h>
 
 #undef CONFIG_MIPS_MT
 #include <asm/r4kcache.h>
@@ -32,32 +33,26 @@
 #define KVM_GUEST_PC_TLB    0
 #define KVM_GUEST_SP_TLB    1
 
-#define PRIx64 "llx"
-
 atomic_t kvm_mips_instance;
 EXPORT_SYMBOL_GPL(kvm_mips_instance);
 
-/* These function pointers are initialized once the KVM module is loaded */
-kvm_pfn_t (*kvm_mips_gfn_to_pfn)(struct kvm *kvm, gfn_t gfn);
-EXPORT_SYMBOL_GPL(kvm_mips_gfn_to_pfn);
-
-void (*kvm_mips_release_pfn_clean)(kvm_pfn_t pfn);
-EXPORT_SYMBOL_GPL(kvm_mips_release_pfn_clean);
-
-bool (*kvm_mips_is_error_pfn)(kvm_pfn_t pfn);
-EXPORT_SYMBOL_GPL(kvm_mips_is_error_pfn);
-
-uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
+static u32 kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
 {
-	return vcpu->arch.guest_kernel_asid[smp_processor_id()] & ASID_MASK;
+	int cpu = smp_processor_id();
+
+	return vcpu->arch.guest_kernel_asid[cpu] &
+			cpu_asid_mask(&cpu_data[cpu]);
 }
 
-uint32_t kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
+static u32 kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
 {
-	return vcpu->arch.guest_user_asid[smp_processor_id()] & ASID_MASK;
+	int cpu = smp_processor_id();
+
+	return vcpu->arch.guest_user_asid[cpu] &
+			cpu_asid_mask(&cpu_data[cpu]);
 }
 
-inline uint32_t kvm_mips_get_commpage_asid(struct kvm_vcpu *vcpu)
+inline u32 kvm_mips_get_commpage_asid(struct kvm_vcpu *vcpu)
 {
 	return vcpu->kvm->arch.commpage_tlb;
 }
@@ -66,49 +61,15 @@ inline uint32_t kvm_mips_get_commpage_asid(struct kvm_vcpu *vcpu)
 
 void kvm_mips_dump_host_tlbs(void)
 {
-	unsigned long old_entryhi;
-	unsigned long old_pagemask;
-	struct kvm_mips_tlb tlb;
 	unsigned long flags;
-	int i;
 
 	local_irq_save(flags);
 
-	old_entryhi = read_c0_entryhi();
-	old_pagemask = read_c0_pagemask();
-
 	kvm_info("HOST TLBs:\n");
-	kvm_info("ASID: %#lx\n", read_c0_entryhi() & ASID_MASK);
-
-	for (i = 0; i < current_cpu_data.tlbsize; i++) {
-		write_c0_index(i);
-		mtc0_tlbw_hazard();
+	dump_tlb_regs();
+	pr_info("\n");
+	dump_tlb_all();
 
-		tlb_read();
-		tlbw_use_hazard();
-
-		tlb.tlb_hi = read_c0_entryhi();
-		tlb.tlb_lo0 = read_c0_entrylo0();
-		tlb.tlb_lo1 = read_c0_entrylo1();
-		tlb.tlb_mask = read_c0_pagemask();
-
-		kvm_info("TLB%c%3d Hi 0x%08lx ",
-			 (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
-			 i, tlb.tlb_hi);
-		kvm_info("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
-			 (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
-			 (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
-			 (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
-			 (tlb.tlb_lo0 >> 3) & 7);
-		kvm_info("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
-			 (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
-			 (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
-			 (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
-			 (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
-	}
-	write_c0_entryhi(old_entryhi);
-	write_c0_pagemask(old_pagemask);
-	mtc0_tlbw_hazard();
 	local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(kvm_mips_dump_host_tlbs);
@@ -125,74 +86,24 @@ void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu)
 	for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
 		tlb = vcpu->arch.guest_tlb[i];
 		kvm_info("TLB%c%3d Hi 0x%08lx ",
-			 (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
+			 (tlb.tlb_lo[0] | tlb.tlb_lo[1]) & ENTRYLO_V
+				? ' ' : '*',
 			 i, tlb.tlb_hi);
-		kvm_info("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
-			 (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
-			 (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
-			 (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
-			 (tlb.tlb_lo0 >> 3) & 7);
-		kvm_info("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
-			 (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
-			 (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
-			 (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
-			 (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
+		kvm_info("Lo0=0x%09llx %c%c attr %lx ",
+			 (u64) mips3_tlbpfn_to_paddr(tlb.tlb_lo[0]),
+			 (tlb.tlb_lo[0] & ENTRYLO_D) ? 'D' : ' ',
+			 (tlb.tlb_lo[0] & ENTRYLO_G) ? 'G' : ' ',
+			 (tlb.tlb_lo[0] & ENTRYLO_C) >> ENTRYLO_C_SHIFT);
+		kvm_info("Lo1=0x%09llx %c%c attr %lx sz=%lx\n",
+			 (u64) mips3_tlbpfn_to_paddr(tlb.tlb_lo[1]),
+			 (tlb.tlb_lo[1] & ENTRYLO_D) ? 'D' : ' ',
+			 (tlb.tlb_lo[1] & ENTRYLO_G) ? 'G' : ' ',
+			 (tlb.tlb_lo[1] & ENTRYLO_C) >> ENTRYLO_C_SHIFT,
+			 tlb.tlb_mask);
 	}
 }
 EXPORT_SYMBOL_GPL(kvm_mips_dump_guest_tlbs);
 
-static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
-{
-	int srcu_idx, err = 0;
-	kvm_pfn_t pfn;
-
-	if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE)
-		return 0;
-
-	srcu_idx = srcu_read_lock(&kvm->srcu);
-	pfn = kvm_mips_gfn_to_pfn(kvm, gfn);
-
-	if (kvm_mips_is_error_pfn(pfn)) {
-		kvm_err("Couldn't get pfn for gfn %#" PRIx64 "!\n", gfn);
-		err = -EFAULT;
-		goto out;
-	}
-
-	kvm->arch.guest_pmap[gfn] = pfn;
-out:
-	srcu_read_unlock(&kvm->srcu, srcu_idx);
-	return err;
-}
-
-/* Translate guest KSEG0 addresses to Host PA */
-unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
-						    unsigned long gva)
-{
-	gfn_t gfn;
-	uint32_t offset = gva & ~PAGE_MASK;
-	struct kvm *kvm = vcpu->kvm;
-
-	if (KVM_GUEST_KSEGX(gva) != KVM_GUEST_KSEG0) {
-		kvm_err("%s/%p: Invalid gva: %#lx\n", __func__,
-			__builtin_return_address(0), gva);
-		return KVM_INVALID_PAGE;
-	}
-
-	gfn = (KVM_GUEST_CPHYSADDR(gva) >> PAGE_SHIFT);
-
-	if (gfn >= kvm->arch.guest_pmap_npages) {
-		kvm_err("%s: Invalid gfn: %#llx, GVA: %#lx\n", __func__, gfn,
-			gva);
-		return KVM_INVALID_PAGE;
-	}
-
-	if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
-		return KVM_INVALID_ADDR;
-
-	return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset;
-}
-EXPORT_SYMBOL_GPL(kvm_mips_translate_guest_kseg0_to_hpa);
-
 /* XXXKYMA: Must be called with interrupts disabled */
 /* set flush_dcache_mask == 0 if no dcache flush required */
 int kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
@@ -236,12 +147,12 @@ int kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
 
 	/* Flush D-cache */
 	if (flush_dcache_mask) {
-		if (entrylo0 & MIPS3_PG_V) {
+		if (entrylo0 & ENTRYLO_V) {
 			++vcpu->stat.flush_dcache_exits;
 			flush_data_cache_page((entryhi & VPN2_MASK) &
 					      ~flush_dcache_mask);
 		}
-		if (entrylo1 & MIPS3_PG_V) {
+		if (entrylo1 & ENTRYLO_V) {
 			++vcpu->stat.flush_dcache_exits;
 			flush_data_cache_page(((entryhi & VPN2_MASK) &
 					       ~flush_dcache_mask) |
@@ -252,96 +163,34 @@ int kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
 	/* Restore old ASID */
 	write_c0_entryhi(old_entryhi);
 	mtc0_tlbw_hazard();
-	tlbw_use_hazard();
 	local_irq_restore(flags);
 	return 0;
 }
-
-/* XXXKYMA: Must be called with interrupts disabled */
-int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
-				    struct kvm_vcpu *vcpu)
-{
-	gfn_t gfn;
-	kvm_pfn_t pfn0, pfn1;
-	unsigned long vaddr = 0;
-	unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
-	int even;
-	struct kvm *kvm = vcpu->kvm;
-	const int flush_dcache_mask = 0;
-	int ret;
-
-	if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) {
-		kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr);
-		kvm_mips_dump_host_tlbs();
-		return -1;
-	}
-
-	gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT);
-	if (gfn >= kvm->arch.guest_pmap_npages) {
-		kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__,
-			gfn, badvaddr);
-		kvm_mips_dump_host_tlbs();
-		return -1;
-	}
-	even = !(gfn & 0x1);
-	vaddr = badvaddr & (PAGE_MASK << 1);
-
-	if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
-		return -1;
-
-	if (kvm_mips_map_page(vcpu->kvm, gfn ^ 0x1) < 0)
-		return -1;
-
-	if (even) {
-		pfn0 = kvm->arch.guest_pmap[gfn];
-		pfn1 = kvm->arch.guest_pmap[gfn ^ 0x1];
-	} else {
-		pfn0 = kvm->arch.guest_pmap[gfn ^ 0x1];
-		pfn1 = kvm->arch.guest_pmap[gfn];
-	}
-
-	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
-		   (1 << 2) | (0x1 << 1);
-	entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
-		   (1 << 2) | (0x1 << 1);
-
-	preempt_disable();
-	entryhi = (vaddr | kvm_mips_get_kernel_asid(vcpu));
-	ret = kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
-				      flush_dcache_mask);
-	preempt_enable();
-
-	return ret;
-}
-EXPORT_SYMBOL_GPL(kvm_mips_handle_kseg0_tlb_fault);
+EXPORT_SYMBOL_GPL(kvm_mips_host_tlb_write);
 
 int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
 	struct kvm_vcpu *vcpu)
 {
-	kvm_pfn_t pfn0, pfn1;
+	kvm_pfn_t pfn;
 	unsigned long flags, old_entryhi = 0, vaddr = 0;
-	unsigned long entrylo0 = 0, entrylo1 = 0;
+	unsigned long entrylo[2] = { 0, 0 };
+	unsigned int pair_idx;
 
-	pfn0 = CPHYSADDR(vcpu->arch.kseg0_commpage) >> PAGE_SHIFT;
-	pfn1 = 0;
-	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
-		   (1 << 2) | (0x1 << 1);
-	entrylo1 = 0;
+	pfn = CPHYSADDR(vcpu->arch.kseg0_commpage) >> PAGE_SHIFT;
+	pair_idx = (badvaddr >> PAGE_SHIFT) & 1;
+	entrylo[pair_idx] = mips3_paddr_to_tlbpfn(pfn << PAGE_SHIFT) |
+			    (0x3 << ENTRYLO_C_SHIFT) | ENTRYLO_D | ENTRYLO_V;
 
 	local_irq_save(flags);
 
 	old_entryhi = read_c0_entryhi();
 	vaddr = badvaddr & (PAGE_MASK << 1);
 	write_c0_entryhi(vaddr | kvm_mips_get_kernel_asid(vcpu));
-	mtc0_tlbw_hazard();
-	write_c0_entrylo0(entrylo0);
-	mtc0_tlbw_hazard();
-	write_c0_entrylo1(entrylo1);
-	mtc0_tlbw_hazard();
+	write_c0_entrylo0(entrylo[0]);
+	write_c0_entrylo1(entrylo[1]);
 	write_c0_index(kvm_mips_get_commpage_asid(vcpu));
 	mtc0_tlbw_hazard();
 	tlb_write_indexed();
-	mtc0_tlbw_hazard();
 	tlbw_use_hazard();
 
 	kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0 (R): 0x%08lx, entrylo1(R): 0x%08lx\n",
@@ -351,68 +200,12 @@ int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
 	/* Restore old ASID */
 	write_c0_entryhi(old_entryhi);
 	mtc0_tlbw_hazard();
-	tlbw_use_hazard();
 	local_irq_restore(flags);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(kvm_mips_handle_commpage_tlb_fault);
 
-int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
-					 struct kvm_mips_tlb *tlb,
-					 unsigned long *hpa0,
-					 unsigned long *hpa1)
-{
-	unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
-	struct kvm *kvm = vcpu->kvm;
-	kvm_pfn_t pfn0, pfn1;
-	int ret;
-
-	if ((tlb->tlb_hi & VPN2_MASK) == 0) {
-		pfn0 = 0;
-		pfn1 = 0;
-	} else {
-		if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0)
-					   >> PAGE_SHIFT) < 0)
-			return -1;
-
-		if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1)
-					   >> PAGE_SHIFT) < 0)
-			return -1;
-
-		pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo0)
-					    >> PAGE_SHIFT];
-		pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo1)
-					    >> PAGE_SHIFT];
-	}
-
-	if (hpa0)
-		*hpa0 = pfn0 << PAGE_SHIFT;
-
-	if (hpa1)
-		*hpa1 = pfn1 << PAGE_SHIFT;
-
-	/* Get attributes from the Guest TLB */
-	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
-		   (tlb->tlb_lo0 & MIPS3_PG_D) | (tlb->tlb_lo0 & MIPS3_PG_V);
-	entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
-		   (tlb->tlb_lo1 & MIPS3_PG_D) | (tlb->tlb_lo1 & MIPS3_PG_V);
-
-	kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
-		  tlb->tlb_lo0, tlb->tlb_lo1);
-
-	preempt_disable();
-	entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ?
-					       kvm_mips_get_kernel_asid(vcpu) :
-					       kvm_mips_get_user_asid(vcpu));
-	ret = kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
-				      tlb->tlb_mask);
-	preempt_enable();
-
-	return ret;
-}
-EXPORT_SYMBOL_GPL(kvm_mips_handle_mapped_seg_tlb_fault);
-
 int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
 {
 	int i;
 
@@ -428,7 +221,7 @@ int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
 	}
 
 	kvm_debug("%s: entryhi: %#lx, index: %d lo0: %#lx, lo1: %#lx\n",
-		  __func__, entryhi, index, tlb[i].tlb_lo0, tlb[i].tlb_lo1);
+		  __func__, entryhi, index, tlb[i].tlb_lo[0], tlb[i].tlb_lo[1]);
 
 	return index;
 }
@@ -460,7 +253,6 @@ int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr)
 	/* Restore old ASID */
 	write_c0_entryhi(old_entryhi);
 	mtc0_tlbw_hazard();
-	tlbw_use_hazard();
 
 	local_irq_restore(flags);
 
@@ -491,21 +283,16 @@
 
 	if (idx > 0) {
 		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
-		mtc0_tlbw_hazard();
-
 		write_c0_entrylo0(0);
-		mtc0_tlbw_hazard();
-
 		write_c0_entrylo1(0);
 		mtc0_tlbw_hazard();
 
 		tlb_write_indexed();
-		mtc0_tlbw_hazard();
+		tlbw_use_hazard();
 	}
 
 	write_c0_entryhi(old_entryhi);
 	mtc0_tlbw_hazard();
-	tlbw_use_hazard();
 
 	local_irq_restore(flags);
 
@@ -533,11 +320,11 @@ void kvm_mips_flush_host_tlb(int skip_kseg0)
 	/* Blast 'em all away. */
 	for (entry = 0; entry < maxentry; entry++) {
 		write_c0_index(entry);
-		mtc0_tlbw_hazard();
 
 		if (skip_kseg0) {
+			mtc0_tlbr_hazard();
 			tlb_read();
-			tlbw_use_hazard();
+			tlb_read_hazard();
 
 			entryhi = read_c0_entryhi();
 
@@ -548,46 +335,22 @@ void kvm_mips_flush_host_tlb(int skip_kseg0)
 		/* Make sure all entries differ. */
 		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
-		mtc0_tlbw_hazard();
 		write_c0_entrylo0(0);
-		mtc0_tlbw_hazard();
 		write_c0_entrylo1(0);
 		mtc0_tlbw_hazard();
 
 		tlb_write_indexed();
-		mtc0_tlbw_hazard();
+		tlbw_use_hazard();
 	}
 
-	tlbw_use_hazard();
-
 	write_c0_entryhi(old_entryhi);
 	write_c0_pagemask(old_pagemask);
 	mtc0_tlbw_hazard();
-	tlbw_use_hazard();
 
 	local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(kvm_mips_flush_host_tlb);
 
-void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
-			     struct kvm_vcpu *vcpu)
-{
-	unsigned long asid = asid_cache(cpu);
-
-	asid += ASID_INC;
-	if (!(asid & ASID_MASK)) {
-		if (cpu_has_vtag_icache)
-			flush_icache_all();
-
-		kvm_local_flush_tlb_all();      /* start new asid cycle */
-
-		if (!asid)      /* fix version if needed */
-			asid = ASID_FIRST_VERSION;
-	}
-
-	cpu_context(cpu, mm) = asid_cache(cpu) = asid;
-}
-
 void kvm_local_flush_tlb_all(void)
 {
 	unsigned long flags;
@@ -607,183 +370,12 @@ void kvm_local_flush_tlb_all(void)
 		write_c0_index(entry);
 		mtc0_tlbw_hazard();
 		tlb_write_indexed();
+		tlbw_use_hazard();
 		entry++;
 	}
-	tlbw_use_hazard();
 	write_c0_entryhi(old_ctx);
 	mtc0_tlbw_hazard();
 
 	local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(kvm_local_flush_tlb_all);
-
-/**
- * kvm_mips_migrate_count() - Migrate timer.
- * @vcpu:	Virtual CPU.
- *
- * Migrate CP0_Count hrtimer to the current CPU by cancelling and restarting it
- * if it was running prior to being cancelled.
- *
- * Must be called when the VCPU is migrated to a different CPU to ensure that
- * timer expiry during guest execution interrupts the guest and causes the
- * interrupt to be delivered in a timely manner.
- */
-static void kvm_mips_migrate_count(struct kvm_vcpu *vcpu)
-{
-	if (hrtimer_cancel(&vcpu->arch.comparecount_timer))
-		hrtimer_restart(&vcpu->arch.comparecount_timer);
-}
-
-/* Restore ASID once we are scheduled back after preemption */
-void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
-{
-	unsigned long flags;
-	int newasid = 0;
-
-	kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);
-
-	/* Allocate new kernel and user ASIDs if needed */
-
-	local_irq_save(flags);
-
-	if ((vcpu->arch.guest_kernel_asid[cpu] ^ asid_cache(cpu)) &
-	    ASID_VERSION_MASK) {
-		kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu);
-		vcpu->arch.guest_kernel_asid[cpu] =
-		    vcpu->arch.guest_kernel_mm.context.asid[cpu];
-		kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu);
-		vcpu->arch.guest_user_asid[cpu] =
-		    vcpu->arch.guest_user_mm.context.asid[cpu];
-		newasid++;
-
-		kvm_debug("[%d]: cpu_context: %#lx\n", cpu,
-			  cpu_context(cpu, current->mm));
-		kvm_debug("[%d]: Allocated new ASID for Guest Kernel: %#x\n",
-			  cpu, vcpu->arch.guest_kernel_asid[cpu]);
-		kvm_debug("[%d]: Allocated new ASID for Guest User: %#x\n", cpu,
-			  vcpu->arch.guest_user_asid[cpu]);
-	}
-
-	if (vcpu->arch.last_sched_cpu != cpu) {
-		kvm_debug("[%d->%d]KVM VCPU[%d] switch\n",
-			  vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
-		/*
-		 * Migrate the timer interrupt to the current CPU so that it
-		 * always interrupts the guest and synchronously triggers a
-		 * guest timer interrupt.
-		 */
-		kvm_mips_migrate_count(vcpu);
-	}
-
-	if (!newasid) {
-		/*
-		 * If we preempted while the guest was executing, then reload
-		 * the pre-empted ASID
-		 */
-		if (current->flags & PF_VCPU) {
-			write_c0_entryhi(vcpu->arch.
-					 preempt_entryhi & ASID_MASK);
-			ehb();
-		}
-	} else {
-		/* New ASIDs were allocated for the VM */
-
-		/*
-		 * Were we in guest context? If so then the pre-empted ASID is
-		 * no longer valid, we need to set it to what it should be based
-		 * on the mode of the Guest (Kernel/User)
-		 */
-		if (current->flags & PF_VCPU) {
-			if (KVM_GUEST_KERNEL_MODE(vcpu))
-				write_c0_entryhi(vcpu->arch.
-						 guest_kernel_asid[cpu] &
-						 ASID_MASK);
-			else
-				write_c0_entryhi(vcpu->arch.
						 guest_user_asid[cpu] &
-						 ASID_MASK);
-			ehb();
-		}
-	}
-
-	/* restore guest state to registers */
-	kvm_mips_callbacks->vcpu_set_regs(vcpu);
-
-	local_irq_restore(flags);
-
-}
-EXPORT_SYMBOL_GPL(kvm_arch_vcpu_load);
-
-/* ASID can change if another task is scheduled during preemption */
-void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
-{
-	unsigned long flags;
-	uint32_t cpu;
-
-	local_irq_save(flags);
-
-	cpu = smp_processor_id();
-
-	vcpu->arch.preempt_entryhi = read_c0_entryhi();
-	vcpu->arch.last_sched_cpu = cpu;
-
-	/* save guest state in registers */
-	kvm_mips_callbacks->vcpu_get_regs(vcpu);
-
-	if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
-	     ASID_VERSION_MASK)) {
-		kvm_debug("%s: Dropping MMU Context: %#lx\n", __func__,
-			  cpu_context(cpu, current->mm));
-		drop_mmu_context(current->mm, cpu);
-	}
-	write_c0_entryhi(cpu_asid(cpu, current->mm));
-	ehb();
-
-	local_irq_restore(flags);
-}
-EXPORT_SYMBOL_GPL(kvm_arch_vcpu_put);
-
-uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu)
-{
-	struct mips_coproc *cop0 = vcpu->arch.cop0;
-	unsigned long paddr, flags, vpn2, asid;
-	uint32_t inst;
-	int index;
-
-	if (KVM_GUEST_KSEGX((unsigned long) opc) < KVM_GUEST_KSEG0 ||
-	    KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
-		local_irq_save(flags);
-		index = kvm_mips_host_tlb_lookup(vcpu, (unsigned long) opc);
-		if (index >= 0) {
-			inst = *(opc);
-		} else {
-			vpn2 = (unsigned long) opc & VPN2_MASK;
-			asid = kvm_read_c0_guest_entryhi(cop0) & ASID_MASK;
-			index = kvm_mips_guest_tlb_lookup(vcpu, vpn2 | asid);
-			if (index < 0) {
-				kvm_err("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n",
-					__func__, opc, vcpu, read_c0_entryhi());
-				kvm_mips_dump_host_tlbs();
-				local_irq_restore(flags);
-				return KVM_INVALID_INST;
-			}
-			kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
-							     &vcpu->arch.
-							     guest_tlb[index],
-							     NULL, NULL);
-			inst = *(opc);
-		}
-		local_irq_restore(flags);
-	} else if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) {
-		paddr =
-		    kvm_mips_translate_guest_kseg0_to_hpa(vcpu,
-							  (unsigned long) opc);
-		inst = *(uint32_t *) CKSEG0ADDR(paddr);
-	} else {
-		kvm_err("%s: illegal address: %p\n", __func__, opc);
-		return KVM_INVALID_INST;
-	}
-
-	return inst;
-}
-EXPORT_SYMBOL_GPL(kvm_get_inst);
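---

Notes on the recurring patterns in this diff, with small stand-alone sketches (illustrations only, not code from the patch).

The kvm_mips_get_kernel_asid()/kvm_mips_get_user_asid() hunks stop masking with the fixed ASID_MASK and instead ask the CPU how wide its ASIDs are, since different MIPS cores implement different ASID widths. A minimal sketch of that pattern in kernel context; kvm_effective_asid() is a hypothetical name, while cpu_asid_mask() and cpu_data[] are the in-tree interfaces the patch itself uses:

/*
 * Sketch only: mask a stored guest ASID down to the bits this CPU
 * actually implements, as the patched helpers do.  The caller must
 * have preemption disabled (as the KVM code does) so that
 * smp_processor_id() stays meaningful.
 */
static u32 kvm_effective_asid(struct kvm_vcpu *vcpu, u32 stored_asid)
{
	int cpu = smp_processor_id();

	/* cpu_asid_mask() derives the mask from cpuinfo, so cores with
	 * e.g. 6-bit and 10-bit ASIDs are both handled correctly. */
	return stored_asid & cpu_asid_mask(&cpu_data[cpu]);
}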
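Several hunks replace the magic EntryLo constants — (0x1 << 1), (1 << 2), (0x3 << 3) — with named ENTRYLO_* flags. On an R4000-style TLB the EntryLo layout is G at bit 0, V at bit 1, D at bit 2, and the cache coherency attribute C at bits 5:3, so the old constants were V, D, and C=3 (cacheable, noncoherent). A stand-alone user-space sketch of decoding those fields, assuming that layout (dump_entrylo() is hypothetical; the kernel gets the real macros from its MIPS register headers):

#include <stdint.h>
#include <stdio.h>

#define ENTRYLO_G	(1ULL << 0)	/* global */
#define ENTRYLO_V	(1ULL << 1)	/* valid */
#define ENTRYLO_D	(1ULL << 2)	/* dirty, i.e. writable */
#define ENTRYLO_C_SHIFT	3
#define ENTRYLO_C	(0x7ULL << ENTRYLO_C_SHIFT)

static void dump_entrylo(uint64_t lo)
{
	printf("%c%c%c attr %llu\n",
	       (lo & ENTRYLO_D) ? 'D' : '.',
	       (lo & ENTRYLO_V) ? 'V' : '.',
	       (lo & ENTRYLO_G) ? 'G' : '.',
	       (unsigned long long)((lo & ENTRYLO_C) >> ENTRYLO_C_SHIFT));
}

int main(void)
{
	/* D and V set, cache attribute 3: prints "DV. attr 3" */
	dump_entrylo(ENTRYLO_D | ENTRYLO_V | (3ULL << ENTRYLO_C_SHIFT));
	return 0;
}

This is why the attr field in the dump functions changes from the opaque (tlb_lo >> 3) & 7 to (tlb_lo & ENTRYLO_C) >> ENTRYLO_C_SHIFT: the value is identical, but the source is self-describing.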
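In kvm_mips_handle_commpage_tlb_fault() the rewrite also fixes which half of the TLB pair maps the commpage: a MIPS TLB entry maps an aligned pair of pages, EntryLo0 for the even page and EntryLo1 for the odd one, and the old code unconditionally loaded EntryLo0. Bit PAGE_SHIFT of the faulting address selects the correct half. A user-space sketch of the selection, assuming 4 KiB pages:

#include <assert.h>

#define PAGE_SHIFT 12	/* assumed 4 KiB pages */

/* Even pages belong in EntryLo0, odd pages in EntryLo1. */
static unsigned int entrylo_pair_idx(unsigned long badvaddr)
{
	return (badvaddr >> PAGE_SHIFT) & 1;
}

int main(void)
{
	unsigned long entrylo[2] = { 0, 0 };

	assert(entrylo_pair_idx(0x6000) == 0);	/* even page */
	assert(entrylo_pair_idx(0x7000) == 1);	/* odd page */

	/* A fault on the odd page populates EntryLo1 only. */
	entrylo[entrylo_pair_idx(0x7000)] = 0x1234;	/* stand-in PFN bits */
	assert(entrylo[0] == 0 && entrylo[1] == 0x1234);
	return 0;
}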
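Most of the remaining hunks thin out hazard barriers. The old code issued mtc0_tlbw_hazard() after every CP0 write; the new code batches the mtc0s, places a single mtc0_tlbw_hazard() before the TLB write instruction, and uses tlbw_use_hazard() only where the new entry may actually be consumed (with the analogous mtc0_tlbr_hazard()/tlb_read_hazard() pair around tlb_read()). A kernel-context sketch of the indexed-write sequence this series converges on; the barrier macros and CP0 accessors are the in-tree ones, the helper name is hypothetical:

/* Sketch only: barrier placement for an indexed TLB write. */
static void tlb_write_indexed_entry(int idx, unsigned long hi,
				    unsigned long lo0, unsigned long lo1)
{
	write_c0_index(idx);
	write_c0_entryhi(hi);
	write_c0_entrylo0(lo0);
	write_c0_entrylo1(lo1);
	mtc0_tlbw_hazard();	/* one barrier covers the batch of mtc0s */
	tlb_write_indexed();
	tlbw_use_hazard();	/* before the new mapping can be used */
}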