/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS TLB handling, this file is part of the Linux host kernel so that
 * TLB handlers run from KSEG0
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/kvm_host.h>

#include <asm/cpu.h>
#include <asm/bootinfo.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>

/* CONFIG_MIPS_MT is hidden from <asm/r4kcache.h> and restored right below */
#undef CONFIG_MIPS_MT
#include <asm/r4kcache.h>
#define CONFIG_MIPS_MT

#define KVM_GUEST_PC_TLB    0
#define KVM_GUEST_SP_TLB    1

/* The printk format strings below use PRIx64; the kernel has no inttypes.h */
#define PRIx64 "llx"

/* Use VZ EntryHi.EHINV to invalidate TLB entries */
#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))
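
/*
 * Illustration (editor's note, not from the original source): with 4KB
 * pages (PAGE_SHIFT == 12), UNIQUE_ENTRYHI(3) == CKSEG0 + (3 << 13),
 * i.e. a distinct KSEG0 VPN2 per TLB index. Unmapped KSEG0 addresses are
 * never looked up in the TLB, so loading such an EntryHi (with the
 * EntryLo valid bits cleared) effectively invalidates the entry.
 */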
atomic_t kvm_mips_instance;
EXPORT_SYMBOL(kvm_mips_instance);

/* These function pointers are initialized once the KVM module is loaded */
pfn_t (*kvm_mips_gfn_to_pfn)(struct kvm *kvm, gfn_t gfn);
EXPORT_SYMBOL(kvm_mips_gfn_to_pfn);

void (*kvm_mips_release_pfn_clean)(pfn_t pfn);
EXPORT_SYMBOL(kvm_mips_release_pfn_clean);

bool (*kvm_mips_is_error_pfn)(pfn_t pfn);
EXPORT_SYMBOL(kvm_mips_is_error_pfn);
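
/*
 * Guest kernel and guest user contexts get their own host ASIDs, tracked
 * per host CPU; the helpers below return the one to use on the current
 * CPU. Despite its name, kvm_mips_get_commpage_asid() returns the host
 * TLB index reserved for the commpage (it is written to c0_index in
 * kvm_mips_handle_commpage_tlb_fault() below).
 */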
uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.guest_kernel_asid[smp_processor_id()] & ASID_MASK;
}

uint32_t kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.guest_user_asid[smp_processor_id()] & ASID_MASK;
}

inline uint32_t kvm_mips_get_commpage_asid(struct kvm_vcpu *vcpu)
{
        return vcpu->kvm->arch.commpage_tlb;
}

/*
 * Structure defining a TLB entry data set: see struct kvm_mips_tlb.
 */

void kvm_mips_dump_host_tlbs(void)
{
        unsigned long old_entryhi;
        unsigned long old_pagemask;
        struct kvm_mips_tlb tlb;
        unsigned long flags;
        int i;

        local_irq_save(flags);

        old_entryhi = read_c0_entryhi();
        old_pagemask = read_c0_pagemask();

        printk("HOST TLBs:\n");
        printk("ASID: %#lx\n", read_c0_entryhi() & ASID_MASK);

        for (i = 0; i < current_cpu_data.tlbsize; i++) {
                write_c0_index(i);
                mtc0_tlbw_hazard();

                tlb_read();
                tlbw_use_hazard();

                tlb.tlb_hi = read_c0_entryhi();
                tlb.tlb_lo0 = read_c0_entrylo0();
                tlb.tlb_lo1 = read_c0_entrylo1();
                tlb.tlb_mask = read_c0_pagemask();

                printk("TLB%c%3d Hi 0x%08lx ",
                       (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
                       i, tlb.tlb_hi);
                printk("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
                       (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
                       (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
                       (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
                       (tlb.tlb_lo0 >> 3) & 7);
                printk("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
                       (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
                       (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
                       (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
                       (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
        }

        write_c0_entryhi(old_entryhi);
        write_c0_pagemask(old_pagemask);
        mtc0_tlbw_hazard();
        local_irq_restore(flags);
}

void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        struct kvm_mips_tlb tlb;
        int i;

        printk("Guest TLBs:\n");
        printk("Guest EntryHi: %#lx\n", kvm_read_c0_guest_entryhi(cop0));

        for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
                tlb = vcpu->arch.guest_tlb[i];
                printk("TLB%c%3d Hi 0x%08lx ",
                       (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
                       i, tlb.tlb_hi);
                printk("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
                       (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
                       (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
                       (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
                       (tlb.tlb_lo0 >> 3) & 7);
                printk("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
                       (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
                       (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
                       (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
                       (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
        }
}

void kvm_mips_dump_shadow_tlbs(struct kvm_vcpu *vcpu)
{
        volatile struct kvm_mips_tlb tlb;
        int i;

        printk("Shadow TLBs:\n");
        for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
                tlb = vcpu->arch.shadow_tlb[smp_processor_id()][i];
                printk("TLB%c%3d Hi 0x%08lx ",
                       (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
                       i, tlb.tlb_hi);
                printk("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
                       (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
                       (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
                       (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
                       (tlb.tlb_lo0 >> 3) & 7);
                printk("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
                       (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
                       (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
                       (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
                       (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
        }
}
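
/*
 * Lazily populate the guest-physical to host-pfn map (guest_pmap).
 * Callers rely on the panic() below rather than handling allocation
 * failure, so this cannot fail from the caller's point of view.
 */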
static void kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
{
        pfn_t pfn;

        if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE)
                return;

        pfn = kvm_mips_gfn_to_pfn(kvm, gfn);
        if (kvm_mips_is_error_pfn(pfn))
                panic("Couldn't get pfn for gfn %#" PRIx64 "!\n", gfn);

        kvm->arch.guest_pmap[gfn] = pfn;
}

/* Translate guest KSEG0 addresses to Host PA */
unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
        unsigned long gva)
{
        gfn_t gfn;
        uint32_t offset = gva & ~PAGE_MASK;
        struct kvm *kvm = vcpu->kvm;

        if (KVM_GUEST_KSEGX(gva) != KVM_GUEST_KSEG0) {
                kvm_err("%s/%p: Invalid gva: %#lx\n", __func__,
                        __builtin_return_address(0), gva);
                return KVM_INVALID_PAGE;
        }

        gfn = (KVM_GUEST_CPHYSADDR(gva) >> PAGE_SHIFT);

        if (gfn >= kvm->arch.guest_pmap_npages) {
                kvm_err("%s: Invalid gfn: %#llx, GVA: %#lx\n", __func__, gfn,
                        gva);
                return KVM_INVALID_PAGE;
        }

        kvm_mips_map_page(vcpu->kvm, gfn);
        return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset;
}
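
/*
 * Install one even/odd pair of mappings into the host TLB. EntryHi is
 * probed first: a hit overwrites the matching slot in place, a miss
 * falls back to a pseudo-random slot taken from c0_random.
 */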
/* XXXKYMA: Must be called with interrupts disabled */
/* set flush_dcache_mask == 0 if no dcache flush required */
int
kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
        unsigned long entrylo0, unsigned long entrylo1, int flush_dcache_mask)
{
        unsigned long flags;
        unsigned long old_entryhi;
        volatile int idx;

        local_irq_save(flags);

        old_entryhi = read_c0_entryhi();
        write_c0_entryhi(entryhi);
        mtc0_tlbw_hazard();

        tlb_probe();
        tlb_probe_hazard();
        idx = read_c0_index();

        if (idx > current_cpu_data.tlbsize) {
                kvm_err("%s: Invalid Index: %d\n", __func__, idx);
                kvm_mips_dump_host_tlbs();
                /* Restore EntryHi and interrupts before bailing out */
                write_c0_entryhi(old_entryhi);
                mtc0_tlbw_hazard();
                local_irq_restore(flags);
                return -1;
        }

        /* Probe miss: fall back to a pseudo-random index */
        if (idx < 0) {
                idx = read_c0_random() % current_cpu_data.tlbsize;
                write_c0_index(idx);
                mtc0_tlbw_hazard();
        }

        write_c0_entrylo0(entrylo0);
        write_c0_entrylo1(entrylo1);
        mtc0_tlbw_hazard();

        tlb_write_indexed();
        tlbw_use_hazard();

        kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] "
                  "entrylo0(R): 0x%08lx, entrylo1(R): 0x%08lx\n",
                  vcpu->arch.pc, idx, read_c0_entryhi(),
                  read_c0_entrylo0(), read_c0_entrylo1());

        /* Flush the corresponding D-cache pages if requested */
        if (flush_dcache_mask) {
                if (entrylo0 & MIPS3_PG_V) {
                        ++vcpu->stat.flush_dcache_exits;
                        flush_data_cache_page((entryhi & VPN2_MASK) &
                                              ~flush_dcache_mask);
                }
                if (entrylo1 & MIPS3_PG_V) {
                        ++vcpu->stat.flush_dcache_exits;
                        flush_data_cache_page(((entryhi & VPN2_MASK) &
                                               ~flush_dcache_mask) |
                                              (0x1 << PAGE_SHIFT));
                }
        }

        /* Restore old ASID */
        write_c0_entryhi(old_entryhi);
        mtc0_tlbw_hazard();
        tlbw_use_hazard();
        local_irq_restore(flags);
        return 0;
}
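
/*
 * Handle a TLB miss on a guest KSEG0 address: resolve the gfn and its
 * even/odd partner, then install the pair so that a single host entry
 * covers both halves of the double-page VPN2 region.
 */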
/* XXXKYMA: Must be called with interrupts disabled */
int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
        struct kvm_vcpu *vcpu)
{
        gfn_t gfn;
        pfn_t pfn0, pfn1;
        unsigned long vaddr = 0;
        unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
        struct kvm *kvm = vcpu->kvm;
        const int flush_dcache_mask = 0;

        if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) {
                kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr);
                kvm_mips_dump_host_tlbs();
                return -1;
        }

        gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT);
        if (gfn >= kvm->arch.guest_pmap_npages) {
                kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__,
                        gfn, badvaddr);
                kvm_mips_dump_host_tlbs();
                return -1;
        }

        vaddr = badvaddr & (PAGE_MASK << 1);

        kvm_mips_map_page(vcpu->kvm, gfn);
        kvm_mips_map_page(vcpu->kvm, gfn ^ 0x1);

        /* pfn0 always backs the even page of the pair */
        if (!(gfn & 0x1)) {
                pfn0 = kvm->arch.guest_pmap[gfn];
                pfn1 = kvm->arch.guest_pmap[gfn ^ 0x1];
        } else {
                pfn0 = kvm->arch.guest_pmap[gfn ^ 0x1];
                pfn1 = kvm->arch.guest_pmap[gfn];
        }

        entryhi = (vaddr | kvm_mips_get_kernel_asid(vcpu));
        entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
                   (1 << 2) | (0x1 << 1);
        entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
                   (1 << 2) | (0x1 << 1);

        return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
                                       flush_dcache_mask);
}
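
/*
 * Map the vcpu's KSEG0 commpage at the faulting guest address, using the
 * host TLB slot reserved for it (kvm_mips_get_commpage_asid()); the odd
 * half of the pair is left invalid.
 */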
int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
        struct kvm_vcpu *vcpu)
{
        pfn_t pfn0, pfn1;
        unsigned long flags, old_entryhi = 0, vaddr = 0;
        unsigned long entrylo0 = 0, entrylo1 = 0;

        pfn0 = CPHYSADDR(vcpu->arch.kseg0_commpage) >> PAGE_SHIFT;
        pfn1 = 0;
        entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
                   (1 << 2) | (0x1 << 1);
        entrylo1 = 0;

        local_irq_save(flags);

        old_entryhi = read_c0_entryhi();
        vaddr = badvaddr & (PAGE_MASK << 1);
        write_c0_entryhi(vaddr | kvm_mips_get_kernel_asid(vcpu));
        mtc0_tlbw_hazard();
        write_c0_entrylo0(entrylo0);
        mtc0_tlbw_hazard();
        write_c0_entrylo1(entrylo1);
        mtc0_tlbw_hazard();
        write_c0_index(kvm_mips_get_commpage_asid(vcpu));
        mtc0_tlbw_hazard();
        tlb_write_indexed();
        mtc0_tlbw_hazard();
        tlbw_use_hazard();

        kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0(R): 0x%08lx, entrylo1(R): 0x%08lx\n",
                  vcpu->arch.pc, read_c0_index(), read_c0_entryhi(),
                  read_c0_entrylo0(), read_c0_entrylo1());

        /* Restore old ASID */
        write_c0_entryhi(old_entryhi);
        mtc0_tlbw_hazard();
        tlbw_use_hazard();
        local_irq_restore(flags);

        return 0;
}
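
/*
 * Shadow a guest TLB entry into the host TLB: both halves of the guest
 * entry are backed by host pages first, and the D/V attributes are
 * carried over from the guest entry. hpa0/hpa1, when non-NULL, receive
 * the host physical addresses of the two halves.
 */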
int
kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
        struct kvm_mips_tlb *tlb, unsigned long *hpa0, unsigned long *hpa1)
{
        unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
        struct kvm *kvm = vcpu->kvm;
        pfn_t pfn0, pfn1;

        if ((tlb->tlb_hi & VPN2_MASK) == 0) {
                pfn0 = 0;
                pfn1 = 0;
        } else {
                kvm_mips_map_page(kvm,
                        mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT);
                kvm_mips_map_page(kvm,
                        mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT);

                pfn0 = kvm->arch.guest_pmap[
                        mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT];
                pfn1 = kvm->arch.guest_pmap[
                        mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT];
        }

        if (hpa0)
                *hpa0 = pfn0 << PAGE_SHIFT;

        if (hpa1)
                *hpa1 = pfn1 << PAGE_SHIFT;

        /* Get attributes from the Guest TLB */
        entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ?
                  kvm_mips_get_kernel_asid(vcpu) :
                  kvm_mips_get_user_asid(vcpu));
        entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
                   (tlb->tlb_lo0 & MIPS3_PG_D) | (tlb->tlb_lo0 & MIPS3_PG_V);
        entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
                   (tlb->tlb_lo1 & MIPS3_PG_D) | (tlb->tlb_lo1 & MIPS3_PG_V);

        kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
                  tlb->tlb_lo0, tlb->tlb_lo1);

        return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
                                       tlb->tlb_mask);
}
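
/*
 * Search the guest TLB for an entry matching the given EntryHi value
 * (VPN2 plus ASID, honouring each entry's page mask and global bit).
 * Returns the matching index, or -1 if none.
 */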
int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
{
        int i;
        int index = -1;
        struct kvm_mips_tlb *tlb = vcpu->arch.guest_tlb;

        for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
                if (((TLB_VPN2(tlb[i]) & ~tlb[i].tlb_mask) ==
                     ((entryhi & VPN2_MASK) & ~tlb[i].tlb_mask)) &&
                    (TLB_IS_GLOBAL(tlb[i]) ||
                     (TLB_ASID(tlb[i]) == (entryhi & ASID_MASK)))) {
                        index = i;
                        break;
                }
        }

        /* Only dereference the entry on a hit; i may be out of range here */
        if (index >= 0)
                kvm_debug("%s: entryhi: %#lx, index: %d lo0: %#lx, lo1: %#lx\n",
                          __func__, entryhi, index, tlb[index].tlb_lo0,
                          tlb[index].tlb_lo1);

        return index;
}
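
/*
 * Probe the host TLB for the guest virtual address under the vcpu's
 * current-mode ASID. Returns the host TLB index, or a negative value on
 * a miss.
 */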
int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr)
{
        unsigned long old_entryhi, flags;
        volatile int idx;

        local_irq_save(flags);

        old_entryhi = read_c0_entryhi();

        if (KVM_GUEST_KERNEL_MODE(vcpu))
                write_c0_entryhi((vaddr & VPN2_MASK) |
                                 kvm_mips_get_kernel_asid(vcpu));
        else
                write_c0_entryhi((vaddr & VPN2_MASK) |
                                 kvm_mips_get_user_asid(vcpu));

        mtc0_tlbw_hazard();

        tlb_probe();
        tlb_probe_hazard();
        idx = read_c0_index();

        /* Restore old ASID */
        write_c0_entryhi(old_entryhi);
        mtc0_tlbw_hazard();
        tlbw_use_hazard();

        local_irq_restore(flags);

        kvm_debug("Host TLB lookup, %#lx, idx: %2d\n", vaddr, idx);

        return idx;
}
int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va)
{
        int idx;
        unsigned long flags, old_entryhi;

        local_irq_save(flags);

        old_entryhi = read_c0_entryhi();

        write_c0_entryhi((va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu));
        mtc0_tlbw_hazard();

        tlb_probe();
        tlb_probe_hazard();
        idx = read_c0_index();

        if (idx >= current_cpu_data.tlbsize)
                BUG();

        if (idx > 0) {
                write_c0_entryhi(UNIQUE_ENTRYHI(idx));
                mtc0_tlbw_hazard();

                write_c0_entrylo0(0);
                mtc0_tlbw_hazard();

                write_c0_entrylo1(0);
                mtc0_tlbw_hazard();

                tlb_write_indexed();
                mtc0_tlbw_hazard();
        }

        write_c0_entryhi(old_entryhi);
        mtc0_tlbw_hazard();
        tlbw_use_hazard();

        local_irq_restore(flags);

        if (idx > 0)
                kvm_debug("%s: Invalidated entryhi %#lx @ idx %d\n", __func__,
                          (va & VPN2_MASK) |
                          (vcpu->arch.asid_map[va & ASID_MASK] & ASID_MASK),
                          idx);

        return 0;
}

/* XXXKYMA: Fix Guest USER/KERNEL no longer share the same ASID */
int kvm_mips_host_tlb_inv_index(struct kvm_vcpu *vcpu, int index)
{
        unsigned long flags, old_entryhi;

        if (index >= current_cpu_data.tlbsize)
                BUG();

        local_irq_save(flags);

        old_entryhi = read_c0_entryhi();

        write_c0_entryhi(UNIQUE_ENTRYHI(index));
        mtc0_tlbw_hazard();

        write_c0_index(index);
        mtc0_tlbw_hazard();

        write_c0_entrylo0(0);
        mtc0_tlbw_hazard();

        write_c0_entrylo1(0);
        mtc0_tlbw_hazard();

        tlb_write_indexed();
        mtc0_tlbw_hazard();
        tlbw_use_hazard();

        write_c0_entryhi(old_entryhi);
        mtc0_tlbw_hazard();
        tlbw_use_hazard();

        local_irq_restore(flags);

        return 0;
}
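
/*
 * Invalidate the whole host TLB. With skip_kseg0 set, entries whose
 * EntryHi lies in guest KSEG0 are read back first and preserved.
 */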
void kvm_mips_flush_host_tlb(int skip_kseg0)
{
        unsigned long flags;
        unsigned long old_entryhi, entryhi;
        unsigned long old_pagemask;
        int entry = 0;
        int maxentry = current_cpu_data.tlbsize;

        local_irq_save(flags);

        old_entryhi = read_c0_entryhi();
        old_pagemask = read_c0_pagemask();

        /* Blast 'em all away. */
        for (entry = 0; entry < maxentry; entry++) {
                write_c0_index(entry);
                mtc0_tlbw_hazard();

                if (skip_kseg0) {
                        tlb_read();
                        tlbw_use_hazard();

                        entryhi = read_c0_entryhi();

                        /* Don't blow away guest kernel entries */
                        if (KVM_GUEST_KSEGX(entryhi) == KVM_GUEST_KSEG0)
                                continue;
                }

                /* Make sure all entries differ. */
                write_c0_entryhi(UNIQUE_ENTRYHI(entry));
                mtc0_tlbw_hazard();
                write_c0_entrylo0(0);
                mtc0_tlbw_hazard();
                write_c0_entrylo1(0);
                mtc0_tlbw_hazard();

                tlb_write_indexed();
                mtc0_tlbw_hazard();
        }

        tlbw_use_hazard();

        write_c0_entryhi(old_entryhi);
        write_c0_pagemask(old_pagemask);
        mtc0_tlbw_hazard();
        tlbw_use_hazard();

        local_irq_restore(flags);
}
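
/*
 * Allocate the next ASID from the per-cpu asid_cache, flushing the TLB
 * (and the I-cache on VTAG-indexed machines) when the ASID field wraps
 * and a new version must begin. Illustrative arithmetic (editor's note,
 * assuming an 8-bit ASID field, ASID_MASK == 0xff and ASID_INC == 1):
 * from asid_cache == 0x1ff the increment wraps the low bits to zero, so
 * the cycle flush runs and the next version continues from 0x200.
 */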
void
kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
        struct kvm_vcpu *vcpu)
{
        unsigned long asid = asid_cache(cpu);

        if (!((asid += ASID_INC) & ASID_MASK)) {
                if (cpu_has_vtag_icache)
                        flush_icache_all();

                kvm_local_flush_tlb_all();      /* start new asid cycle */

                if (!asid)      /* fix version if needed */
                        asid = ASID_FIRST_VERSION;
        }

        cpu_context(cpu, mm) = asid_cache(cpu) = asid;
}
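
/*
 * Snapshot the current host TLB contents into this vcpu's per-cpu shadow
 * TLB array, entry by entry, so they can be reloaded later (used when
 * more than one KVM instance shares the host TLB; see kvm_arch_vcpu_put).
 */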
void kvm_shadow_tlb_put(struct kvm_vcpu *vcpu)
{
        unsigned long flags;
        unsigned long old_entryhi;
        unsigned long old_pagemask;
        int entry = 0;
        int cpu = smp_processor_id();

        local_irq_save(flags);

        old_entryhi = read_c0_entryhi();
        old_pagemask = read_c0_pagemask();

        for (entry = 0; entry < current_cpu_data.tlbsize; entry++) {
                write_c0_index(entry);
                mtc0_tlbw_hazard();
                tlb_read();
                tlbw_use_hazard();

                vcpu->arch.shadow_tlb[cpu][entry].tlb_hi = read_c0_entryhi();
                vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0 = read_c0_entrylo0();
                vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1 = read_c0_entrylo1();
                vcpu->arch.shadow_tlb[cpu][entry].tlb_mask = read_c0_pagemask();
        }

        write_c0_entryhi(old_entryhi);
        write_c0_pagemask(old_pagemask);
        mtc0_tlbw_hazard();
        local_irq_restore(flags);
}
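
/*
 * Restore the host TLB from this vcpu's per-cpu shadow copy, the inverse
 * of kvm_shadow_tlb_put().
 */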
void kvm_shadow_tlb_load(struct kvm_vcpu *vcpu)
{
        unsigned long flags;
        unsigned long old_ctx;
        int entry;
        int cpu = smp_processor_id();

        local_irq_save(flags);

        old_ctx = read_c0_entryhi();

        for (entry = 0; entry < current_cpu_data.tlbsize; entry++) {
                write_c0_entryhi(vcpu->arch.shadow_tlb[cpu][entry].tlb_hi);
                mtc0_tlbw_hazard();
                write_c0_entrylo0(vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0);
                write_c0_entrylo1(vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1);

                write_c0_index(entry);
                mtc0_tlbw_hazard();

                tlb_write_indexed();
                tlbw_use_hazard();
        }

        tlbw_use_hazard();
        write_c0_entryhi(old_ctx);
        mtc0_tlbw_hazard();
        local_irq_restore(flags);
}
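
/*
 * Flush every entry in the local host TLB by loading each slot with a
 * unique KSEG0 EntryHi and zeroed EntryLo pair.
 */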
void kvm_local_flush_tlb_all(void)
{
        unsigned long flags;
        unsigned long old_ctx;
        int entry = 0;

        local_irq_save(flags);
        /* Save old context and create impossible VPN2 value */
        old_ctx = read_c0_entryhi();
        write_c0_entrylo0(0);
        write_c0_entrylo1(0);

        /* Blast 'em all away. */
        while (entry < current_cpu_data.tlbsize) {
                /* Make sure all entries differ. */
                write_c0_entryhi(UNIQUE_ENTRYHI(entry));
                write_c0_index(entry);
                mtc0_tlbw_hazard();
                tlb_write_indexed();
                entry++;
        }
        tlbw_use_hazard();
        write_c0_entryhi(old_ctx);
        mtc0_tlbw_hazard();

        local_irq_restore(flags);
}
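
/*
 * Initialize every per-cpu shadow TLB entry to a safe invalid state,
 * using the same unique-EntryHi scheme as the host TLB flush above.
 */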
void kvm_mips_init_shadow_tlb(struct kvm_vcpu *vcpu)
{
        int entry, cpu;

        for_each_possible_cpu(cpu) {
                for (entry = 0; entry < current_cpu_data.tlbsize; entry++) {
                        vcpu->arch.shadow_tlb[cpu][entry].tlb_hi =
                            UNIQUE_ENTRYHI(entry);
                        vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0 = 0x0;
                        vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1 = 0x0;
                        vcpu->arch.shadow_tlb[cpu][entry].tlb_mask =
                            read_c0_pagemask();

                        kvm_debug
                            ("shadow_tlb[%d][%d]: tlb_hi: %#lx, lo0: %#lx, lo1: %#lx\n",
                             cpu, entry,
                             vcpu->arch.shadow_tlb[cpu][entry].tlb_hi,
                             vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0,
                             vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1);
                }
        }
}

/* Restore ASID once we are scheduled back after preemption */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        unsigned long flags;
        int newasid = 0;

        kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);

        /* Allocate new kernel and user ASIDs if needed */

        local_irq_save(flags);

        if (((vcpu->arch.
              guest_kernel_asid[cpu] ^ asid_cache(cpu)) & ASID_VERSION_MASK)) {
                kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu);
                vcpu->arch.guest_kernel_asid[cpu] =
                    vcpu->arch.guest_kernel_mm.context.asid[cpu];
                kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu);
                vcpu->arch.guest_user_asid[cpu] =
                    vcpu->arch.guest_user_mm.context.asid[cpu];
                newasid++;

                kvm_info("[%d]: cpu_context: %#lx\n", cpu,
                         cpu_context(cpu, current->mm));
                kvm_info("[%d]: Allocated new ASID for Guest Kernel: %#x\n",
                         cpu, vcpu->arch.guest_kernel_asid[cpu]);
                kvm_info("[%d]: Allocated new ASID for Guest User: %#x\n", cpu,
                         vcpu->arch.guest_user_asid[cpu]);
        }

        if (vcpu->arch.last_sched_cpu != cpu) {
                kvm_info("[%d->%d]KVM VCPU[%d] switch\n",
                         vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
        }

        /* Only reload shadow host TLB if new ASIDs haven't been allocated */
        if ((atomic_read(&kvm_mips_instance) > 1) && !newasid) {
                kvm_mips_flush_host_tlb(0);
                kvm_shadow_tlb_load(vcpu);
        }

        if (!newasid) {
                /*
                 * If we preempted while the guest was executing, then reload
                 * the pre-empted ASID
                 */
                if (current->flags & PF_VCPU) {
                        write_c0_entryhi(vcpu->arch.
                                         preempt_entryhi & ASID_MASK);
                }
        } else {
                /* New ASIDs were allocated for the VM */

                /*
                 * Were we in guest context? If so then the pre-empted ASID is
                 * no longer valid, we need to set it to what it should be
                 * based on the mode of the Guest (Kernel/User)
                 */
                if (current->flags & PF_VCPU) {
                        if (KVM_GUEST_KERNEL_MODE(vcpu))
                                write_c0_entryhi(vcpu->arch.
                                                 guest_kernel_asid[cpu] &
                                                 ASID_MASK);
                        else
                                write_c0_entryhi(vcpu->arch.
                                                 guest_user_asid[cpu] &
                                                 ASID_MASK);
                }
        }

        local_irq_restore(flags);
}

/* ASID can change if another task is scheduled during preemption */
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        unsigned long flags;
        uint32_t cpu;

        local_irq_save(flags);

        cpu = smp_processor_id();

        vcpu->arch.preempt_entryhi = read_c0_entryhi();
        vcpu->arch.last_sched_cpu = cpu;

        if ((atomic_read(&kvm_mips_instance) > 1)) {
                kvm_shadow_tlb_put(vcpu);
        }

        if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
             ASID_VERSION_MASK)) {
                kvm_debug("%s: Dropping MMU Context: %#lx\n", __func__,
                          cpu_context(cpu, current->mm));
                drop_mmu_context(current->mm, cpu);
        }
        write_c0_entryhi(cpu_asid(cpu, current->mm));

        local_irq_restore(flags);
}
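
/*
 * Fetch a guest instruction. Mapped addresses are read through the host
 * TLB, faulting the mapping in from the guest TLB if necessary; KSEG0
 * addresses are translated to a host physical address and read through
 * CKSEG0.
 */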
uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        unsigned long paddr, flags;
        uint32_t inst;
        int index;

        if (KVM_GUEST_KSEGX((unsigned long) opc) < KVM_GUEST_KSEG0 ||
            KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
                local_irq_save(flags);
                index = kvm_mips_host_tlb_lookup(vcpu, (unsigned long) opc);
                if (index >= 0) {
                        inst = *(opc);
                } else {
                        index = kvm_mips_guest_tlb_lookup(vcpu,
                                        ((unsigned long) opc & VPN2_MASK) |
                                        (kvm_read_c0_guest_entryhi(cop0) &
                                         ASID_MASK));
                        if (index < 0) {
                                kvm_err("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n",
                                        __func__, opc, vcpu, read_c0_entryhi());
                                kvm_mips_dump_host_tlbs();
                                local_irq_restore(flags);
                                return KVM_INVALID_INST;
                        }
                        kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
                                        &vcpu->arch.guest_tlb[index],
                                        NULL, NULL);
                        inst = *(opc);
                }
                local_irq_restore(flags);
        } else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG0) {
                paddr = kvm_mips_translate_guest_kseg0_to_hpa(vcpu,
                                (unsigned long) opc);
                inst = *(uint32_t *) CKSEG0ADDR(paddr);
        } else {
                kvm_err("%s: illegal address: %p\n", __func__, opc);
                return KVM_INVALID_INST;
        }

        return inst;
}

EXPORT_SYMBOL(kvm_local_flush_tlb_all);
EXPORT_SYMBOL(kvm_shadow_tlb_put);
EXPORT_SYMBOL(kvm_mips_handle_mapped_seg_tlb_fault);
EXPORT_SYMBOL(kvm_mips_handle_commpage_tlb_fault);
EXPORT_SYMBOL(kvm_mips_init_shadow_tlb);
EXPORT_SYMBOL(kvm_mips_dump_host_tlbs);
EXPORT_SYMBOL(kvm_mips_handle_kseg0_tlb_fault);
EXPORT_SYMBOL(kvm_mips_host_tlb_lookup);
EXPORT_SYMBOL(kvm_mips_flush_host_tlb);
EXPORT_SYMBOL(kvm_mips_guest_tlb_lookup);
EXPORT_SYMBOL(kvm_mips_host_tlb_inv);
EXPORT_SYMBOL(kvm_mips_translate_guest_kseg0_to_hpa);
EXPORT_SYMBOL(kvm_shadow_tlb_load);
EXPORT_SYMBOL(kvm_mips_dump_shadow_tlbs);
EXPORT_SYMBOL(kvm_mips_dump_guest_tlbs);
EXPORT_SYMBOL(kvm_get_inst);
EXPORT_SYMBOL(kvm_arch_vcpu_load);
EXPORT_SYMBOL(kvm_arch_vcpu_put);