/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS TLB handling, this file is part of the Linux host kernel so that
 * TLB handlers run from KSEG0
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/kvm_host.h>
#include <linux/srcu.h>

#include <asm/cpu.h>
#include <asm/bootinfo.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlb.h>
#include <asm/tlbdebug.h>

#undef CONFIG_MIPS_MT
#include <asm/r4kcache.h>
#define CONFIG_MIPS_MT

#define KVM_GUEST_PC_TLB    0
#define KVM_GUEST_SP_TLB    1

atomic_t kvm_mips_instance;
EXPORT_SYMBOL_GPL(kvm_mips_instance);
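
/*
 * kvm_mips_get_kernel_asid() and kvm_mips_get_user_asid() return the host
 * ASID currently assigned to the guest kernel resp. guest user address space
 * on this CPU, masked down to the ASID width supported by the hardware.
 */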
static u32 kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
{
	int cpu = smp_processor_id();

	return vcpu->arch.guest_kernel_asid[cpu] &
			cpu_asid_mask(&cpu_data[cpu]);
}

static u32 kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
{
	int cpu = smp_processor_id();

	return vcpu->arch.guest_user_asid[cpu] &
			cpu_asid_mask(&cpu_data[cpu]);
}

inline u32 kvm_mips_get_commpage_asid(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.commpage_tlb;
}

/* Structure defining a TLB entry data set. */

void kvm_mips_dump_host_tlbs(void)
{
	unsigned long flags;

	local_irq_save(flags);

	kvm_info("HOST TLBs:\n");
	dump_tlb_regs();
	pr_info("\n");
	dump_tlb_all();

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_mips_dump_host_tlbs);

void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_mips_tlb tlb;
	int i;

	kvm_info("Guest TLBs:\n");
	kvm_info("Guest EntryHi: %#lx\n", kvm_read_c0_guest_entryhi(cop0));

	for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
		tlb = vcpu->arch.guest_tlb[i];
		kvm_info("TLB%c%3d Hi 0x%08lx ",
			 (tlb.tlb_lo[0] | tlb.tlb_lo[1]) & MIPS3_PG_V
							? ' ' : '*',
			 i, tlb.tlb_hi);
		kvm_info("Lo0=0x%09llx %c%c attr %lx ",
			 (u64) mips3_tlbpfn_to_paddr(tlb.tlb_lo[0]),
			 (tlb.tlb_lo[0] & MIPS3_PG_D) ? 'D' : ' ',
			 (tlb.tlb_lo[0] & MIPS3_PG_G) ? 'G' : ' ',
			 (tlb.tlb_lo[0] >> 3) & 7);
		kvm_info("Lo1=0x%09llx %c%c attr %lx sz=%lx\n",
			 (u64) mips3_tlbpfn_to_paddr(tlb.tlb_lo[1]),
			 (tlb.tlb_lo[1] & MIPS3_PG_D) ? 'D' : ' ',
			 (tlb.tlb_lo[1] & MIPS3_PG_G) ? 'G' : ' ',
			 (tlb.tlb_lo[1] >> 3) & 7, tlb.tlb_mask);
	}
}
EXPORT_SYMBOL_GPL(kvm_mips_dump_guest_tlbs);

/* XXXKYMA: Must be called with interrupts disabled */
/* set flush_dcache_mask == 0 if no dcache flush required */
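/*
 * The host TLB is probed for entryhi: a matching entry is overwritten in
 * place, otherwise a random entry is used. Returns 0 on success, or -1 if
 * the probed index is out of range.
 */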
int kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
			    unsigned long entrylo0, unsigned long entrylo1,
			    int flush_dcache_mask)
{
	unsigned long flags;
	unsigned long old_entryhi;
	int idx;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	write_c0_entryhi(entryhi);
	mtc0_tlbw_hazard();

	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();

	if (idx > current_cpu_data.tlbsize) {
		kvm_err("%s: Invalid Index: %d\n", __func__, idx);
		kvm_mips_dump_host_tlbs();
		local_irq_restore(flags);
		return -1;
	}

	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();

	if (idx < 0)
		tlb_write_random();
	else
		tlb_write_indexed();
	tlbw_use_hazard();

	kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0(R): 0x%08lx, entrylo1(R): 0x%08lx\n",
		  vcpu->arch.pc, idx, read_c0_entryhi(),
		  read_c0_entrylo0(), read_c0_entrylo1());

	/* Flush D-cache */
	if (flush_dcache_mask) {
		if (entrylo0 & MIPS3_PG_V) {
			++vcpu->stat.flush_dcache_exits;
			flush_data_cache_page((entryhi & VPN2_MASK) &
					      ~flush_dcache_mask);
		}
		if (entrylo1 & MIPS3_PG_V) {
			++vcpu->stat.flush_dcache_exits;
			flush_data_cache_page(((entryhi & VPN2_MASK) &
					       ~flush_dcache_mask) |
					      (0x1 << PAGE_SHIFT));
		}
	}

	/* Restore old ASID */
	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	local_irq_restore(flags);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_mips_host_tlb_write);
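
/*
 * Install a host TLB entry mapping the guest commpage at badvaddr, using the
 * TLB index reserved in kvm->arch.commpage_tlb. Only the even page
 * (EntryLo0) is made valid; EntryLo1 is left invalid.
 */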
int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
	struct kvm_vcpu *vcpu)
{
	kvm_pfn_t pfn0, pfn1;
	unsigned long flags, old_entryhi = 0, vaddr = 0;
	unsigned long entrylo0 = 0, entrylo1 = 0;

	pfn0 = CPHYSADDR(vcpu->arch.kseg0_commpage) >> PAGE_SHIFT;
	pfn1 = 0;
	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
		   (1 << 2) | (0x1 << 1);
	entrylo1 = 0;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	vaddr = badvaddr & (PAGE_MASK << 1);
	write_c0_entryhi(vaddr | kvm_mips_get_kernel_asid(vcpu));
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	write_c0_index(kvm_mips_get_commpage_asid(vcpu));
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();

	kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0 (R): 0x%08lx, entrylo1(R): 0x%08lx\n",
		  vcpu->arch.pc, read_c0_index(), read_c0_entryhi(),
		  read_c0_entrylo0(), read_c0_entrylo1());

	/* Restore old ASID */
	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	local_irq_restore(flags);

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_mips_handle_commpage_tlb_fault);
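
/*
 * Search the guest TLB array for an entry whose VPN2 and ASID match entryhi.
 * Returns the matching index, or -1 if no entry matches.
 */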
int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
{
	int index = -1;
	int i;
	struct kvm_mips_tlb *tlb = vcpu->arch.guest_tlb;

	for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
		if (TLB_HI_VPN2_HIT(tlb[i], entryhi) &&
		    TLB_HI_ASID_HIT(tlb[i], entryhi)) {
			index = i;
			break;
		}
	}

	/* Only dereference the matched entry when the lookup actually hit */
	if (index >= 0)
		kvm_debug("%s: entryhi: %#lx, index: %d lo0: %#lx, lo1: %#lx\n",
			  __func__, entryhi, index,
			  tlb[index].tlb_lo[0], tlb[index].tlb_lo[1]);

	return index;
}
EXPORT_SYMBOL_GPL(kvm_mips_guest_tlb_lookup);
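
/*
 * Probe the host TLB for vaddr under the guest's current host ASID (kernel
 * or user, depending on guest mode). Returns the probed index, which is
 * negative on a miss.
 */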
int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr)
{
	unsigned long old_entryhi, flags;
	int idx;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();

	if (KVM_GUEST_KERNEL_MODE(vcpu))
		write_c0_entryhi((vaddr & VPN2_MASK) |
				 kvm_mips_get_kernel_asid(vcpu));
	else
		write_c0_entryhi((vaddr & VPN2_MASK) |
				 kvm_mips_get_user_asid(vcpu));
	mtc0_tlbw_hazard();

	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();

	/* Restore old ASID */
	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();

	local_irq_restore(flags);

	kvm_debug("Host TLB lookup, %#lx, idx: %2d\n", vaddr, idx);

	return idx;
}
EXPORT_SYMBOL_GPL(kvm_mips_host_tlb_lookup);
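
/*
 * Invalidate any host TLB entry that maps va for the guest user ASID by
 * overwriting it with a unique, invalid EntryHi.
 */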
int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va)
{
	int idx;
	unsigned long flags, old_entryhi;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();

	write_c0_entryhi((va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu));
	mtc0_tlbw_hazard();

	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();

	if (idx >= current_cpu_data.tlbsize)
		BUG();

	/* Index 0 is a valid entry; only a negative index means no match */
	if (idx >= 0) {
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		write_c0_entrylo0(0);
		write_c0_entrylo1(0);
		mtc0_tlbw_hazard();

		tlb_write_indexed();
		tlbw_use_hazard();
	}

	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();

	local_irq_restore(flags);

	if (idx >= 0)
		kvm_debug("%s: Invalidated entryhi %#lx @ idx %d\n", __func__,
			  (va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu), idx);

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_mips_host_tlb_inv);
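
/*
 * Invalidate all host TLB entries. When skip_kseg0 is set, each entry is
 * read back first and entries mapping guest KSEG0 (guest kernel mappings)
 * are left in place.
 */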
void kvm_mips_flush_host_tlb(int skip_kseg0)
{
	unsigned long flags;
	unsigned long old_entryhi, entryhi;
	unsigned long old_pagemask;
	int entry = 0;
	int maxentry = current_cpu_data.tlbsize;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();

	/* Blast 'em all away. */
	for (entry = 0; entry < maxentry; entry++) {
		write_c0_index(entry);

		if (skip_kseg0) {
			mtc0_tlbr_hazard();
			tlb_read();
			tlb_read_hazard();

			entryhi = read_c0_entryhi();

			/* Don't blow away guest kernel entries */
			if (KVM_GUEST_KSEGX(entryhi) == KVM_GUEST_KSEG0)
				continue;
		}

		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
		write_c0_entrylo0(0);
		write_c0_entrylo1(0);
		mtc0_tlbw_hazard();

		tlb_write_indexed();
		tlbw_use_hazard();
	}

	write_c0_entryhi(old_entryhi);
	write_c0_pagemask(old_pagemask);
	mtc0_tlbw_hazard();

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_mips_flush_host_tlb);
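
/*
 * Flush the entire TLB on the local CPU by rewriting every entry with a
 * unique, invalid EntryHi.
 */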
void kvm_local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry = 0;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);

	/* Blast 'em all away. */
	while (entry < current_cpu_data.tlbsize) {
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
		write_c0_index(entry);
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		entry++;
	}
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	mtc0_tlbw_hazard();

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_local_flush_tlb_all);