KVM: MIPS/MMU: Invalidate GVA PTs on ASID changes
arch/mips/kvm/mmu.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS MMU handling in the KVM module.
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/highmem.h>
#include <linux/kvm_host.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>

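/*
 * Host ASID helpers: return the host ASID currently assigned on this CPU to
 * the guest kernel or guest user mm. Callers must keep the CPU stable
 * (preemption or interrupts disabled) around the call.
 */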
static u32 kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
{
	struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
	int cpu = smp_processor_id();

	return cpu_asid(cpu, kern_mm);
}

static u32 kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
{
	struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
	int cpu = smp_processor_id();

	return cpu_asid(cpu, user_mm);
}

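/*
 * kvm_mips_map_page() - Ensure a guest page frame is backed by a host page.
 *
 * Look up the host pfn backing guest frame @gfn via gfn_to_pfn() and cache it
 * in kvm->arch.guest_pmap so later faults on the same page are cheap.
 * Returns 0 on success (or if already mapped), or -EFAULT if no usable pfn
 * could be obtained.
 */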
static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
{
	int srcu_idx, err = 0;
	kvm_pfn_t pfn;

	if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE)
		return 0;

	srcu_idx = srcu_read_lock(&kvm->srcu);
	pfn = gfn_to_pfn(kvm, gfn);

	if (is_error_noslot_pfn(pfn)) {
		kvm_err("Couldn't get pfn for gfn %#llx!\n", gfn);
		err = -EFAULT;
		goto out;
	}

	kvm->arch.guest_pmap[gfn] = pfn;
out:
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	return err;
}

/* Translate guest KSEG0 addresses to Host PA */
unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
						    unsigned long gva)
{
	gfn_t gfn;
	unsigned long offset = gva & ~PAGE_MASK;
	struct kvm *kvm = vcpu->kvm;

	if (KVM_GUEST_KSEGX(gva) != KVM_GUEST_KSEG0) {
		kvm_err("%s/%p: Invalid gva: %#lx\n", __func__,
			__builtin_return_address(0), gva);
		return KVM_INVALID_PAGE;
	}

	gfn = (KVM_GUEST_CPHYSADDR(gva) >> PAGE_SHIFT);

	if (gfn >= kvm->arch.guest_pmap_npages) {
		kvm_err("%s: Invalid gfn: %#llx, GVA: %#lx\n", __func__, gfn,
			gva);
		return KVM_INVALID_PAGE;
	}

	if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
		return KVM_INVALID_ADDR;

	return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset;
}

/*
 * kvm_mips_flush_gva_{pte,pmd,pud,pgd,pt}.
 * Flush a range of guest virtual addresses from the VM's GVA page tables.
 */

static bool kvm_mips_flush_gva_pte(pte_t *pte, unsigned long start_gva,
				   unsigned long end_gva)
{
	int i_min = __pte_offset(start_gva);
	int i_max = __pte_offset(end_gva);
	bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PTE - 1);
	int i;

	/*
	 * There's no freeing to do, so there's no point clearing individual
	 * entries unless only part of the last level page table needs flushing.
	 */
	if (safe_to_remove)
		return true;

	for (i = i_min; i <= i_max; ++i) {
		if (!pte_present(pte[i]))
			continue;

		set_pte(pte + i, __pte(0));
	}
	return false;
}

static bool kvm_mips_flush_gva_pmd(pmd_t *pmd, unsigned long start_gva,
				   unsigned long end_gva)
{
	pte_t *pte;
	unsigned long end = ~0ul;
	int i_min = __pmd_offset(start_gva);
	int i_max = __pmd_offset(end_gva);
	bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PMD - 1);
	int i;

	for (i = i_min; i <= i_max; ++i, start_gva = 0) {
		if (!pmd_present(pmd[i]))
			continue;

		pte = pte_offset(pmd + i, 0);
		if (i == i_max)
			end = end_gva;

		if (kvm_mips_flush_gva_pte(pte, start_gva, end)) {
			pmd_clear(pmd + i);
			pte_free_kernel(NULL, pte);
		} else {
			safe_to_remove = false;
		}
	}
	return safe_to_remove;
}

static bool kvm_mips_flush_gva_pud(pud_t *pud, unsigned long start_gva,
				   unsigned long end_gva)
{
	pmd_t *pmd;
	unsigned long end = ~0ul;
	int i_min = __pud_offset(start_gva);
	int i_max = __pud_offset(end_gva);
	bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PUD - 1);
	int i;

	for (i = i_min; i <= i_max; ++i, start_gva = 0) {
		if (!pud_present(pud[i]))
			continue;

		pmd = pmd_offset(pud + i, 0);
		if (i == i_max)
			end = end_gva;

		if (kvm_mips_flush_gva_pmd(pmd, start_gva, end)) {
			pud_clear(pud + i);
			pmd_free(NULL, pmd);
		} else {
			safe_to_remove = false;
		}
	}
	return safe_to_remove;
}

static bool kvm_mips_flush_gva_pgd(pgd_t *pgd, unsigned long start_gva,
				   unsigned long end_gva)
{
	pud_t *pud;
	unsigned long end = ~0ul;
	int i_min = pgd_index(start_gva);
	int i_max = pgd_index(end_gva);
	bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PGD - 1);
	int i;

	for (i = i_min; i <= i_max; ++i, start_gva = 0) {
		if (!pgd_present(pgd[i]))
			continue;

		pud = pud_offset(pgd + i, 0);
		if (i == i_max)
			end = end_gva;

		if (kvm_mips_flush_gva_pud(pud, start_gva, end)) {
			pgd_clear(pgd + i);
			pud_free(NULL, pud);
		} else {
			safe_to_remove = false;
		}
	}
	return safe_to_remove;
}

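/*
 * kvm_mips_flush_gva_pt() - Flush guest virtual mappings from a GVA page table.
 * @pgd:	GVA page global directory to flush.
 * @flags:	KMF_* flags selecting which regions to flush.
 *
 * useg is always flushed. With KMF_GPA the flush covers every region that can
 * map guest physical pages (useg and, with KMF_KERN, also kseg0 and kseg2/3);
 * without KMF_GPA, KMF_KERN additionally flushes only the mapped kseg2/3
 * region, leaving kseg0 mappings intact.
 */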
void kvm_mips_flush_gva_pt(pgd_t *pgd, enum kvm_mips_flush flags)
{
	if (flags & KMF_GPA) {
		/* all of guest virtual address space could be affected */
		if (flags & KMF_KERN)
			/* useg, kseg0, seg2/3 */
			kvm_mips_flush_gva_pgd(pgd, 0, 0x7fffffff);
		else
			/* useg */
			kvm_mips_flush_gva_pgd(pgd, 0, 0x3fffffff);
	} else {
		/* useg */
		kvm_mips_flush_gva_pgd(pgd, 0, 0x3fffffff);

		/* kseg2/3 */
		if (flags & KMF_KERN)
			kvm_mips_flush_gva_pgd(pgd, 0x60000000, 0x7fffffff);
	}
}

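/*
 * kvm_mips_handle_kseg0_tlb_fault() - Handle a host TLB fault on guest KSEG0.
 *
 * Map the even/odd pair of guest physical pages covering @badvaddr and write
 * a matching entry pair into the host TLB under the guest kernel ASID.
 * Returns the result of the host TLB write, or -1 on a bad address or a
 * mapping failure.
 */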
/* XXXKYMA: Must be called with interrupts disabled */
int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
				    struct kvm_vcpu *vcpu)
{
	gfn_t gfn;
	kvm_pfn_t pfn0, pfn1;
	unsigned long vaddr = 0;
	unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
	struct kvm *kvm = vcpu->kvm;
	const int flush_dcache_mask = 0;
	int ret;

	if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) {
		kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr);
		kvm_mips_dump_host_tlbs();
		return -1;
	}

	gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT);
	if ((gfn | 1) >= kvm->arch.guest_pmap_npages) {
		kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__,
			gfn, badvaddr);
		kvm_mips_dump_host_tlbs();
		return -1;
	}
	vaddr = badvaddr & (PAGE_MASK << 1);

	if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
		return -1;

	if (kvm_mips_map_page(vcpu->kvm, gfn ^ 0x1) < 0)
		return -1;

	pfn0 = kvm->arch.guest_pmap[gfn & ~0x1];
	pfn1 = kvm->arch.guest_pmap[gfn | 0x1];

	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) |
		((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) |
		ENTRYLO_D | ENTRYLO_V;
	entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) |
		((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) |
		ENTRYLO_D | ENTRYLO_V;

	preempt_disable();
	entryhi = (vaddr | kvm_mips_get_kernel_asid(vcpu));
	ret = kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
				      flush_dcache_mask);
	preempt_enable();

	return ret;
}

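/*
 * kvm_mips_handle_mapped_seg_tlb_fault() - Handle a fault in a mapped segment.
 *
 * Back the even/odd pages of guest TLB entry @tlb with host pages and write a
 * corresponding entry into the host TLB, carrying over the guest's dirty and
 * valid bits. Any guest mapping that would cover the commpage is written as
 * invalid so it can never shadow the commpage.
 */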
int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
					 struct kvm_mips_tlb *tlb)
{
	unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
	struct kvm *kvm = vcpu->kvm;
	kvm_pfn_t pfn0, pfn1;
	gfn_t gfn0, gfn1;
	long tlb_lo[2];
	int ret;

	tlb_lo[0] = tlb->tlb_lo[0];
	tlb_lo[1] = tlb->tlb_lo[1];

	/*
	 * The commpage address must not be mapped to anything else if the guest
	 * TLB contains entries nearby, or commpage accesses will break.
	 */
	if (!((tlb->tlb_hi ^ KVM_GUEST_COMMPAGE_ADDR) &
	      VPN2_MASK & (PAGE_MASK << 1)))
		tlb_lo[(KVM_GUEST_COMMPAGE_ADDR >> PAGE_SHIFT) & 1] = 0;

	gfn0 = mips3_tlbpfn_to_paddr(tlb_lo[0]) >> PAGE_SHIFT;
	gfn1 = mips3_tlbpfn_to_paddr(tlb_lo[1]) >> PAGE_SHIFT;
	if (gfn0 >= kvm->arch.guest_pmap_npages ||
	    gfn1 >= kvm->arch.guest_pmap_npages) {
		kvm_err("%s: Invalid gfn: [%#llx, %#llx], EHi: %#lx\n",
			__func__, gfn0, gfn1, tlb->tlb_hi);
		kvm_mips_dump_guest_tlbs(vcpu);
		return -1;
	}

	if (kvm_mips_map_page(kvm, gfn0) < 0)
		return -1;

	if (kvm_mips_map_page(kvm, gfn1) < 0)
		return -1;

	pfn0 = kvm->arch.guest_pmap[gfn0];
	pfn1 = kvm->arch.guest_pmap[gfn1];

	/* Get attributes from the Guest TLB */
	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) |
		((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) |
		(tlb_lo[0] & ENTRYLO_D) |
		(tlb_lo[0] & ENTRYLO_V);
	entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) |
		((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) |
		(tlb_lo[1] & ENTRYLO_D) |
		(tlb_lo[1] & ENTRYLO_V);

	kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
		  tlb->tlb_lo[0], tlb->tlb_lo[1]);

	preempt_disable();
	entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ?
					       kvm_mips_get_kernel_asid(vcpu) :
					       kvm_mips_get_user_asid(vcpu));
	ret = kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
				      tlb->tlb_mask);
	preempt_enable();

	return ret;
}

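/*
 * kvm_get_new_mmu_context() - Allocate a fresh host ASID for a guest mm.
 *
 * Bump the per-CPU ASID cache and assign the new value to @mm. When the ASID
 * space wraps, flush the local TLB (and the I-cache on virtually tagged
 * I-cache CPUs) to start a new ASID cycle. Modelled on the architecture's
 * get_new_mmu_context(), but using kvm_local_flush_tlb_all() for the
 * wrap-around flush.
 */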
void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
			     struct kvm_vcpu *vcpu)
{
	unsigned long asid = asid_cache(cpu);

	asid += cpu_asid_inc();
	if (!(asid & cpu_asid_mask(&cpu_data[cpu]))) {
		if (cpu_has_vtag_icache)
			flush_icache_all();

		kvm_local_flush_tlb_all();      /* start new asid cycle */

		if (!asid)      /* fix version if needed */
			asid = asid_first_version(cpu);
	}

	cpu_context(cpu, mm) = asid_cache(cpu) = asid;
}

/**
 * kvm_mips_migrate_count() - Migrate timer.
 * @vcpu:	Virtual CPU.
 *
 * Migrate CP0_Count hrtimer to the current CPU by cancelling and restarting it
 * if it was running prior to being cancelled.
 *
 * Must be called when the VCPU is migrated to a different CPU to ensure that
 * timer expiry during guest execution interrupts the guest and causes the
 * interrupt to be delivered in a timely manner.
 */
static void kvm_mips_migrate_count(struct kvm_vcpu *vcpu)
{
	if (hrtimer_cancel(&vcpu->arch.comparecount_timer))
		hrtimer_restart(&vcpu->arch.comparecount_timer);
}

/* Restore ASID once we are scheduled back after preemption */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	unsigned long flags;

	kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);

	local_irq_save(flags);

	if (vcpu->arch.last_sched_cpu != cpu) {
		kvm_debug("[%d->%d]KVM VCPU[%d] switch\n",
			  vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
		/*
		 * Migrate the timer interrupt to the current CPU so that it
		 * always interrupts the guest and synchronously triggers a
		 * guest timer interrupt.
		 */
		kvm_mips_migrate_count(vcpu);
	}

	/* restore guest state to registers */
	kvm_mips_callbacks->vcpu_load(vcpu, cpu);

	local_irq_restore(flags);
}

/* ASID can change if another task is scheduled during preemption */
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	unsigned long flags;
	int cpu;

	local_irq_save(flags);

	cpu = smp_processor_id();
	vcpu->arch.last_sched_cpu = cpu;

	/* save guest state in registers */
	kvm_mips_callbacks->vcpu_put(vcpu, cpu);

	local_irq_restore(flags);
}

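/*
 * kvm_get_inst() - Fetch a guest instruction word from a guest virtual address.
 *
 * For TLB-mapped guest addresses (useg and kseg2/3), ensure a host TLB entry
 * exists (faulting one in from the guest TLB if needed) and read the word
 * directly through @opc. For guest KSEG0, translate to a host physical
 * address and read it via a temporary kmap_atomic() mapping. Returns the
 * instruction word, or KVM_INVALID_INST on failure.
 */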
u32 kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned long paddr, flags, vpn2, asid;
	unsigned long va = (unsigned long)opc;
	void *vaddr;
	u32 inst;
	int index;

	if (KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0 ||
	    KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) {
		local_irq_save(flags);
		index = kvm_mips_host_tlb_lookup(vcpu, va);
		if (index >= 0) {
			inst = *(opc);
		} else {
			vpn2 = va & VPN2_MASK;
			asid = kvm_read_c0_guest_entryhi(cop0) &
				KVM_ENTRYHI_ASID;
			index = kvm_mips_guest_tlb_lookup(vcpu, vpn2 | asid);
			if (index < 0) {
				kvm_err("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n",
					__func__, opc, vcpu, read_c0_entryhi());
				kvm_mips_dump_host_tlbs();
				kvm_mips_dump_guest_tlbs(vcpu);
				local_irq_restore(flags);
				return KVM_INVALID_INST;
			}
			if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
						&vcpu->arch.guest_tlb[index])) {
				kvm_err("%s: handling mapped seg tlb fault failed for %p, index: %u, vcpu: %p, ASID: %#lx\n",
					__func__, opc, index, vcpu,
					read_c0_entryhi());
				kvm_mips_dump_guest_tlbs(vcpu);
				local_irq_restore(flags);
				return KVM_INVALID_INST;
			}
			inst = *(opc);
		}
		local_irq_restore(flags);
	} else if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) {
		paddr = kvm_mips_translate_guest_kseg0_to_hpa(vcpu, va);
		vaddr = kmap_atomic(pfn_to_page(PHYS_PFN(paddr)));
		vaddr += paddr & ~PAGE_MASK;
		inst = *(u32 *)vaddr;
		kunmap_atomic(vaddr);
	} else {
		kvm_err("%s: illegal address: %p\n", __func__, opc);
		return KVM_INVALID_INST;
	}

	return inst;
}