mips: drop __pXd_offset() macros that duplicate pXd_index() ones
arch/mips/kvm/mmu.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS MMU handling in the KVM module.
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/highmem.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>

/*
 * KVM_MMU_CACHE_MIN_PAGES is the number of GPA page table translation levels
 * for which pages need to be cached. The PGD is preallocated, so with the PMD
 * level folded only a PTE table may need allocating; otherwise both a PMD and
 * a PTE table may be needed.
 */
#if defined(__PAGETABLE_PMD_FOLDED)
#define KVM_MMU_CACHE_MIN_PAGES 1
#else
#define KVM_MMU_CACHE_MIN_PAGES 2
#endif

static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
                                  int min, int max)
{
        void *page;

        BUG_ON(max > KVM_NR_MEM_OBJS);
        if (cache->nobjs >= min)
                return 0;
        while (cache->nobjs < max) {
                page = (void *)__get_free_page(GFP_KERNEL);
                if (!page)
                        return -ENOMEM;
                cache->objects[cache->nobjs++] = page;
        }
        return 0;
}
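
/*
 * Callers top up the cache before taking the mmu_lock, so that page table
 * allocations made under the lock (via mmu_memory_cache_alloc()) cannot
 * fail. A minimal sketch of the pattern used in this file:
 *
 *	mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES,
 *			       KVM_NR_MEM_OBJS);
 *	spin_lock(&kvm->mmu_lock);
 *	ptep = kvm_mips_pte_for_gpa(kvm, memcache, gpa);
 *	spin_unlock(&kvm->mmu_lock);
 */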

static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
{
        while (mc->nobjs)
                free_page((unsigned long)mc->objects[--mc->nobjs]);
}

static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
{
        void *p;

        BUG_ON(!mc || !mc->nobjs);
        p = mc->objects[--mc->nobjs];
        return p;
}

void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
        mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
}

/**
 * kvm_pgd_init() - Initialise KVM GPA page directory.
 * @page:	Pointer to page directory (PGD) for KVM GPA.
 *
 * Initialise a KVM GPA page directory with pointers to the invalid table, i.e.
 * representing no mappings. This is similar to pgd_init(); however, it
 * initialises all the page directory pointers, not just the ones corresponding
 * to the userland address space (since it is for the guest physical address
 * space rather than a virtual address space).
 */
static void kvm_pgd_init(void *page)
{
        unsigned long *p, *end;
        unsigned long entry;

#ifdef __PAGETABLE_PMD_FOLDED
        entry = (unsigned long)invalid_pte_table;
#else
        entry = (unsigned long)invalid_pmd_table;
#endif

        p = (unsigned long *)page;
        end = p + PTRS_PER_PGD;

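        /*
         * Unrolled: each iteration fills eight PGD entries, five before
         * and three after the pointer increment.
         */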
        do {
                p[0] = entry;
                p[1] = entry;
                p[2] = entry;
                p[3] = entry;
                p[4] = entry;
                p += 8;
                p[-3] = entry;
                p[-2] = entry;
                p[-1] = entry;
        } while (p != end);
}

/**
 * kvm_pgd_alloc() - Allocate and initialise a KVM GPA page directory.
 *
 * Allocate a blank KVM GPA page directory (PGD) for representing guest
 * physical to host physical page mappings.
 *
 * Returns:	Pointer to new KVM GPA page directory.
 *		NULL on allocation failure.
 */
pgd_t *kvm_pgd_alloc(void)
{
        pgd_t *ret;

        ret = (pgd_t *)__get_free_pages(GFP_KERNEL, PGD_ORDER);
        if (ret)
                kvm_pgd_init(ret);

        return ret;
}

/**
 * kvm_mips_walk_pgd() - Walk page table with optional allocation.
 * @pgd:	Page directory pointer.
 * @cache:	MMU page cache to allocate new page tables from, or NULL.
 * @addr:	Address to index page table using.
 *
 * Walk the page tables pointed to by @pgd to find the PTE corresponding to the
 * address @addr. If page tables don't exist for @addr, they will be created
 * from the MMU cache if @cache is not NULL.
 *
 * Returns:	Pointer to pte_t corresponding to @addr.
 *		NULL if a page table doesn't exist for @addr and !@cache.
 *		NULL if a page table allocation failed.
 */
static pte_t *kvm_mips_walk_pgd(pgd_t *pgd, struct kvm_mmu_memory_cache *cache,
                                unsigned long addr)
{
        pud_t *pud;
        pmd_t *pmd;

        pgd += pgd_index(addr);
        if (pgd_none(*pgd)) {
                /* Not used on MIPS yet */
                BUG();
                return NULL;
        }
        pud = pud_offset(pgd, addr);
        if (pud_none(*pud)) {
                pmd_t *new_pmd;

                if (!cache)
                        return NULL;
                new_pmd = mmu_memory_cache_alloc(cache);
                pmd_init((unsigned long)new_pmd,
                         (unsigned long)invalid_pte_table);
                pud_populate(NULL, pud, new_pmd);
        }
        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd)) {
                pte_t *new_pte;

                if (!cache)
                        return NULL;
                new_pte = mmu_memory_cache_alloc(cache);
                clear_page(new_pte);
                pmd_populate_kernel(NULL, pmd, new_pte);
        }
        return pte_offset(pmd, addr);
}

/* Caller must hold kvm->mmu_lock */
static pte_t *kvm_mips_pte_for_gpa(struct kvm *kvm,
                                   struct kvm_mmu_memory_cache *cache,
                                   unsigned long addr)
{
        return kvm_mips_walk_pgd(kvm->arch.gpa_mm.pgd, cache, addr);
}

/*
 * kvm_mips_flush_gpa_{pte,pmd,pud,pgd,pt}.
 * Flush a range of guest physical address space from the VM's GPA page tables.
 */

static bool kvm_mips_flush_gpa_pte(pte_t *pte, unsigned long start_gpa,
                                   unsigned long end_gpa)
{
        int i_min = __pte_offset(start_gpa);
        int i_max = __pte_offset(end_gpa);
        bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PTE - 1);
        int i;

        for (i = i_min; i <= i_max; ++i) {
                if (!pte_present(pte[i]))
                        continue;

                set_pte(pte + i, __pte(0));
        }
        return safe_to_remove;
}

static bool kvm_mips_flush_gpa_pmd(pmd_t *pmd, unsigned long start_gpa,
                                   unsigned long end_gpa)
{
        pte_t *pte;
        unsigned long end = ~0ul;
        int i_min = pmd_index(start_gpa);
        int i_max = pmd_index(end_gpa);
        bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PMD - 1);
        int i;

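        /*
         * Interior tables cover their whole range: only the first iteration
         * keeps start_gpa, and only the last clamps end down to end_gpa.
         */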
        for (i = i_min; i <= i_max; ++i, start_gpa = 0) {
                if (!pmd_present(pmd[i]))
                        continue;

                pte = pte_offset(pmd + i, 0);
                if (i == i_max)
                        end = end_gpa;

                if (kvm_mips_flush_gpa_pte(pte, start_gpa, end)) {
                        pmd_clear(pmd + i);
                        pte_free_kernel(NULL, pte);
                } else {
                        safe_to_remove = false;
                }
        }
        return safe_to_remove;
}

static bool kvm_mips_flush_gpa_pud(pud_t *pud, unsigned long start_gpa,
                                   unsigned long end_gpa)
{
        pmd_t *pmd;
        unsigned long end = ~0ul;
        int i_min = pud_index(start_gpa);
        int i_max = pud_index(end_gpa);
        bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PUD - 1);
        int i;

        for (i = i_min; i <= i_max; ++i, start_gpa = 0) {
                if (!pud_present(pud[i]))
                        continue;

                pmd = pmd_offset(pud + i, 0);
                if (i == i_max)
                        end = end_gpa;

                if (kvm_mips_flush_gpa_pmd(pmd, start_gpa, end)) {
                        pud_clear(pud + i);
                        pmd_free(NULL, pmd);
                } else {
                        safe_to_remove = false;
                }
        }
        return safe_to_remove;
}

static bool kvm_mips_flush_gpa_pgd(pgd_t *pgd, unsigned long start_gpa,
                                   unsigned long end_gpa)
{
        pud_t *pud;
        unsigned long end = ~0ul;
        int i_min = pgd_index(start_gpa);
        int i_max = pgd_index(end_gpa);
        bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PGD - 1);
        int i;

        for (i = i_min; i <= i_max; ++i, start_gpa = 0) {
                if (!pgd_present(pgd[i]))
                        continue;

                pud = pud_offset(pgd + i, 0);
                if (i == i_max)
                        end = end_gpa;

                if (kvm_mips_flush_gpa_pud(pud, start_gpa, end)) {
                        pgd_clear(pgd + i);
                        pud_free(NULL, pud);
                } else {
                        safe_to_remove = false;
                }
        }
        return safe_to_remove;
}

/**
 * kvm_mips_flush_gpa_pt() - Flush a range of guest physical addresses.
 * @kvm:	KVM pointer.
 * @start_gfn:	Guest frame number of first page in GPA range to flush.
 * @end_gfn:	Guest frame number of last page in GPA range to flush.
 *
 * Flushes a range of GPA mappings from the GPA page tables.
 *
 * The caller must hold the @kvm->mmu_lock spinlock.
 *
 * Returns:	Whether it's safe to remove the top level page directory because
 *		all lower levels have been removed.
 */
bool kvm_mips_flush_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn)
{
        return kvm_mips_flush_gpa_pgd(kvm->arch.gpa_mm.pgd,
                                      start_gfn << PAGE_SHIFT,
                                      end_gfn << PAGE_SHIFT);
}
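
/*
 * For example (a sketch, not a caller in this file): unmapping a whole
 * memslot flushes its GPA range, under kvm->mmu_lock, with
 *
 *	kvm_mips_flush_gpa_pt(kvm, slot->base_gfn,
 *			      slot->base_gfn + slot->npages - 1);
 */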

#define BUILD_PTE_RANGE_OP(name, op) \
static int kvm_mips_##name##_pte(pte_t *pte, unsigned long start, \
                                 unsigned long end) \
{ \
        int ret = 0; \
        int i_min = __pte_offset(start); \
        int i_max = __pte_offset(end); \
        int i; \
        pte_t old, new; \
\
        for (i = i_min; i <= i_max; ++i) { \
                if (!pte_present(pte[i])) \
                        continue; \
\
                old = pte[i]; \
                new = op(old); \
                if (pte_val(new) == pte_val(old)) \
                        continue; \
                set_pte(pte + i, new); \
                ret = 1; \
        } \
        return ret; \
} \
\
/* returns true if anything was done */ \
static int kvm_mips_##name##_pmd(pmd_t *pmd, unsigned long start, \
                                 unsigned long end) \
{ \
        int ret = 0; \
        pte_t *pte; \
        unsigned long cur_end = ~0ul; \
        int i_min = pmd_index(start); \
        int i_max = pmd_index(end); \
        int i; \
\
        for (i = i_min; i <= i_max; ++i, start = 0) { \
                if (!pmd_present(pmd[i])) \
                        continue; \
\
                pte = pte_offset(pmd + i, 0); \
                if (i == i_max) \
                        cur_end = end; \
\
                ret |= kvm_mips_##name##_pte(pte, start, cur_end); \
        } \
        return ret; \
} \
\
static int kvm_mips_##name##_pud(pud_t *pud, unsigned long start, \
                                 unsigned long end) \
{ \
        int ret = 0; \
        pmd_t *pmd; \
        unsigned long cur_end = ~0ul; \
        int i_min = pud_index(start); \
        int i_max = pud_index(end); \
        int i; \
\
        for (i = i_min; i <= i_max; ++i, start = 0) { \
                if (!pud_present(pud[i])) \
                        continue; \
\
                pmd = pmd_offset(pud + i, 0); \
                if (i == i_max) \
                        cur_end = end; \
\
                ret |= kvm_mips_##name##_pmd(pmd, start, cur_end); \
        } \
        return ret; \
} \
\
static int kvm_mips_##name##_pgd(pgd_t *pgd, unsigned long start, \
                                 unsigned long end) \
{ \
        int ret = 0; \
        pud_t *pud; \
        unsigned long cur_end = ~0ul; \
        int i_min = pgd_index(start); \
        int i_max = pgd_index(end); \
        int i; \
\
        for (i = i_min; i <= i_max; ++i, start = 0) { \
                if (!pgd_present(pgd[i])) \
                        continue; \
\
                pud = pud_offset(pgd + i, 0); \
                if (i == i_max) \
                        cur_end = end; \
\
                ret |= kvm_mips_##name##_pud(pud, start, cur_end); \
        } \
        return ret; \
}

/*
 * kvm_mips_mkclean_gpa_pt.
 * Mark a range of guest physical address space clean (writes fault) in the
 * VM's GPA page table to allow dirty page tracking.
 */

BUILD_PTE_RANGE_OP(mkclean, pte_mkclean)
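/* Generates kvm_mips_mkclean_pte(), _pmd(), _pud() and _pgd() for use below. */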

/**
 * kvm_mips_mkclean_gpa_pt() - Make a range of guest physical addresses clean.
 * @kvm:	KVM pointer.
 * @start_gfn:	Guest frame number of first page in GPA range to flush.
 * @end_gfn:	Guest frame number of last page in GPA range to flush.
 *
 * Make a range of GPA mappings clean so that guest writes will fault and
 * trigger dirty page logging.
 *
 * The caller must hold the @kvm->mmu_lock spinlock.
 *
 * Returns:	Whether any GPA mappings were modified, which would require
 *		derived mappings (GVA page tables & TLB entries) to be
 *		invalidated.
 */
int kvm_mips_mkclean_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn)
{
        return kvm_mips_mkclean_pgd(kvm->arch.gpa_mm.pgd,
                                    start_gfn << PAGE_SHIFT,
                                    end_gfn << PAGE_SHIFT);
}

/**
 * kvm_arch_mmu_enable_log_dirty_pt_masked() - write protect dirty pages
 * @kvm:	The KVM pointer
 * @slot:	The memory slot associated with mask
 * @gfn_offset:	The gfn offset in memory slot
 * @mask:	The mask of dirty pages at offset 'gfn_offset' in this memory
 *		slot to be write protected
 *
 * Walks the bits set in @mask and write protects the associated PTEs. The
 * caller must acquire @kvm->mmu_lock.
 */
void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
                                             struct kvm_memory_slot *slot,
                                             gfn_t gfn_offset, unsigned long mask)
{
        gfn_t base_gfn = slot->base_gfn + gfn_offset;
        gfn_t start = base_gfn + __ffs(mask);
        gfn_t end = base_gfn + __fls(mask);

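        /* Clean the whole span at once; bit i of @mask marks page base_gfn + i. */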
        kvm_mips_mkclean_gpa_pt(kvm, start, end);
}

/*
 * kvm_mips_mkold_gpa_pt.
 * Mark a range of guest physical address space old (all accesses fault) in the
 * VM's GPA page table to allow detection of commonly used pages.
 */

BUILD_PTE_RANGE_OP(mkold, pte_mkold)

static int kvm_mips_mkold_gpa_pt(struct kvm *kvm, gfn_t start_gfn,
                                 gfn_t end_gfn)
{
        return kvm_mips_mkold_pgd(kvm->arch.gpa_mm.pgd,
                                  start_gfn << PAGE_SHIFT,
                                  end_gfn << PAGE_SHIFT);
}

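/*
 * handle_hva_to_gpa() - Apply @handler to every memslot that intersects the
 * HVA range [@start, @end), translating each overlap into a GFN range. The
 * handlers' return values are OR'd together into the final result.
 */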
static int handle_hva_to_gpa(struct kvm *kvm,
                             unsigned long start,
                             unsigned long end,
                             int (*handler)(struct kvm *kvm, gfn_t gfn,
                                            gpa_t gfn_end,
                                            struct kvm_memory_slot *memslot,
                                            void *data),
                             void *data)
{
        struct kvm_memslots *slots;
        struct kvm_memory_slot *memslot;
        int ret = 0;

        slots = kvm_memslots(kvm);

        /* we only care about the pages that the guest sees */
        kvm_for_each_memslot(memslot, slots) {
                unsigned long hva_start, hva_end;
                gfn_t gfn, gfn_end;

                hva_start = max(start, memslot->userspace_addr);
                hva_end = min(end, memslot->userspace_addr +
                                        (memslot->npages << PAGE_SHIFT));
                if (hva_start >= hva_end)
                        continue;

                /*
                 * {gfn(page) | page intersects with [hva_start, hva_end)} =
                 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
                 */
                gfn = hva_to_gfn_memslot(hva_start, memslot);
                gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);

                ret |= handler(kvm, gfn, gfn_end, memslot, data);
        }

        return ret;
}

static int kvm_unmap_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
                                 struct kvm_memory_slot *memslot, void *data)
{
        kvm_mips_flush_gpa_pt(kvm, gfn, gfn_end);
        return 1;
}

int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
{
        handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);

        kvm_mips_callbacks->flush_shadow_all(kvm);
        return 0;
}

static int kvm_set_spte_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
                                struct kvm_memory_slot *memslot, void *data)
{
        gpa_t gpa = gfn << PAGE_SHIFT;
        pte_t hva_pte = *(pte_t *)data;
        pte_t *gpa_pte = kvm_mips_pte_for_gpa(kvm, NULL, gpa);
        pte_t old_pte;

        if (!gpa_pte)
                return 0;

        /* Mapping may need adjusting depending on memslot flags */
        old_pte = *gpa_pte;
        if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES && !pte_dirty(old_pte))
                hva_pte = pte_mkclean(hva_pte);
        else if (memslot->flags & KVM_MEM_READONLY)
                hva_pte = pte_wrprotect(hva_pte);

        set_pte(gpa_pte, hva_pte);

        /* Replacing an absent or old page doesn't need flushes */
        if (!pte_present(old_pte) || !pte_young(old_pte))
                return 0;

        /* Pages swapped, aged, moved, or cleaned require flushes */
        return !pte_present(hva_pte) ||
               !pte_young(hva_pte) ||
               pte_pfn(old_pte) != pte_pfn(hva_pte) ||
               (pte_dirty(old_pte) && !pte_dirty(hva_pte));
}

int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
        unsigned long end = hva + PAGE_SIZE;
        int ret;

        ret = handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &pte);
        if (ret)
                kvm_mips_callbacks->flush_shadow_all(kvm);
        return 0;
}

static int kvm_age_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
                               struct kvm_memory_slot *memslot, void *data)
{
        return kvm_mips_mkold_gpa_pt(kvm, gfn, gfn_end);
}

static int kvm_test_age_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
                                    struct kvm_memory_slot *memslot, void *data)
{
        gpa_t gpa = gfn << PAGE_SHIFT;
        pte_t *gpa_pte = kvm_mips_pte_for_gpa(kvm, NULL, gpa);

        if (!gpa_pte)
                return 0;
        return pte_young(*gpa_pte);
}

int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
{
        return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL);
}

int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
        return handle_hva_to_gpa(kvm, hva, hva, kvm_test_age_hva_handler, NULL);
}

/**
 * _kvm_mips_map_page_fast() - Fast path GPA fault handler.
 * @vcpu:		VCPU pointer.
 * @gpa:		Guest physical address of fault.
 * @write_fault:	Whether the fault was due to a write.
 * @out_entry:		New PTE for @gpa (written on success unless NULL).
 * @out_buddy:		New PTE for @gpa's buddy (written on success unless
 *			NULL).
 *
 * Perform fast path GPA fault handling, doing all that can be done without
 * calling into KVM. This handles marking old pages young (for idle page
 * tracking), and dirtying of clean pages (for dirty page logging).
 *
 * Returns:	0 on success, in which case we can update derived mappings and
 *		resume guest execution.
 *		-EFAULT on failure due to absent GPA mapping or write to
 *		read-only page, in which case KVM must be consulted.
 */
static int _kvm_mips_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa,
                                   bool write_fault,
                                   pte_t *out_entry, pte_t *out_buddy)
{
        struct kvm *kvm = vcpu->kvm;
        gfn_t gfn = gpa >> PAGE_SHIFT;
        pte_t *ptep;
        kvm_pfn_t pfn = 0;	/* silence bogus GCC warning */
        bool pfn_valid = false;
        int ret = 0;

        spin_lock(&kvm->mmu_lock);

        /* Fast path - just check GPA page table for an existing entry */
        ptep = kvm_mips_pte_for_gpa(kvm, NULL, gpa);
        if (!ptep || !pte_present(*ptep)) {
                ret = -EFAULT;
                goto out;
        }

        /* Track access to pages marked old */
        if (!pte_young(*ptep)) {
                set_pte(ptep, pte_mkyoung(*ptep));
                pfn = pte_pfn(*ptep);
                pfn_valid = true;
                /* call kvm_set_pfn_accessed() after unlock */
        }
        if (write_fault && !pte_dirty(*ptep)) {
                if (!pte_write(*ptep)) {
                        ret = -EFAULT;
                        goto out;
                }

                /* Track dirtying of writeable pages */
                set_pte(ptep, pte_mkdirty(*ptep));
                pfn = pte_pfn(*ptep);
                mark_page_dirty(kvm, gfn);
                kvm_set_pfn_dirty(pfn);
        }

        if (out_entry)
                *out_entry = *ptep;
        if (out_buddy)
                *out_buddy = *ptep_buddy(ptep);

out:
        spin_unlock(&kvm->mmu_lock);
        if (pfn_valid)
                kvm_set_pfn_accessed(pfn);
        return ret;
}

/**
 * kvm_mips_map_page() - Map a guest physical page.
 * @vcpu:		VCPU pointer.
 * @gpa:		Guest physical address of fault.
 * @write_fault:	Whether the fault was due to a write.
 * @out_entry:		New PTE for @gpa (written on success unless NULL).
 * @out_buddy:		New PTE for @gpa's buddy (written on success unless
 *			NULL).
 *
 * Handle GPA faults by creating a new GPA mapping (or updating an existing
 * one).
 *
 * This takes care of marking pages young or dirty (idle/dirty page tracking),
 * asking KVM for the corresponding PFN, and creating a mapping in the GPA page
 * tables. Derived mappings (GVA page tables and TLBs) must be handled by the
 * caller.
 *
 * Returns:	0 on success, in which case the caller may use the @out_entry
 *		and @out_buddy PTEs to update derived mappings and resume guest
 *		execution.
 *		-EFAULT if there is no memory region at @gpa or a write was
 *		attempted to a read-only memory region. This is usually handled
 *		as an MMIO access.
 */
static int kvm_mips_map_page(struct kvm_vcpu *vcpu, unsigned long gpa,
                             bool write_fault,
                             pte_t *out_entry, pte_t *out_buddy)
{
        struct kvm *kvm = vcpu->kvm;
        struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
        gfn_t gfn = gpa >> PAGE_SHIFT;
        int srcu_idx, err;
        kvm_pfn_t pfn;
        pte_t *ptep, entry, old_pte;
        bool writeable;
        unsigned long prot_bits;
        unsigned long mmu_seq;

        /* Try the fast path to handle old / clean pages */
        srcu_idx = srcu_read_lock(&kvm->srcu);
        err = _kvm_mips_map_page_fast(vcpu, gpa, write_fault, out_entry,
                                      out_buddy);
        if (!err)
                goto out;

        /* We need a minimum of cached pages ready for page table creation */
        err = mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES,
                                     KVM_NR_MEM_OBJS);
        if (err)
                goto out;

retry:
        /*
         * Used to check for invalidations in progress, of the pfn that is
         * returned by gfn_to_pfn_prot() below.
         */
        mmu_seq = kvm->mmu_notifier_seq;
        /*
         * Ensure the read of mmu_notifier_seq isn't reordered with PTE reads in
         * gfn_to_pfn_prot() (which calls get_user_pages()), so that we don't
         * risk the page we get a reference to getting unmapped before we have a
         * chance to grab the mmu_lock without mmu_notifier_retry() noticing.
         *
         * This smp_rmb() pairs with the effective smp_wmb() of the combination
         * of the pte_unmap_unlock() after the PTE is zapped, and the
         * spin_lock() in kvm_mmu_notifier_invalidate_<page|range_end>() before
         * mmu_notifier_seq is incremented.
         */
        smp_rmb();

        /* Slow path - ask KVM core whether we can access this GPA */
        pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writeable);
        if (is_error_noslot_pfn(pfn)) {
                err = -EFAULT;
                goto out;
        }

        spin_lock(&kvm->mmu_lock);
        /* Check if an invalidation has taken place since we got pfn */
        if (mmu_notifier_retry(kvm, mmu_seq)) {
                /*
                 * This can happen when mappings are changed asynchronously, but
                 * also synchronously if a COW is triggered by
                 * gfn_to_pfn_prot().
                 */
                spin_unlock(&kvm->mmu_lock);
                kvm_release_pfn_clean(pfn);
                goto retry;
        }

        /* Ensure page tables are allocated */
        ptep = kvm_mips_pte_for_gpa(kvm, memcache, gpa);

        /* Set up the PTE */
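        /*
         * The mapping starts out clean and read-only. _PAGE_WRITE records that
         * writes are permitted; the dirty bits (__WRITEABLE) are only set on an
         * actual write fault so that clean pages keep faulting for dirty
         * logging.
         */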
        prot_bits = _PAGE_PRESENT | __READABLE | _page_cachable_default;
        if (writeable) {
                prot_bits |= _PAGE_WRITE;
                if (write_fault) {
                        prot_bits |= __WRITEABLE;
                        mark_page_dirty(kvm, gfn);
                        kvm_set_pfn_dirty(pfn);
                }
        }
        entry = pfn_pte(pfn, __pgprot(prot_bits));

        /* Write the PTE */
        old_pte = *ptep;
        set_pte(ptep, entry);

        err = 0;
        if (out_entry)
                *out_entry = *ptep;
        if (out_buddy)
                *out_buddy = *ptep_buddy(ptep);

        spin_unlock(&kvm->mmu_lock);
        kvm_release_pfn_clean(pfn);
        kvm_set_pfn_accessed(pfn);
out:
        srcu_read_unlock(&kvm->srcu, srcu_idx);
        return err;
}

static pte_t *kvm_trap_emul_pte_for_gva(struct kvm_vcpu *vcpu,
                                        unsigned long addr)
{
        struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
        pgd_t *pgdp;
        int ret;

        /* We need a minimum of cached pages ready for page table creation */
        ret = mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES,
                                     KVM_NR_MEM_OBJS);
        if (ret)
                return NULL;

        if (KVM_GUEST_KERNEL_MODE(vcpu))
                pgdp = vcpu->arch.guest_kernel_mm.pgd;
        else
                pgdp = vcpu->arch.guest_user_mm.pgd;

        return kvm_mips_walk_pgd(pgdp, memcache, addr);
}

void kvm_trap_emul_invalidate_gva(struct kvm_vcpu *vcpu, unsigned long addr,
                                  bool user)
{
        pgd_t *pgdp;
        pte_t *ptep;

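        /* Align to the base of the (even, odd) page pair mapped by one TLB entry. */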
        addr &= PAGE_MASK << 1;

        pgdp = vcpu->arch.guest_kernel_mm.pgd;
        ptep = kvm_mips_walk_pgd(pgdp, NULL, addr);
        if (ptep) {
                ptep[0] = pfn_pte(0, __pgprot(0));
                ptep[1] = pfn_pte(0, __pgprot(0));
        }

        if (user) {
                pgdp = vcpu->arch.guest_user_mm.pgd;
                ptep = kvm_mips_walk_pgd(pgdp, NULL, addr);
                if (ptep) {
                        ptep[0] = pfn_pte(0, __pgprot(0));
                        ptep[1] = pfn_pte(0, __pgprot(0));
                }
        }
}

/*
 * kvm_mips_flush_gva_{pte,pmd,pud,pgd,pt}.
 * Flush a range of guest virtual address space from the VM's GVA page tables.
 */

static bool kvm_mips_flush_gva_pte(pte_t *pte, unsigned long start_gva,
                                   unsigned long end_gva)
{
        int i_min = __pte_offset(start_gva);
        int i_max = __pte_offset(end_gva);
        bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PTE - 1);
        int i;

        /*
         * There's no freeing to do, so there's no point clearing individual
         * entries unless only part of the last level page table needs flushing.
         */
        if (safe_to_remove)
                return true;

        for (i = i_min; i <= i_max; ++i) {
                if (!pte_present(pte[i]))
                        continue;

                set_pte(pte + i, __pte(0));
        }
        return false;
}

static bool kvm_mips_flush_gva_pmd(pmd_t *pmd, unsigned long start_gva,
                                   unsigned long end_gva)
{
        pte_t *pte;
        unsigned long end = ~0ul;
        int i_min = pmd_index(start_gva);
        int i_max = pmd_index(end_gva);
        bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PMD - 1);
        int i;

        for (i = i_min; i <= i_max; ++i, start_gva = 0) {
                if (!pmd_present(pmd[i]))
                        continue;

                pte = pte_offset(pmd + i, 0);
                if (i == i_max)
                        end = end_gva;

                if (kvm_mips_flush_gva_pte(pte, start_gva, end)) {
                        pmd_clear(pmd + i);
                        pte_free_kernel(NULL, pte);
                } else {
                        safe_to_remove = false;
                }
        }
        return safe_to_remove;
}

static bool kvm_mips_flush_gva_pud(pud_t *pud, unsigned long start_gva,
                                   unsigned long end_gva)
{
        pmd_t *pmd;
        unsigned long end = ~0ul;
        int i_min = pud_index(start_gva);
        int i_max = pud_index(end_gva);
        bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PUD - 1);
        int i;

        for (i = i_min; i <= i_max; ++i, start_gva = 0) {
                if (!pud_present(pud[i]))
                        continue;

                pmd = pmd_offset(pud + i, 0);
                if (i == i_max)
                        end = end_gva;

                if (kvm_mips_flush_gva_pmd(pmd, start_gva, end)) {
                        pud_clear(pud + i);
                        pmd_free(NULL, pmd);
                } else {
                        safe_to_remove = false;
                }
        }
        return safe_to_remove;
}

static bool kvm_mips_flush_gva_pgd(pgd_t *pgd, unsigned long start_gva,
                                   unsigned long end_gva)
{
        pud_t *pud;
        unsigned long end = ~0ul;
        int i_min = pgd_index(start_gva);
        int i_max = pgd_index(end_gva);
        bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PGD - 1);
        int i;

        for (i = i_min; i <= i_max; ++i, start_gva = 0) {
                if (!pgd_present(pgd[i]))
                        continue;

                pud = pud_offset(pgd + i, 0);
                if (i == i_max)
                        end = end_gva;

                if (kvm_mips_flush_gva_pud(pud, start_gva, end)) {
                        pgd_clear(pgd + i);
                        pud_free(NULL, pud);
                } else {
                        safe_to_remove = false;
                }
        }
        return safe_to_remove;
}

void kvm_mips_flush_gva_pt(pgd_t *pgd, enum kvm_mips_flush flags)
{
        if (flags & KMF_GPA) {
                /* all of guest virtual address space could be affected */
                if (flags & KMF_KERN)
                        /* useg, kseg0, seg2/3 */
                        kvm_mips_flush_gva_pgd(pgd, 0, 0x7fffffff);
                else
                        /* useg */
                        kvm_mips_flush_gva_pgd(pgd, 0, 0x3fffffff);
        } else {
                /* useg */
                kvm_mips_flush_gva_pgd(pgd, 0, 0x3fffffff);

                /* kseg2/3 */
                if (flags & KMF_KERN)
                        kvm_mips_flush_gva_pgd(pgd, 0x60000000, 0x7fffffff);
        }
}

static pte_t kvm_mips_gpa_pte_to_gva_unmapped(pte_t pte)
{
        /*
         * Don't leak writeable but clean entries from GPA page tables. We don't
         * want the normal Linux tlbmod handler to handle dirtying when KVM
         * accesses guest memory.
         */
        if (!pte_dirty(pte))
                pte = pte_wrprotect(pte);

        return pte;
}

static pte_t kvm_mips_gpa_pte_to_gva_mapped(pte_t pte, long entrylo)
{
        /* Guest EntryLo overrides host EntryLo */
        if (!(entrylo & ENTRYLO_D))
                pte = pte_mkclean(pte);

        return kvm_mips_gpa_pte_to_gva_unmapped(pte);
}

#ifdef CONFIG_KVM_MIPS_VZ
int kvm_mips_handle_vz_root_tlb_fault(unsigned long badvaddr,
                                      struct kvm_vcpu *vcpu,
                                      bool write_fault)
{
        int ret;

        ret = kvm_mips_map_page(vcpu, badvaddr, write_fault, NULL, NULL);
        if (ret)
                return ret;

        /* Invalidate this entry in the TLB */
        return kvm_vz_host_tlb_inv(vcpu, badvaddr);
}
#endif

/* XXXKYMA: Must be called with interrupts disabled */
int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
                                    struct kvm_vcpu *vcpu,
                                    bool write_fault)
{
        unsigned long gpa;
        pte_t pte_gpa[2], *ptep_gva;
        int idx;

        if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) {
                kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr);
                kvm_mips_dump_host_tlbs();
                return -1;
        }

        /* Get the GPA page table entry */
        gpa = KVM_GUEST_CPHYSADDR(badvaddr);
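        /* Which half of the (even, odd) host TLB pair the fault hit. */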
        idx = (badvaddr >> PAGE_SHIFT) & 1;
        if (kvm_mips_map_page(vcpu, gpa, write_fault, &pte_gpa[idx],
                              &pte_gpa[!idx]) < 0)
                return -1;

        /* Get the GVA page table entry */
        ptep_gva = kvm_trap_emul_pte_for_gva(vcpu, badvaddr & ~PAGE_SIZE);
        if (!ptep_gva) {
                kvm_err("No ptep for gva %lx\n", badvaddr);
                return -1;
        }

        /* Copy a pair of entries from GPA page table to GVA page table */
        ptep_gva[0] = kvm_mips_gpa_pte_to_gva_unmapped(pte_gpa[0]);
        ptep_gva[1] = kvm_mips_gpa_pte_to_gva_unmapped(pte_gpa[1]);

        /* Invalidate this entry in the TLB, guest kernel ASID only */
        kvm_mips_host_tlb_inv(vcpu, badvaddr, false, true);
        return 0;
}

int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
                                         struct kvm_mips_tlb *tlb,
                                         unsigned long gva,
                                         bool write_fault)
{
        struct kvm *kvm = vcpu->kvm;
        long tlb_lo[2];
        pte_t pte_gpa[2], *ptep_buddy, *ptep_gva;
        unsigned int idx = TLB_LO_IDX(*tlb, gva);
        bool kernel = KVM_GUEST_KERNEL_MODE(vcpu);

        tlb_lo[0] = tlb->tlb_lo[0];
        tlb_lo[1] = tlb->tlb_lo[1];

        /*
         * The commpage address must not be mapped to anything else if the guest
         * TLB contains entries nearby, or commpage accesses will break.
         */
        if (!((gva ^ KVM_GUEST_COMMPAGE_ADDR) & VPN2_MASK & (PAGE_MASK << 1)))
                tlb_lo[TLB_LO_IDX(*tlb, KVM_GUEST_COMMPAGE_ADDR)] = 0;

        /* Get the GPA page table entry */
        if (kvm_mips_map_page(vcpu, mips3_tlbpfn_to_paddr(tlb_lo[idx]),
                              write_fault, &pte_gpa[idx], NULL) < 0)
                return -1;

        /* And its GVA buddy's GPA page table entry if it also exists */
        pte_gpa[!idx] = pfn_pte(0, __pgprot(0));
        if (tlb_lo[!idx] & ENTRYLO_V) {
                spin_lock(&kvm->mmu_lock);
                ptep_buddy = kvm_mips_pte_for_gpa(kvm, NULL,
                                        mips3_tlbpfn_to_paddr(tlb_lo[!idx]));
                if (ptep_buddy)
                        pte_gpa[!idx] = *ptep_buddy;
                spin_unlock(&kvm->mmu_lock);
        }

        /* Get the GVA page table entry pair */
        ptep_gva = kvm_trap_emul_pte_for_gva(vcpu, gva & ~PAGE_SIZE);
        if (!ptep_gva) {
                kvm_err("No ptep for gva %lx\n", gva);
                return -1;
        }

        /* Copy a pair of entries from GPA page table to GVA page table */
        ptep_gva[0] = kvm_mips_gpa_pte_to_gva_mapped(pte_gpa[0], tlb_lo[0]);
        ptep_gva[1] = kvm_mips_gpa_pte_to_gva_mapped(pte_gpa[1], tlb_lo[1]);

        /* Invalidate this entry in the TLB, current guest mode ASID only */
        kvm_mips_host_tlb_inv(vcpu, gva, !kernel, kernel);

        kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
                  tlb->tlb_lo[0], tlb->tlb_lo[1]);

        return 0;
}

int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
                                       struct kvm_vcpu *vcpu)
{
        kvm_pfn_t pfn;
        pte_t *ptep;

        ptep = kvm_trap_emul_pte_for_gva(vcpu, badvaddr);
        if (!ptep) {
                kvm_err("No ptep for commpage %lx\n", badvaddr);
                return -1;
        }

        pfn = PFN_DOWN(virt_to_phys(vcpu->arch.kseg0_commpage));
        /* Also set valid and dirty, so refill handler doesn't have to */
        *ptep = pte_mkyoung(pte_mkdirty(pfn_pte(pfn, PAGE_SHARED)));

        /* Invalidate this entry in the TLB, guest kernel ASID only */
        kvm_mips_host_tlb_inv(vcpu, badvaddr, false, true);
        return 0;
}

/**
 * kvm_mips_migrate_count() - Migrate timer.
 * @vcpu:	Virtual CPU.
 *
 * Migrate CP0_Count hrtimer to the current CPU by cancelling and restarting it
 * if it was running prior to being cancelled.
 *
 * Must be called when the VCPU is migrated to a different CPU to ensure that
 * timer expiry during guest execution interrupts the guest and causes the
 * interrupt to be delivered in a timely manner.
 */
static void kvm_mips_migrate_count(struct kvm_vcpu *vcpu)
{
        if (hrtimer_cancel(&vcpu->arch.comparecount_timer))
                hrtimer_restart(&vcpu->arch.comparecount_timer);
}

/* Restore ASID once we are scheduled back after preemption */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        unsigned long flags;

        kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);

        local_irq_save(flags);

        vcpu->cpu = cpu;
        if (vcpu->arch.last_sched_cpu != cpu) {
                kvm_debug("[%d->%d]KVM VCPU[%d] switch\n",
                          vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
                /*
                 * Migrate the timer interrupt to the current CPU so that it
                 * always interrupts the guest and synchronously triggers a
                 * guest timer interrupt.
                 */
                kvm_mips_migrate_count(vcpu);
        }

        /* restore guest state to registers */
        kvm_mips_callbacks->vcpu_load(vcpu, cpu);

        local_irq_restore(flags);
}

/* ASID can change if another task is scheduled during preemption */
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        unsigned long flags;
        int cpu;

        local_irq_save(flags);

        cpu = smp_processor_id();
        vcpu->arch.last_sched_cpu = cpu;
        vcpu->cpu = -1;

        /* save guest state in registers */
        kvm_mips_callbacks->vcpu_put(vcpu, cpu);

        local_irq_restore(flags);
}

/**
 * kvm_trap_emul_gva_fault() - Safely attempt to handle a GVA access fault.
 * @vcpu:	Virtual CPU.
 * @gva:	Guest virtual address to be accessed.
 * @write:	True if write attempted (must be dirtied and made writable).
 *
 * Safely attempt to handle a GVA fault, mapping GVA pages if necessary, and
 * dirtying the page if @write so that guest instructions can be modified.
 *
 * Returns:	KVM_MIPS_MAPPED on success.
 *		KVM_MIPS_GVA if bad guest virtual address.
 *		KVM_MIPS_GPA if bad guest physical address.
 *		KVM_MIPS_TLB if guest TLB not present.
 *		KVM_MIPS_TLBINV if guest TLB present but not valid.
 *		KVM_MIPS_TLBMOD if guest TLB read only.
 */
enum kvm_mips_fault_result kvm_trap_emul_gva_fault(struct kvm_vcpu *vcpu,
                                                   unsigned long gva,
                                                   bool write)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        struct kvm_mips_tlb *tlb;
        int index;

        if (KVM_GUEST_KSEGX(gva) == KVM_GUEST_KSEG0) {
                if (kvm_mips_handle_kseg0_tlb_fault(gva, vcpu, write) < 0)
                        return KVM_MIPS_GPA;
        } else if ((KVM_GUEST_KSEGX(gva) < KVM_GUEST_KSEG0) ||
                   KVM_GUEST_KSEGX(gva) == KVM_GUEST_KSEG23) {
                /* Address should be in the guest TLB */
                index = kvm_mips_guest_tlb_lookup(vcpu, (gva & VPN2_MASK) |
                                (kvm_read_c0_guest_entryhi(cop0) &
                                 KVM_ENTRYHI_ASID));
                if (index < 0)
                        return KVM_MIPS_TLB;
                tlb = &vcpu->arch.guest_tlb[index];

                /* Entry should be valid, and dirty for writes */
                if (!TLB_IS_VALID(*tlb, gva))
                        return KVM_MIPS_TLBINV;
                if (write && !TLB_IS_DIRTY(*tlb, gva))
                        return KVM_MIPS_TLBMOD;

                if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, gva, write))
                        return KVM_MIPS_GPA;
        } else {
                return KVM_MIPS_GVA;
        }

        return KVM_MIPS_MAPPED;
}

int kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu, u32 *out)
{
        int err;

        if (WARN(IS_ENABLED(CONFIG_KVM_MIPS_VZ),
                 "Expect BadInstr/BadInstrP registers to be used with VZ\n"))
                return -EINVAL;

retry:
        kvm_trap_emul_gva_lockless_begin(vcpu);
        err = get_user(*out, opc);
        kvm_trap_emul_gva_lockless_end(vcpu);

        if (unlikely(err)) {
                /*
                 * Try to handle the fault, maybe we just raced with a GVA
                 * invalidation.
                 */
                err = kvm_trap_emul_gva_fault(vcpu, (unsigned long)opc,
                                              false);
                if (unlikely(err)) {
                        kvm_err("%s: illegal address: %p\n",
                                __func__, opc);
                        return -EFAULT;
                }

                /* Hopefully it'll work now */
                goto retry;
        }
        return 0;
}