// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
 */

#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kvm_host.h>
#include <linux/page-flags.h>
#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/kvm_mmu.h>
static inline bool kvm_hugepage_capable(struct kvm_memory_slot *slot)
	return slot->arch.flags & KVM_MEM_HUGEPAGE_CAPABLE;

static inline bool kvm_hugepage_incapable(struct kvm_memory_slot *slot)
	return slot->arch.flags & KVM_MEM_HUGEPAGE_INCAPABLE;
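/*
 * Seed a page-table-walk context from the VM: walk depth (root level),
 * per-level invalid-entry templates and per-level shifts.
 */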
static inline void kvm_ptw_prepare(struct kvm *kvm, kvm_ptw_ctx *ctx)
	ctx->level = kvm->arch.root_level;
	ctx->invalid_ptes = kvm->arch.invalid_ptes;
	ctx->pte_shifts = kvm->arch.pte_shifts;
	ctx->pgtable_shift = ctx->pte_shifts[ctx->level];
	ctx->invalid_entry = ctx->invalid_ptes[ctx->level];
/*
 * Mark a range of guest physical address space old (all accesses fault) in the
 * VM's GPA page table to allow detection of commonly used pages.
 */
static int kvm_mkold_pte(kvm_pte_t *pte, phys_addr_t addr, kvm_ptw_ctx *ctx)
	if (kvm_pte_young(*pte)) {
		*pte = kvm_pte_mkold(*pte);
/*
 * Mark a range of guest physical address space clean (writes fault) in the VM's
 * GPA page table to allow dirty page tracking.
 */
static int kvm_mkclean_pte(kvm_pte_t *pte, phys_addr_t addr, kvm_ptw_ctx *ctx)
	/*
	 * For kvm_arch_mmu_enable_log_dirty_pt_masked() with a mask, start and
	 * end may cross huge pages. For the first huge page the parameter addr
	 * equals start; for any following huge page addr is the base address
	 * of that huge page rather than start or end.
	 */
	if ((ctx->flag & _KVM_HAS_PGMASK) && !kvm_pte_huge(val)) {
		offset = (addr >> PAGE_SHIFT) - ctx->gfn;
		if (!(BIT(offset) & ctx->mask))

	/*
	 * No need to split the huge page now, just set the write-protect bit.
	 * The huge page is split on the next write fault.
	 */
	if (kvm_pte_dirty(val)) {
		*pte = kvm_pte_mkclean(val);
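/*
 * Invalidate one GPA page-table entry and update the VM's page statistics;
 * used by the page walker when tearing down a range of mappings.
 */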
static int kvm_flush_pte(kvm_pte_t *pte, phys_addr_t addr, kvm_ptw_ctx *ctx)
		kvm->stat.hugepages--;

	*pte = ctx->invalid_entry;
/*
 * kvm_pgd_alloc() - Allocate and initialise a KVM GPA page directory.
 *
 * Allocate a blank KVM GPA page directory (PGD) for representing guest physical
 * to host physical page mappings.
 *
 * Returns:	Pointer to new KVM GPA page directory.
 *		NULL on allocation failure.
 */
kvm_pte_t *kvm_pgd_alloc(void)
	pgd = (kvm_pte_t *)__get_free_pages(GFP_KERNEL, 0);
	if (pgd)
		pgd_init((void *)pgd);
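/*
 * Fill a newly allocated page-table page so that every one of its
 * PTRS_PER_PTE slots holds the given invalid-entry value.
 */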
static void _kvm_pte_init(void *addr, unsigned long val)
	unsigned long *p, *end;

	p = (unsigned long *)addr;
	end = p + PTRS_PER_PTE;
/*
 * Caller must hold kvm->mmu_lock.
 *
 * Walk the page tables of kvm to find the PTE corresponding to the
 * address @addr. If page tables don't exist for @addr, they will be created
 * from the MMU cache if @cache is not NULL.
 */
static kvm_pte_t *kvm_populate_gpa(struct kvm *kvm,
				   struct kvm_mmu_memory_cache *cache,
				   unsigned long addr, int level)
	kvm_pte_t *entry, *child;

	kvm_ptw_prepare(kvm, &ctx);
	child = kvm->arch.pgd;
	while (ctx.level > level) {
		entry = kvm_pgtable_offset(&ctx, child, addr);
		if (kvm_pte_none(&ctx, entry)) {
			child = kvm_mmu_memory_cache_alloc(cache);
			_kvm_pte_init(child, ctx.invalid_ptes[ctx.level - 1]);
			kvm_set_pte(entry, __pa(child));
		} else if (kvm_pte_huge(*entry)) {

		child = (kvm_pte_t *)__va(PHYSADDR(*entry));

	entry = kvm_pgtable_offset(&ctx, child, addr);
/*
 * Page walker for the VM shadow MMU at the last level.
 * The last level is a small pte page or a huge pmd page.
 */
static int kvm_ptw_leaf(kvm_pte_t *dir, phys_addr_t addr, phys_addr_t end, kvm_ptw_ctx *ctx)
	phys_addr_t next, start, size;
	struct list_head *list;
	kvm_pte_t *entry, *child;

	child = (kvm_pte_t *)__va(PHYSADDR(*dir));
	entry = kvm_pgtable_offset(ctx, child, addr);
	do {
		next = addr + (0x1UL << ctx->pgtable_shift);
		if (!kvm_pte_present(ctx, entry))
			continue;

		ret |= ctx->ops(entry, addr, ctx);
	} while (entry++, addr = next, addr < end);

	if (kvm_need_flush(ctx)) {
		size = 0x1UL << (ctx->pgtable_shift + PAGE_SHIFT - 3);
		if (start + size == end) {
			list = (struct list_head *)child;
			list_add_tail(list, &ctx->list);
			*dir = ctx->invalid_ptes[ctx->level + 1];
/*
 * Page walker for the VM shadow MMU at a page table directory level.
 */
static int kvm_ptw_dir(kvm_pte_t *dir, phys_addr_t addr, phys_addr_t end, kvm_ptw_ctx *ctx)
	phys_addr_t next, start, size;
	struct list_head *list;
	kvm_pte_t *entry, *child;

	child = (kvm_pte_t *)__va(PHYSADDR(*dir));
	entry = kvm_pgtable_offset(ctx, child, addr);
	do {
		next = kvm_pgtable_addr_end(ctx, addr, end);
		if (!kvm_pte_present(ctx, entry))
			continue;

		if (kvm_pte_huge(*entry)) {
			ret |= ctx->ops(entry, addr, ctx);

			ret |= kvm_ptw_leaf(entry, addr, next, ctx);
			ret |= kvm_ptw_dir(entry, addr, next, ctx);
	} while (entry++, addr = next, addr < end);

	if (kvm_need_flush(ctx)) {
		size = 0x1UL << (ctx->pgtable_shift + PAGE_SHIFT - 3);
		if (start + size == end) {
			list = (struct list_head *)child;
			list_add_tail(list, &ctx->list);
			*dir = ctx->invalid_ptes[ctx->level + 1];
/*
 * Page walker for the VM shadow MMU at the page root table.
 */
static int kvm_ptw_top(kvm_pte_t *dir, phys_addr_t addr, phys_addr_t end, kvm_ptw_ctx *ctx)
	entry = kvm_pgtable_offset(ctx, dir, addr);
	do {
		next = kvm_pgtable_addr_end(ctx, addr, end);
		if (!kvm_pte_present(ctx, entry))
			continue;

		ret |= kvm_ptw_dir(entry, addr, next, ctx);
	} while (entry++, addr = next, addr < end);
/*
 * kvm_flush_range() - Flush a range of guest physical addresses.
 * @start_gfn:	Guest frame number of first page in GPA range to flush.
 * @end_gfn:	Guest frame number of last page in GPA range to flush.
 * @lock:	Whether to hold mmu_lock or not.
 *
 * Flushes a range of GPA mappings from the GPA page tables.
 */
static void kvm_flush_range(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn, int lock)
	struct list_head *pos, *temp;

	ctx.ops = kvm_flush_pte;
	ctx.flag = _KVM_FLUSH_PGTABLE;
	kvm_ptw_prepare(kvm, &ctx);
	INIT_LIST_HEAD(&ctx.list);

	if (lock) {
		spin_lock(&kvm->mmu_lock);
		ret = kvm_ptw_top(kvm->arch.pgd, start_gfn << PAGE_SHIFT,
					end_gfn << PAGE_SHIFT, &ctx);
		spin_unlock(&kvm->mmu_lock);
	} else
		ret = kvm_ptw_top(kvm->arch.pgd, start_gfn << PAGE_SHIFT,
					end_gfn << PAGE_SHIFT, &ctx);

	/* Flush vpid for each vCPU individually */
	if (ret)
		kvm_flush_remote_tlbs(kvm);

	/*
	 * Free the pte table pages after dropping mmu_lock;
	 * they are linked together on ctx.list.
	 */
	list_for_each_safe(pos, temp, &ctx.list) {
		free_page((unsigned long)pos);
/*
 * kvm_mkclean_gpa_pt() - Make a range of guest physical addresses clean.
 * @start_gfn:	Guest frame number of first page in GPA range to flush.
 * @end_gfn:	Guest frame number of last page in GPA range to flush.
 *
 * Make a range of GPA mappings clean so that guest writes will fault and
 * trigger dirty page logging.
 *
 * The caller must hold the @kvm->mmu_lock spinlock.
 *
 * Returns:	Whether any GPA mappings were modified, which would require
 *		derived mappings (GVA page tables & TLB entries) to be
 *		flushed.
 */
static int kvm_mkclean_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn)
	ctx.ops = kvm_mkclean_pte;
	kvm_ptw_prepare(kvm, &ctx);

	return kvm_ptw_top(kvm->arch.pgd, start_gfn << PAGE_SHIFT, end_gfn << PAGE_SHIFT, &ctx);
/*
 * kvm_arch_mmu_enable_log_dirty_pt_masked() - write protect dirty pages
 * @kvm:	The KVM pointer
 * @slot:	The memory slot associated with mask
 * @gfn_offset:	The gfn offset in memory slot
 * @mask:	The mask of dirty pages at offset 'gfn_offset' in this memory
 *		slot to be write protected
 *
 * Walks the bits set in @mask and write protects the associated PTEs.
 * Caller must acquire @kvm->mmu_lock.
 */
void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
		struct kvm_memory_slot *slot, gfn_t gfn_offset, unsigned long mask)
	gfn_t base_gfn = slot->base_gfn + gfn_offset;
	gfn_t start = base_gfn + __ffs(mask);
	gfn_t end = base_gfn + __fls(mask) + 1;

	ctx.ops = kvm_mkclean_pte;
	ctx.flag = _KVM_HAS_PGMASK;
	ctx.gfn = base_gfn;
	ctx.mask = mask;
	kvm_ptw_prepare(kvm, &ctx);

	kvm_ptw_top(kvm->arch.pgd, start << PAGE_SHIFT, end << PAGE_SHIFT, &ctx);
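/*
 * Example for kvm_arch_mmu_enable_log_dirty_pt_masked(): with
 * gfn_offset == 0x200 and mask == 0b1010, the pages at
 * slot->base_gfn + 0x201 and slot->base_gfn + 0x203 are write protected,
 * since each set bit in @mask selects one page above base_gfn.
 */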
int kvm_arch_prepare_memory_region(struct kvm *kvm, const struct kvm_memory_slot *old,
				   struct kvm_memory_slot *new, enum kvm_mr_change change)
	size_t size, gpa_offset, hva_offset;

	if ((change != KVM_MR_MOVE) && (change != KVM_MR_CREATE))

	/*
	 * Prevent userspace from creating a memory region outside of the
	 * VM GPA address space
	 */
	if ((new->base_gfn + new->npages) > (kvm->arch.gpa_size >> PAGE_SHIFT))

	size = new->npages * PAGE_SIZE;
	gpa_start = new->base_gfn << PAGE_SHIFT;
	hva_start = new->userspace_addr;
	if (IS_ALIGNED(size, PMD_SIZE) && IS_ALIGNED(gpa_start, PMD_SIZE)
			&& IS_ALIGNED(hva_start, PMD_SIZE))
		new->arch.flags |= KVM_MEM_HUGEPAGE_CAPABLE;
	/*
	 * Pages belonging to memslots that don't have the same
	 * alignment within a PMD for userspace and GPA cannot be
	 * mapped with PMD entries, because we'll end up mapping
	 * the wrong pages.
	 *
	 * Consider a layout like the following:
	 *
	 *    memslot->userspace_addr:
	 *    +-----+--------------------+--------------------+---+
	 *    |abcde|fgh  Stage-1 block  |    Stage-1 block tv|xyz|
	 *    +-----+--------------------+--------------------+---+
	 *
	 *    memslot->base_gfn << PAGE_SIZE:
	 *      +---+--------------------+--------------------+-----+
	 *      |abc|def  Stage-2 block  |    Stage-2 block   |tvxyz|
	 *      +---+--------------------+--------------------+-----+
	 *
	 * If we create those stage-2 blocks, we'll end up with an
	 * incorrect mapping.
	 */
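	/*
	 * For example, a slot whose gpa_start is PMD aligned but whose
	 * hva_start is offset into its PMD can never use PMD mappings, and
	 * neither can a slot that does not span at least one fully aligned
	 * PMD once the offsets are taken into account.
	 */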
	gpa_offset = gpa_start & (PMD_SIZE - 1);
	hva_offset = hva_start & (PMD_SIZE - 1);
	if (gpa_offset != hva_offset) {
		new->arch.flags |= KVM_MEM_HUGEPAGE_INCAPABLE;

		gpa_offset = PMD_SIZE;
	if ((size + gpa_offset) < (PMD_SIZE * 2))
		new->arch.flags |= KVM_MEM_HUGEPAGE_INCAPABLE;
void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *old,
				   const struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
	/*
	 * If dirty page logging is enabled, write protect all pages in the slot
	 * ready for dirty logging.
	 *
	 * There is no need to do this in any of the following cases:
	 * CREATE:	No dirty mappings will already exist.
	 * MOVE/DELETE:	The old mappings will already have been cleaned up by
	 *		kvm_arch_flush_shadow_memslot()
	 */
	if (change == KVM_MR_FLAGS_ONLY &&
	    (!(old->flags & KVM_MEM_LOG_DIRTY_PAGES) &&
	     new->flags & KVM_MEM_LOG_DIRTY_PAGES)) {
		spin_lock(&kvm->mmu_lock);
		/* Write protect GPA page table entries */
		needs_flush = kvm_mkclean_gpa_pt(kvm, new->base_gfn,
					new->base_gfn + new->npages);
		spin_unlock(&kvm->mmu_lock);
		if (needs_flush)
			kvm_flush_remote_tlbs(kvm);
void kvm_arch_flush_shadow_all(struct kvm *kvm)
	kvm_flush_range(kvm, 0, kvm->arch.gpa_size >> PAGE_SHIFT, 0);

void kvm_arch_flush_shadow_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
	/*
	 * The slot has been made invalid (ready for moving or deletion), so we
	 * need to ensure that it can no longer be accessed by any guest vCPUs.
	 */
	kvm_flush_range(kvm, slot->base_gfn, slot->base_gfn + slot->npages, 1);
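/*
 * MMU notifier unmap callback: invalidate all GPA mappings in the gfn
 * range; returns true if the secondary TLBs need to be flushed.
 */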
bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
	ctx.ops = kvm_flush_pte;
	kvm_ptw_prepare(kvm, &ctx);
	INIT_LIST_HEAD(&ctx.list);

	return kvm_ptw_top(kvm->arch.pgd, range->start << PAGE_SHIFT,
			range->end << PAGE_SHIFT, &ctx);
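/*
 * MMU notifier change-pte callback: remap the gfn at @range->start to the
 * new host page, keeping write/dirty permission only if both the old and
 * the new PTE allow it.
 */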
bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
	unsigned long prot_bits;
	kvm_pfn_t pfn = pte_pfn(range->arg.pte);
	gpa_t gpa = range->start << PAGE_SHIFT;

	ptep = kvm_populate_gpa(kvm, NULL, gpa, 0);

	/* Replacing an absent or old page doesn't need flushes */
	if (!kvm_pte_present(NULL, ptep) || !kvm_pte_young(*ptep)) {
		kvm_set_pte(ptep, 0);

	/* Fill new pte if write protected or page migrated */
	prot_bits = _PAGE_PRESENT | __READABLE;
	prot_bits |= _CACHE_MASK & pte_val(range->arg.pte);

	/*
	 * Set _PAGE_WRITE or _PAGE_DIRTY only if both the old and the new PTE
	 * allow them:
	 *  _PAGE_WRITE so that kvm_map_page_fast() can handle the next write fault;
	 *  _PAGE_DIRTY since the GPA has already been recorded as a dirty page.
	 */
	prot_bits |= __WRITEABLE & *ptep & pte_val(range->arg.pte);
	kvm_set_pte(ptep, kvm_pfn_pte(pfn, __pgprot(prot_bits)));
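/*
 * MMU notifier aging callback: clear the accessed (young) bit over the gfn
 * range and report whether any entry was young.
 */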
bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
	ctx.ops = kvm_mkold_pte;
	kvm_ptw_prepare(kvm, &ctx);

	return kvm_ptw_top(kvm->arch.pgd, range->start << PAGE_SHIFT,
			range->end << PAGE_SHIFT, &ctx);
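/*
 * MMU notifier aging callback: report whether the mapping for @range->start
 * is currently marked young, without modifying it.
 */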
bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
	gpa_t gpa = range->start << PAGE_SHIFT;
	kvm_pte_t *ptep = kvm_populate_gpa(kvm, NULL, gpa, 0);

	if (ptep && kvm_pte_present(NULL, ptep) && kvm_pte_young(*ptep))
		return true;
/*
 * kvm_map_page_fast() - Fast path GPA fault handler.
 * @vcpu:	vCPU pointer.
 * @gpa:	Guest physical address of fault.
 * @write:	Whether the fault was due to a write.
 *
 * Perform fast path GPA fault handling, doing all that can be done without
 * calling into KVM. This handles marking old pages young (for idle page
 * tracking), and dirtying of clean pages (for dirty page logging).
 *
 * Returns:	0 on success, in which case we can update derived mappings and
 *		resume guest execution.
 *		-EFAULT on failure due to absent GPA mapping or write to
 *		read-only page, in which case KVM must be consulted.
 */
static int kvm_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa, bool write)
	kvm_pte_t *ptep, changed, new;
	gfn_t gfn = gpa >> PAGE_SHIFT;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_memory_slot *slot;

	spin_lock(&kvm->mmu_lock);

	/* Fast path - just check GPA page table for an existing entry */
	ptep = kvm_populate_gpa(kvm, NULL, gpa, 0);
	if (!ptep || !kvm_pte_present(NULL, ptep)) {

	/* Track access to pages marked old */
	if (!kvm_pte_young(new))
		new = kvm_pte_mkyoung(new);
	/* call kvm_set_pfn_accessed() after unlock */

	if (write && !kvm_pte_dirty(new)) {
		if (!kvm_pte_write(new)) {

		if (kvm_pte_huge(new)) {
			/*
			 * Do not set write permission when dirty logging is
			 * enabled for HugePages
			 */
			slot = gfn_to_memslot(kvm, gfn);
			if (kvm_slot_dirty_track_enabled(slot)) {

		/* Track dirtying of writeable pages */
		new = kvm_pte_mkdirty(new);

	changed = new ^ (*ptep);
	kvm_set_pte(ptep, new);
	pfn = kvm_pte_pfn(new);

	spin_unlock(&kvm->mmu_lock);

	/*
	 * Fixme: pfn may be freed after mmu_lock
	 * kvm_try_get_pfn(pfn)/kvm_release_pfn pair to prevent this?
	 */
	if (kvm_pte_young(changed))
		kvm_set_pfn_accessed(pfn);

	if (kvm_pte_dirty(changed)) {
		mark_page_dirty(kvm, gfn);
		kvm_set_pfn_dirty(pfn);

	spin_unlock(&kvm->mmu_lock);
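/*
 * Decide whether the faulting hva may be mapped with a huge (PMD sized)
 * page: dirty logging of writes, the memslot's hugepage flags and the
 * memslot's PMD-aligned boundaries can all rule it out.
 */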
static bool fault_supports_huge_mapping(struct kvm_memory_slot *memslot,
					unsigned long hva, bool write)
	/* Disable dirty logging on HugePages */
	if (kvm_slot_dirty_track_enabled(memslot) && write)
		return false;

	if (kvm_hugepage_capable(memslot))
		return true;

	if (kvm_hugepage_incapable(memslot))
		return false;

	start = memslot->userspace_addr;
	end = start + memslot->npages * PAGE_SIZE;

	/*
	 * Next, let's make sure we're not trying to map anything not covered
	 * by the memslot. This means we have to prohibit block size mappings
	 * for the beginning and end of a non-block aligned and non-block sized
	 * memory slot (illustrated by the head and tail parts of the
	 * userspace view above containing pages 'abcde' and 'xyz',
	 * respectively).
	 *
	 * Note that it doesn't matter if we do the check using the
	 * userspace_addr or the base_gfn, as both are equally aligned (per
	 * the check above) and equally sized.
	 */
	return (hva >= ALIGN(start, PMD_SIZE)) && (hva < ALIGN_DOWN(end, PMD_SIZE));
/*
 * Lookup the mapping level for @gfn in the current mm.
 *
 * WARNING! Use of host_pfn_mapping_level() requires the caller and the end
 * consumer to be tied into KVM's handlers for MMU notifier events!
 *
 * There are several ways to safely use this helper:
 *
 * - Check mmu_invalidate_retry_gfn() after grabbing the mapping level, before
 *   consuming it. In this case, mmu_lock doesn't need to be held during the
 *   lookup, but it does need to be held while checking the MMU notifier.
 *
 * - Hold mmu_lock AND ensure there is no in-progress MMU notifier invalidation
 *   event for the hva. This can be done by explicitly checking the MMU notifier
 *   or by ensuring that KVM already has a valid mapping that covers the hva.
 *
 * - Do not use the result to install new mappings, e.g. use the host mapping
 *   level only to decide whether or not to zap an entry. In this case, it's
 *   not required to hold mmu_lock (though it's highly likely the caller will
 *   want to hold mmu_lock anyways, e.g. to modify SPTEs).
 *
 * Note! The lookup can still race with modifications to host page tables, but
 * the above "rules" ensure KVM will not _consume_ the result of the walk if a
 * race with the primary MMU occurs.
 */
static int host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn,
				  const struct kvm_memory_slot *slot)
	/*
	 * Note, using the already-retrieved memslot and __gfn_to_hva_memslot()
	 * is not solely for performance, it's also necessary to avoid the
	 * "writable" check in __gfn_to_hva_many(), which will always fail on
	 * read-only memslots due to gfn_to_hva() assuming writes. Earlier
	 * page fault steps have already verified the guest isn't writing a
	 * read-only memslot.
	 */
	hva = __gfn_to_hva_memslot(slot, gfn);

	/*
	 * Disable IRQs to prevent concurrent tear down of host page tables,
	 * e.g. if the primary MMU promotes a P*D to a huge page and then frees
	 * the original page table.
	 */
	local_irq_save(flags);

	/*
	 * Read each entry once. As above, a non-leaf entry can be promoted to
	 * a huge page _during_ this walk. Re-reading the entry could send the
	 * walk into the weeds, e.g. p*d_large() returns false (sees the old
	 * value) and then p*d_offset() walks into the target huge page instead
	 * of the old page table (sees the new value).
	 */
	pgd = READ_ONCE(*pgd_offset(kvm->mm, hva));

	p4d = READ_ONCE(*p4d_offset(&pgd, hva));
	if (p4d_none(p4d) || !p4d_present(p4d))

	pud = READ_ONCE(*pud_offset(&p4d, hva));
	if (pud_none(pud) || !pud_present(pud))

	pmd = READ_ONCE(*pmd_offset(&pud, hva));
	if (pmd_none(pmd) || !pmd_present(pmd))

	if (kvm_pte_huge(pmd_val(pmd)))

	local_irq_restore(flags);
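/*
 * Split a huge PMD mapping into a full page of small PTEs allocated from
 * the vCPU's MMU page cache, then return the PTE that maps @gfn.
 */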
static kvm_pte_t *kvm_split_huge(struct kvm_vcpu *vcpu, kvm_pte_t *ptep, gfn_t gfn)
	kvm_pte_t val, *child;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_mmu_memory_cache *memcache;

	memcache = &vcpu->arch.mmu_page_cache;
	child = kvm_mmu_memory_cache_alloc(memcache);
	val = kvm_pte_mksmall(*ptep);
	for (i = 0; i < PTRS_PER_PTE; i++) {
		kvm_set_pte(child + i, val);
		val += PAGE_SIZE;
	}

	/* The later kvm_flush_tlb_gpa() will flush hugepage tlb */
	kvm_set_pte(ptep, __pa(child));

	kvm->stat.hugepages--;
	kvm->stat.pages += PTRS_PER_PTE;

	return child + (gfn & (PTRS_PER_PTE - 1));
/*
 * kvm_map_page() - Map a guest physical page.
 * @vcpu:	vCPU pointer.
 * @gpa:	Guest physical address of fault.
 * @write:	Whether the fault was due to a write.
 *
 * Handle GPA faults by creating a new GPA mapping (or updating an existing
 * one).
 *
 * This takes care of marking pages young or dirty (idle/dirty page tracking),
 * asking KVM for the corresponding PFN, and creating a mapping in the GPA page
 * tables. Derived mappings (GVA page tables and TLBs) must be handled by the
 * caller.
 *
 * Returns:	0 on success
 *		-EFAULT if there is no memory region at @gpa or a write was
 *		attempted to a read-only memory region. This is usually handled
 *		as an MMIO access.
 */
static int kvm_map_page(struct kvm_vcpu *vcpu, unsigned long gpa, bool write)
	int srcu_idx, err, retry_no = 0, level;
	unsigned long hva, mmu_seq, prot_bits;
	kvm_pte_t *ptep, new_pte;
	gfn_t gfn = gpa >> PAGE_SHIFT;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_memory_slot *memslot;
	struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;

	/* Try the fast path to handle old / clean pages */
	srcu_idx = srcu_read_lock(&kvm->srcu);
	err = kvm_map_page_fast(vcpu, gpa, write);

	memslot = gfn_to_memslot(kvm, gfn);
	hva = gfn_to_hva_memslot_prot(memslot, gfn, &writeable);
	if (kvm_is_error_hva(hva) || (write && !writeable)) {

	/* We need a minimum of cached pages ready for page table creation */
	err = kvm_mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES);

	/*
	 * Used to check for invalidations in progress, of the pfn that is
	 * returned by gfn_to_pfn_prot() below.
	 */
	mmu_seq = kvm->mmu_invalidate_seq;
	/*
	 * Ensure the read of mmu_invalidate_seq isn't reordered with PTE reads in
	 * gfn_to_pfn_prot() (which calls get_user_pages()), so that we don't
	 * risk the page we get a reference to getting unmapped before we have a
	 * chance to grab the mmu_lock without mmu_invalidate_retry() noticing.
	 *
	 * This smp_rmb() pairs with the effective smp_wmb() of the combination
	 * of the pte_unmap_unlock() after the PTE is zapped, and the
	 * spin_lock() in kvm_mmu_invalidate_end() before mmu_invalidate_seq
	 * is incremented.
	 */
	/* Slow path - ask KVM core whether we can access this GPA */
	pfn = gfn_to_pfn_prot(kvm, gfn, write, &writeable);
	if (is_error_noslot_pfn(pfn)) {

	/* Check if an invalidation has taken place since we got pfn */
	spin_lock(&kvm->mmu_lock);
	if (mmu_invalidate_retry_gfn(kvm, mmu_seq, gfn)) {
		/*
		 * This can happen when mappings are changed asynchronously, but
		 * also synchronously if a COW is triggered by gfn_to_pfn_prot().
		 */
		spin_unlock(&kvm->mmu_lock);
		kvm_release_pfn_clean(pfn);
		if (retry_no > 100) {
	/*
	 * For emulated devices, such as a virtio device, the actual cache
	 * attribute is determined by the physical machine.
	 * For a pass-through physical device, it should be uncachable.
	 */
	prot_bits = _PAGE_PRESENT | __READABLE;
		prot_bits |= _CACHE_CC;
		prot_bits |= _CACHE_SUC;

		prot_bits |= _PAGE_WRITE;
			prot_bits |= __WRITEABLE;

	/* Disable dirty logging on HugePages */
	if (!fault_supports_huge_mapping(memslot, hva, write)) {
		level = host_pfn_mapping_level(kvm, gfn, memslot);
			gfn = gfn & ~(PTRS_PER_PTE - 1);
			pfn = pfn & ~(PTRS_PER_PTE - 1);

	/* Ensure page tables are allocated */
	ptep = kvm_populate_gpa(kvm, memcache, gpa, level);
	new_pte = kvm_pfn_pte(pfn, __pgprot(prot_bits));
		new_pte = kvm_pte_mkhuge(new_pte);
		/*
		 * The previous pmd entry is invalid_pte_table, so there may be
		 * stale TLB entries for small pages; they need to be flushed
		 * for the current vCPU.
		 */
		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
		++kvm->stat.hugepages;
	} else if (kvm_pte_huge(*ptep) && write)
		ptep = kvm_split_huge(vcpu, ptep, gfn);
	kvm_set_pte(ptep, new_pte);
	spin_unlock(&kvm->mmu_lock);

	if (prot_bits & _PAGE_DIRTY) {
		mark_page_dirty_in_slot(kvm, memslot, gfn);
		kvm_set_pfn_dirty(pfn);

	kvm_set_pfn_accessed(pfn);
	kvm_release_pfn_clean(pfn);

	srcu_read_unlock(&kvm->srcu, srcu_idx);
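/*
 * Handle a guest GPA fault: map (or update) the page at @gpa, then
 * invalidate any stale TLB entry for that address.
 */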
int kvm_handle_mm_fault(struct kvm_vcpu *vcpu, unsigned long gpa, bool write)
	ret = kvm_map_page(vcpu, gpa, write);

	/* Invalidate this entry in the TLB */
	kvm_flush_tlb_gpa(vcpu, gpa);
void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)

void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
					const struct kvm_memory_slot *memslot)
	kvm_flush_remote_tlbs(kvm);