KVM: MMU: Track page fault data in struct vcpu
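
In short: the global tdp_enabled checks in the walkers become a per-context direct_map flag in struct kvm_mmu; CR3 loading, guest-CR3 reads, and page-fault injection are routed through new set_cr3/get_cr3/inject_page_fault callbacks on the context; the context-init helpers take an explicit struct kvm_mmu argument, with kvm_init_shadow_mmu() split out and exported; and the old #ifdef AUDIT code is replaced by a tracepoint-driven implementation included from mmu_audit.c under CONFIG_KVM_MMU_AUDIT.
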
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 1c784b96dac33167ee71d0d9d6104a2cd8bf0363..99367274b97cf31b9b2bc60f2734fcb390af2f9a 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
  */
 bool tdp_enabled = false;
 
-#undef MMU_DEBUG
+enum {
+       AUDIT_PRE_PAGE_FAULT,
+       AUDIT_POST_PAGE_FAULT,
+       AUDIT_PRE_PTE_WRITE,
+       AUDIT_POST_PTE_WRITE
+};
 
-#undef AUDIT
+char *audit_point_name[] = {
+       "pre page fault",
+       "post page fault",
+       "pre pte write",
+       "post pte write"
+};
 
-#ifdef AUDIT
-static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg);
-#else
-static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg) {}
-#endif
+#undef MMU_DEBUG
 
 #ifdef MMU_DEBUG
 
@@ -71,7 +77,7 @@ static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg) {}
 
 #endif
 
-#if defined(MMU_DEBUG) || defined(AUDIT)
+#ifdef MMU_DEBUG
 static int dbg = 0;
 module_param(dbg, bool, 0644);
 #endif
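
The audit_point_name[] table above pairs with the trace_kvm_mmu_audit()
calls added further down in kvm_mmu_pte_write(). The tracepoint itself is
declared outside this file; a minimal sketch of a matching declaration
(field layout assumed here, not taken from this diff) could look like:

	TRACE_EVENT(kvm_mmu_audit,
		TP_PROTO(struct kvm_vcpu *vcpu, int audit_point),
		TP_ARGS(vcpu, audit_point),

		TP_STRUCT__entry(
			__field(struct kvm_vcpu *, vcpu)
			__field(int, audit_point)
		),

		TP_fast_assign(
			__entry->vcpu        = vcpu;
			__entry->audit_point = audit_point;
		),

		/* audit_point indexes the audit_point_name[] table above */
		TP_printk("vcpu:%d %s", __entry->vcpu->vcpu_id,
			  audit_point_name[__entry->audit_point])
	);
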
@@ -1442,7 +1448,8 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
        if (role.direct)
                role.cr4_pae = 0;
        role.access = access;
-       if (!tdp_enabled && vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) {
+       if (!vcpu->arch.mmu.direct_map
+           && vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) {
                quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
                quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
                role.quadrant = quadrant;
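
From here on, every tdp_enabled test in the walkers asks the MMU context
instead. Keeping "is this context direct-mapped" in struct kvm_mmu rather
than in a global flag lets one vCPU carry both a direct-mapped (TDP)
context and a shadow context at the same time, presumably in preparation
for nested paging; each path then keys off the context it actually
operates on.
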
@@ -1967,7 +1974,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
                spte |= shadow_user_mask;
        if (level > PT_PAGE_TABLE_LEVEL)
                spte |= PT_PAGE_SIZE_MASK;
-       if (tdp_enabled)
+       if (vcpu->arch.mmu.direct_map)
                spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn,
                        kvm_is_mmio_pfn(pfn));
 
@@ -1977,8 +1984,8 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
        spte |= (u64)pfn << PAGE_SHIFT;
 
        if ((pte_access & ACC_WRITE_MASK)
-           || (!tdp_enabled && write_fault && !is_write_protection(vcpu)
-               && !user_fault)) {
+           || (!vcpu->arch.mmu.direct_map && write_fault
+               && !is_write_protection(vcpu) && !user_fault)) {
 
                if (level > PT_PAGE_TABLE_LEVEL &&
                    has_wrprotected_page(vcpu->kvm, gfn, level)) {
@@ -1989,7 +1996,8 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 
                spte |= PT_WRITABLE_MASK;
 
-               if (!tdp_enabled && !(pte_access & ACC_WRITE_MASK))
+               if (!vcpu->arch.mmu.direct_map
+                   && !(pte_access & ACC_WRITE_MASK))
                        spte &= ~PT_USER_MASK;
 
                /*
@@ -2357,7 +2365,7 @@ static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
        int direct = 0;
        u64 pdptr;
 
-       root_gfn = vcpu->arch.cr3 >> PAGE_SHIFT;
+       root_gfn = vcpu->arch.mmu.get_cr3(vcpu) >> PAGE_SHIFT;
 
        if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
                hpa_t root = vcpu->arch.mmu.root_hpa;
@@ -2365,7 +2373,7 @@ static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
                ASSERT(!VALID_PAGE(root));
                if (mmu_check_root(vcpu, root_gfn))
                        return 1;
-               if (tdp_enabled) {
+               if (vcpu->arch.mmu.direct_map) {
                        direct = 1;
                        root_gfn = 0;
                }
@@ -2381,6 +2389,10 @@ static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
                return 0;
        }
        direct = !is_paging(vcpu);
+
+       if (mmu_check_root(vcpu, root_gfn))
+               return 1;
+
        for (i = 0; i < 4; ++i) {
                hpa_t root = vcpu->arch.mmu.pae_root[i];
 
@@ -2392,11 +2404,11 @@ static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
                                continue;
                        }
                        root_gfn = pdptr >> PAGE_SHIFT;
+                       if (mmu_check_root(vcpu, root_gfn))
+                               return 1;
                } else if (vcpu->arch.mmu.root_level == 0)
                        root_gfn = 0;
-               if (mmu_check_root(vcpu, root_gfn))
-                       return 1;
-               if (tdp_enabled) {
+               if (vcpu->arch.mmu.direct_map) {
                        direct = 1;
                        root_gfn = i << 30;
                }
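
The mmu_check_root() reordering above is a small behavioral change, not
just churn: the guest-CR3 gfn is validated once before the PAE loop, each
PDPTR gfn is validated immediately after it is read, and the synthetic
gfns installed by the direct-map path (0 and i << 30) are no longer run
through the check at all.
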
@@ -2520,10 +2532,9 @@ static void nonpaging_free(struct kvm_vcpu *vcpu)
        mmu_free_roots(vcpu);
 }
 
-static int nonpaging_init_context(struct kvm_vcpu *vcpu)
+static int nonpaging_init_context(struct kvm_vcpu *vcpu,
+                                 struct kvm_mmu *context)
 {
-       struct kvm_mmu *context = &vcpu->arch.mmu;
-
        context->new_cr3 = nonpaging_new_cr3;
        context->page_fault = nonpaging_page_fault;
        context->gva_to_gpa = nonpaging_gva_to_gpa;
@@ -2534,6 +2545,7 @@ static int nonpaging_init_context(struct kvm_vcpu *vcpu)
        context->root_level = 0;
        context->shadow_root_level = PT32E_ROOT_LEVEL;
        context->root_hpa = INVALID_PAGE;
+       context->direct_map = true;
        return 0;
 }
 
@@ -2549,11 +2561,14 @@ static void paging_new_cr3(struct kvm_vcpu *vcpu)
        mmu_free_roots(vcpu);
 }
 
-static void inject_page_fault(struct kvm_vcpu *vcpu,
-                             u64 addr,
-                             u32 err_code)
+static unsigned long get_cr3(struct kvm_vcpu *vcpu)
 {
-       kvm_inject_page_fault(vcpu, addr, err_code);
+       return vcpu->arch.cr3;
+}
+
+static void inject_page_fault(struct kvm_vcpu *vcpu)
+{
+       vcpu->arch.mmu.inject_page_fault(vcpu);
 }
 
 static void paging_free(struct kvm_vcpu *vcpu)
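
get_cr3() and the reworked inject_page_fault() dispatch through function
pointers that this series adds to struct kvm_mmu (in kvm_host.h, outside
this diff). A rough sketch of the members implied by the call sites here,
with unrelated fields elided:

	struct kvm_mmu {
		void (*new_cr3)(struct kvm_vcpu *vcpu);
		void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long root);
		unsigned long (*get_cr3)(struct kvm_vcpu *vcpu);
		void (*inject_page_fault)(struct kvm_vcpu *vcpu);
		/* ... page_fault, gva_to_gpa, root_hpa, root_level ... */
		bool direct_map;
		/* ... */
	};
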
@@ -2561,12 +2576,12 @@ static void paging_free(struct kvm_vcpu *vcpu)
        nonpaging_free(vcpu);
 }
 
-static bool is_rsvd_bits_set(struct kvm_vcpu *vcpu, u64 gpte, int level)
+static bool is_rsvd_bits_set(struct kvm_mmu *mmu, u64 gpte, int level)
 {
        int bit7;
 
        bit7 = (gpte >> 7) & 1;
-       return (gpte & vcpu->arch.mmu.rsvd_bits_mask[bit7][level-1]) != 0;
+       return (gpte & mmu->rsvd_bits_mask[bit7][level-1]) != 0;
 }
 
 #define PTTYPE 64
@@ -2577,9 +2592,10 @@ static bool is_rsvd_bits_set(struct kvm_vcpu *vcpu, u64 gpte, int level)
 #include "paging_tmpl.h"
 #undef PTTYPE
 
-static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, int level)
+static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
+                                 struct kvm_mmu *context,
+                                 int level)
 {
-       struct kvm_mmu *context = &vcpu->arch.mmu;
        int maxphyaddr = cpuid_maxphyaddr(vcpu);
        u64 exb_bit_rsvd = 0;
 
@@ -2638,9 +2654,11 @@ static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, int level)
        }
 }
 
-static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
+static int paging64_init_context_common(struct kvm_vcpu *vcpu,
+                                       struct kvm_mmu *context,
+                                       int level)
 {
-       struct kvm_mmu *context = &vcpu->arch.mmu;
+       reset_rsvds_bits_mask(vcpu, context, level);
 
        ASSERT(is_pae(vcpu));
        context->new_cr3 = paging_new_cr3;
@@ -2653,20 +2671,21 @@ static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
        context->root_level = level;
        context->shadow_root_level = level;
        context->root_hpa = INVALID_PAGE;
+       context->direct_map = false;
        return 0;
 }
 
-static int paging64_init_context(struct kvm_vcpu *vcpu)
+static int paging64_init_context(struct kvm_vcpu *vcpu,
+                                struct kvm_mmu *context)
 {
-       reset_rsvds_bits_mask(vcpu, PT64_ROOT_LEVEL);
-       return paging64_init_context_common(vcpu, PT64_ROOT_LEVEL);
+       return paging64_init_context_common(vcpu, context, PT64_ROOT_LEVEL);
 }
 
-static int paging32_init_context(struct kvm_vcpu *vcpu)
+static int paging32_init_context(struct kvm_vcpu *vcpu,
+                                struct kvm_mmu *context)
 {
-       struct kvm_mmu *context = &vcpu->arch.mmu;
+       reset_rsvds_bits_mask(vcpu, context, PT32_ROOT_LEVEL);
 
-       reset_rsvds_bits_mask(vcpu, PT32_ROOT_LEVEL);
        context->new_cr3 = paging_new_cr3;
        context->page_fault = paging32_page_fault;
        context->gva_to_gpa = paging32_gva_to_gpa;
@@ -2677,13 +2696,14 @@ static int paging32_init_context(struct kvm_vcpu *vcpu)
        context->root_level = PT32_ROOT_LEVEL;
        context->shadow_root_level = PT32E_ROOT_LEVEL;
        context->root_hpa = INVALID_PAGE;
+       context->direct_map = false;
        return 0;
 }
 
-static int paging32E_init_context(struct kvm_vcpu *vcpu)
+static int paging32E_init_context(struct kvm_vcpu *vcpu,
+                                 struct kvm_mmu *context)
 {
-       reset_rsvds_bits_mask(vcpu, PT32E_ROOT_LEVEL);
-       return paging64_init_context_common(vcpu, PT32E_ROOT_LEVEL);
+       return paging64_init_context_common(vcpu, context, PT32E_ROOT_LEVEL);
 }
 
 static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
@@ -2698,20 +2718,24 @@ static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
        context->invlpg = nonpaging_invlpg;
        context->shadow_root_level = kvm_x86_ops->get_tdp_level();
        context->root_hpa = INVALID_PAGE;
+       context->direct_map = true;
+       context->set_cr3 = kvm_x86_ops->set_tdp_cr3;
+       context->get_cr3 = get_cr3;
+       context->inject_page_fault = kvm_inject_page_fault;
 
        if (!is_paging(vcpu)) {
                context->gva_to_gpa = nonpaging_gva_to_gpa;
                context->root_level = 0;
        } else if (is_long_mode(vcpu)) {
-               reset_rsvds_bits_mask(vcpu, PT64_ROOT_LEVEL);
+               reset_rsvds_bits_mask(vcpu, context, PT64_ROOT_LEVEL);
                context->gva_to_gpa = paging64_gva_to_gpa;
                context->root_level = PT64_ROOT_LEVEL;
        } else if (is_pae(vcpu)) {
-               reset_rsvds_bits_mask(vcpu, PT32E_ROOT_LEVEL);
+               reset_rsvds_bits_mask(vcpu, context, PT32E_ROOT_LEVEL);
                context->gva_to_gpa = paging64_gva_to_gpa;
                context->root_level = PT32E_ROOT_LEVEL;
        } else {
-               reset_rsvds_bits_mask(vcpu, PT32_ROOT_LEVEL);
+               reset_rsvds_bits_mask(vcpu, context, PT32_ROOT_LEVEL);
                context->gva_to_gpa = paging32_gva_to_gpa;
                context->root_level = PT32_ROOT_LEVEL;
        }
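
For TDP the new hooks are wired to the hardware side: roots are loaded
through kvm_x86_ops->set_tdp_cr3 (the vendor's nested-paging root, e.g.
NPT's nCR3 or the EPT pointer), the guest CR3 is read with the plain
get_cr3() accessor above, and faults are injected with
kvm_inject_page_fault() as before.
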
@@ -2719,24 +2743,35 @@ static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
        return 0;
 }
 
-static int init_kvm_softmmu(struct kvm_vcpu *vcpu)
+int kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
 {
        int r;
-
        ASSERT(vcpu);
        ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
 
        if (!is_paging(vcpu))
-               r = nonpaging_init_context(vcpu);
+               r = nonpaging_init_context(vcpu, context);
        else if (is_long_mode(vcpu))
-               r = paging64_init_context(vcpu);
+               r = paging64_init_context(vcpu, context);
        else if (is_pae(vcpu))
-               r = paging32E_init_context(vcpu);
+               r = paging32E_init_context(vcpu, context);
        else
-               r = paging32_init_context(vcpu);
+               r = paging32_init_context(vcpu, context);
 
        vcpu->arch.mmu.base_role.cr4_pae = !!is_pae(vcpu);
-       vcpu->arch.mmu.base_role.cr0_wp = is_write_protection(vcpu);
+       vcpu->arch.mmu.base_role.cr0_wp  = is_write_protection(vcpu);
+
+       return r;
+}
+EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu);
+
+static int init_kvm_softmmu(struct kvm_vcpu *vcpu)
+{
+       int r = kvm_init_shadow_mmu(vcpu, &vcpu->arch.mmu);
+
+       vcpu->arch.mmu.set_cr3           = kvm_x86_ops->set_cr3;
+       vcpu->arch.mmu.get_cr3           = get_cr3;
+       vcpu->arch.mmu.inject_page_fault = kvm_inject_page_fault;
 
        return r;
 }
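
Splitting kvm_init_shadow_mmu() out of init_kvm_softmmu() and exporting
it lets code outside mmu.c build a shadow-paging context and then supply
its own CR3/fault callbacks; the EXPORT_SYMBOL_GPL suggests the vendor
modules (kvm-amd/kvm-intel) are the intended callers. A hypothetical
external user, following the same convention as init_kvm_softmmu() (the
function name below is illustrative, not from this diff):

	/* Illustrative sketch only: initialize a shadow context, then
	 * override the callbacks, mirroring init_kvm_softmmu(). */
	static int example_init_shadow(struct kvm_vcpu *vcpu,
				       struct kvm_mmu *ctx)
	{
		int r = kvm_init_shadow_mmu(vcpu, ctx);

		ctx->set_cr3           = kvm_x86_ops->set_cr3;
		ctx->get_cr3           = get_cr3; /* or a nested variant */
		ctx->inject_page_fault = kvm_inject_page_fault;

		return r;
	}
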
@@ -2780,7 +2815,7 @@ int kvm_mmu_load(struct kvm_vcpu *vcpu)
        if (r)
                goto out;
        /* set_cr3() should ensure TLB has been flushed */
-       kvm_x86_ops->set_cr3(vcpu, vcpu->arch.mmu.root_hpa);
+       vcpu->arch.mmu.set_cr3(vcpu, vcpu->arch.mmu.root_hpa);
 out:
        return r;
 }
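
Root loading in kvm_mmu_load() likewise goes through the context's
set_cr3, so a direct-map context ends up in kvm_x86_ops->set_tdp_cr3 and
a shadow context in kvm_x86_ops->set_cr3, without mmu.c caring which.
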
@@ -2822,7 +2857,7 @@ static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
                return;
         }
 
-       if (is_rsvd_bits_set(vcpu, *(u64 *)new, PT_PAGE_TABLE_LEVEL))
+       if (is_rsvd_bits_set(&vcpu->arch.mmu, *(u64 *)new, PT_PAGE_TABLE_LEVEL))
                return;
 
        ++vcpu->kvm->stat.mmu_pte_updated;
@@ -2964,7 +2999,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
        kvm_mmu_access_page(vcpu, gfn);
        kvm_mmu_free_some_pages(vcpu);
        ++vcpu->kvm->stat.mmu_pte_write;
-       kvm_mmu_audit(vcpu, "pre pte write");
+       trace_kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
        if (guest_initiated) {
                if (gfn == vcpu->arch.last_pt_write_gfn
                    && !last_updated_pte_accessed(vcpu)) {
@@ -3037,7 +3072,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
        }
        mmu_pte_write_flush_tlb(vcpu, zap_page, remote_flush, local_flush);
        kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
-       kvm_mmu_audit(vcpu, "post pte write");
+       trace_kvm_mmu_audit(vcpu, AUDIT_POST_PTE_WRITE);
        spin_unlock(&vcpu->kvm->mmu_lock);
        if (!is_error_pfn(vcpu->arch.update_pte.pfn)) {
                kvm_release_pfn_clean(vcpu->arch.update_pte.pfn);
@@ -3050,7 +3085,7 @@ int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
        gpa_t gpa;
        int r;
 
-       if (tdp_enabled)
+       if (vcpu->arch.mmu.direct_map)
                return 0;
 
        gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);
@@ -3483,266 +3518,6 @@ int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4])
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_get_spte_hierarchy);
 
-#ifdef AUDIT
-
-static const char *audit_msg;
-
-static gva_t canonicalize(gva_t gva)
-{
-#ifdef CONFIG_X86_64
-       gva = (long long)(gva << 16) >> 16;
-#endif
-       return gva;
-}
-
-
-typedef void (*inspect_spte_fn) (struct kvm *kvm, u64 *sptep);
-
-static void __mmu_spte_walk(struct kvm *kvm, struct kvm_mmu_page *sp,
-                           inspect_spte_fn fn)
-{
-       int i;
-
-       for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
-               u64 ent = sp->spt[i];
-
-               if (is_shadow_present_pte(ent)) {
-                       if (!is_last_spte(ent, sp->role.level)) {
-                               struct kvm_mmu_page *child;
-                               child = page_header(ent & PT64_BASE_ADDR_MASK);
-                               __mmu_spte_walk(kvm, child, fn);
-                       } else
-                               fn(kvm, &sp->spt[i]);
-               }
-       }
-}
-
-static void mmu_spte_walk(struct kvm_vcpu *vcpu, inspect_spte_fn fn)
-{
-       int i;
-       struct kvm_mmu_page *sp;
-
-       if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
-               return;
-       if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
-               hpa_t root = vcpu->arch.mmu.root_hpa;
-               sp = page_header(root);
-               __mmu_spte_walk(vcpu->kvm, sp, fn);
-               return;
-       }
-       for (i = 0; i < 4; ++i) {
-               hpa_t root = vcpu->arch.mmu.pae_root[i];
-
-               if (root && VALID_PAGE(root)) {
-                       root &= PT64_BASE_ADDR_MASK;
-                       sp = page_header(root);
-                       __mmu_spte_walk(vcpu->kvm, sp, fn);
-               }
-       }
-       return;
-}
-
-static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
-                               gva_t va, int level)
-{
-       u64 *pt = __va(page_pte & PT64_BASE_ADDR_MASK);
-       int i;
-       gva_t va_delta = 1ul << (PAGE_SHIFT + 9 * (level - 1));
-
-       for (i = 0; i < PT64_ENT_PER_PAGE; ++i, va += va_delta) {
-               u64 ent = pt[i];
-
-               if (ent == shadow_trap_nonpresent_pte)
-                       continue;
-
-               va = canonicalize(va);
-               if (is_shadow_present_pte(ent) && !is_last_spte(ent, level))
-                       audit_mappings_page(vcpu, ent, va, level - 1);
-               else {
-                       gpa_t gpa = kvm_mmu_gva_to_gpa_read(vcpu, va, NULL);
-                       gfn_t gfn = gpa >> PAGE_SHIFT;
-                       pfn_t pfn = gfn_to_pfn(vcpu->kvm, gfn);
-                       hpa_t hpa = (hpa_t)pfn << PAGE_SHIFT;
-
-                       if (is_error_pfn(pfn)) {
-                               kvm_release_pfn_clean(pfn);
-                               continue;
-                       }
-
-                       if (is_shadow_present_pte(ent)
-                           && (ent & PT64_BASE_ADDR_MASK) != hpa)
-                               printk(KERN_ERR "xx audit error: (%s) levels %d"
-                                      " gva %lx gpa %llx hpa %llx ent %llx %d\n",
-                                      audit_msg, vcpu->arch.mmu.root_level,
-                                      va, gpa, hpa, ent,
-                                      is_shadow_present_pte(ent));
-                       else if (ent == shadow_notrap_nonpresent_pte
-                                && !is_error_hpa(hpa))
-                               printk(KERN_ERR "audit: (%s) notrap shadow,"
-                                      " valid guest gva %lx\n", audit_msg, va);
-                       kvm_release_pfn_clean(pfn);
-
-               }
-       }
-}
-
-static void audit_mappings(struct kvm_vcpu *vcpu)
-{
-       unsigned i;
-
-       if (vcpu->arch.mmu.root_level == 4)
-               audit_mappings_page(vcpu, vcpu->arch.mmu.root_hpa, 0, 4);
-       else
-               for (i = 0; i < 4; ++i)
-                       if (vcpu->arch.mmu.pae_root[i] & PT_PRESENT_MASK)
-                               audit_mappings_page(vcpu,
-                                                   vcpu->arch.mmu.pae_root[i],
-                                                   i << 30,
-                                                   2);
-}
-
-static int count_rmaps(struct kvm_vcpu *vcpu)
-{
-       struct kvm *kvm = vcpu->kvm;
-       struct kvm_memslots *slots;
-       int nmaps = 0;
-       int i, j, k, idx;
-
-       idx = srcu_read_lock(&kvm->srcu);
-       slots = kvm_memslots(kvm);
-       for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
-               struct kvm_memory_slot *m = &slots->memslots[i];
-               struct kvm_rmap_desc *d;
-
-               for (j = 0; j < m->npages; ++j) {
-                       unsigned long *rmapp = &m->rmap[j];
-
-                       if (!*rmapp)
-                               continue;
-                       if (!(*rmapp & 1)) {
-                               ++nmaps;
-                               continue;
-                       }
-                       d = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
-                       while (d) {
-                               for (k = 0; k < RMAP_EXT; ++k)
-                                       if (d->sptes[k])
-                                               ++nmaps;
-                                       else
-                                               break;
-                               d = d->more;
-                       }
-               }
-       }
-       srcu_read_unlock(&kvm->srcu, idx);
-       return nmaps;
-}
-
-void inspect_spte_has_rmap(struct kvm *kvm, u64 *sptep)
-{
-       unsigned long *rmapp;
-       struct kvm_mmu_page *rev_sp;
-       gfn_t gfn;
-
-
-       rev_sp = page_header(__pa(sptep));
-       gfn = kvm_mmu_page_get_gfn(rev_sp, sptep - rev_sp->spt);
-
-       if (!gfn_to_memslot(kvm, gfn)) {
-               if (!printk_ratelimit())
-                       return;
-               printk(KERN_ERR "%s: no memslot for gfn %llx\n",
-                                audit_msg, gfn);
-               printk(KERN_ERR "%s: index %ld of sp (gfn=%llx)\n",
-                      audit_msg, (long int)(sptep - rev_sp->spt),
-                               rev_sp->gfn);
-               dump_stack();
-               return;
-       }
-
-       rmapp = gfn_to_rmap(kvm, gfn, rev_sp->role.level);
-       if (!*rmapp) {
-               if (!printk_ratelimit())
-                       return;
-               printk(KERN_ERR "%s: no rmap for writable spte %llx\n",
-                                audit_msg, *sptep);
-               dump_stack();
-       }
-}
-
-void audit_sptes_have_rmaps(struct kvm_vcpu *vcpu)
-{
-       mmu_spte_walk(vcpu, inspect_spte_has_rmap);
-}
-
-static void check_mappings_rmap(struct kvm_vcpu *vcpu)
-{
-       struct kvm_mmu_page *sp;
-       int i;
-
-       list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
-               u64 *pt = sp->spt;
-
-               if (sp->role.level != PT_PAGE_TABLE_LEVEL)
-                       continue;
-
-               for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
-                       if (!is_rmap_spte(pt[i]))
-                               continue;
-
-                       inspect_spte_has_rmap(vcpu->kvm, &pt[i]);
-               }
-       }
-       return;
-}
-
-static void audit_rmap(struct kvm_vcpu *vcpu)
-{
-       check_mappings_rmap(vcpu);
-       count_rmaps(vcpu);
-}
-
-static void audit_write_protection(struct kvm_vcpu *vcpu)
-{
-       struct kvm_mmu_page *sp;
-       struct kvm_memory_slot *slot;
-       unsigned long *rmapp;
-       u64 *spte;
-       gfn_t gfn;
-
-       list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
-               if (sp->role.direct)
-                       continue;
-               if (sp->unsync)
-                       continue;
-
-               slot = gfn_to_memslot(vcpu->kvm, sp->gfn);
-               rmapp = &slot->rmap[gfn - slot->base_gfn];
-
-               spte = rmap_next(vcpu->kvm, rmapp, NULL);
-               while (spte) {
-                       if (is_writable_pte(*spte))
-                               printk(KERN_ERR "%s: (%s) shadow page has "
-                               "writable mappings: gfn %llx role %x\n",
-                              __func__, audit_msg, sp->gfn,
-                              sp->role.word);
-                       spte = rmap_next(vcpu->kvm, rmapp, spte);
-               }
-       }
-}
-
-static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg)
-{
-       int olddbg = dbg;
-
-       dbg = 0;
-       audit_msg = msg;
-       audit_rmap(vcpu);
-       audit_write_protection(vcpu);
-       if (strcmp("pre pte write", audit_msg) != 0)
-               audit_mappings(vcpu);
-       audit_sptes_have_rmaps(vcpu);
-       dbg = olddbg;
-}
-
+#ifdef CONFIG_KVM_MMU_AUDIT
+#include "mmu_audit.c"
 #endif
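
The ~260 lines of #ifdef AUDIT code removed above do not simply vanish:
the audit points survive as the AUDIT_* enum and the
trace_kvm_mmu_audit() calls at the top of the file, and the checking
logic moves to mmu_audit.c, included here under CONFIG_KVM_MMU_AUDIT so
it can keep using mmu.c's many static helpers. The old
strcmp("pre pte write", ...) special case, which skipped the expensive
mapping audit on that path, presumably maps onto the same check against
AUDIT_PRE_PTE_WRITE.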