KVM: MMU: Bail out immediately if there is no available mmu page
authorWanpeng Li <wanpeng.li@hotmail.com>
Thu, 10 Aug 2017 23:28:02 +0000 (16:28 -0700)
committerPaolo Bonzini <pbonzini@redhat.com>
Fri, 11 Aug 2017 16:59:29 +0000 (18:59 +0200)
Bail out immediately if there is no available mmu page to allocate. make_mmu_pages_available() now returns an int so that callers can detect when zapping old pages still leaves nothing free and unwind (drop mmu_lock and retry the fault) instead of proceeding to allocate a shadow page.
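
The bail-out pattern the diff introduces is sketched below in a self-contained
userspace form; the reclaim_pages()/handle_fault()/free_pages names are
illustrative stand-ins, not KVM symbols:

	#include <errno.h>
	#include <stdio.h>

	/* Illustrative stand-in for kvm_mmu_available_pages() after reclaim. */
	static unsigned int free_pages;

	/* Returns 0 on success, -ENOSPC if no page could be made available. */
	static int make_pages_available(void)
	{
		/* ... zap/recycle old pages here, as the real function does ... */
		if (!free_pages)
			return -ENOSPC;
		return 0;
	}

	static int handle_fault(void)
	{
		/* mmu_lock would be held at this point in the real code */
		if (make_pages_available() < 0)
			return -1;	/* bail out; caller unlocks and retries */
		/* ... safe to allocate and map a shadow page ... */
		return 0;
	}

	int main(void)
	{
		free_pages = 0;
		printf("fault with no pages: %d\n", handle_fault());
		free_pages = 1;
		printf("fault with pages:    %d\n", handle_fault());
		return 0;
	}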

Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Radim Krčmář <rkrcmar@redhat.com>
Signed-off-by: Wanpeng Li <wanpeng.li@hotmail.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/mmu.c
arch/x86/kvm/paging_tmpl.h

index e4ce20bee5de16f7cd35b206191ee11cd5f710ac..e721e10afda109d39bce1d12e877580717c2e3c8 100644 (file)
@@ -3257,7 +3257,7 @@ static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level,
 
 static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
                         gva_t gva, kvm_pfn_t *pfn, bool write, bool *writable);
-static void make_mmu_pages_available(struct kvm_vcpu *vcpu);
+static int make_mmu_pages_available(struct kvm_vcpu *vcpu);
 
 static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code,
                         gfn_t gfn, bool prefault)
@@ -3297,7 +3297,8 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code,
        spin_lock(&vcpu->kvm->mmu_lock);
        if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
                goto out_unlock;
-       make_mmu_pages_available(vcpu);
+       if (make_mmu_pages_available(vcpu) < 0)
+               goto out_unlock;
        if (likely(!force_pt_level))
                transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level);
        r = __direct_map(vcpu, write, map_writable, level, gfn, pfn, prefault);
@@ -3376,7 +3377,10 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
 
        if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
                spin_lock(&vcpu->kvm->mmu_lock);
-               make_mmu_pages_available(vcpu);
+               if (make_mmu_pages_available(vcpu) < 0) {
+                       spin_unlock(&vcpu->kvm->mmu_lock);
+                       return 1;
+               }
                sp = kvm_mmu_get_page(vcpu, 0, 0, PT64_ROOT_LEVEL, 1, ACC_ALL);
                ++sp->root_count;
                spin_unlock(&vcpu->kvm->mmu_lock);
@@ -3387,7 +3391,10 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
 
                        MMU_WARN_ON(VALID_PAGE(root));
                        spin_lock(&vcpu->kvm->mmu_lock);
-                       make_mmu_pages_available(vcpu);
+                       if (make_mmu_pages_available(vcpu) < 0) {
+                               spin_unlock(&vcpu->kvm->mmu_lock);
+                               return 1;
+                       }
                        sp = kvm_mmu_get_page(vcpu, i << (30 - PAGE_SHIFT),
                                        i << 30, PT32_ROOT_LEVEL, 1, ACC_ALL);
                        root = __pa(sp->spt);
@@ -3424,7 +3431,10 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
                MMU_WARN_ON(VALID_PAGE(root));
 
                spin_lock(&vcpu->kvm->mmu_lock);
-               make_mmu_pages_available(vcpu);
+               if (make_mmu_pages_available(vcpu) < 0) {
+                       spin_unlock(&vcpu->kvm->mmu_lock);
+                       return 1;
+               }
                sp = kvm_mmu_get_page(vcpu, root_gfn, 0, PT64_ROOT_LEVEL,
                                      0, ACC_ALL);
                root = __pa(sp->spt);
@@ -3458,7 +3468,10 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
                                return 1;
                }
                spin_lock(&vcpu->kvm->mmu_lock);
-               make_mmu_pages_available(vcpu);
+               if (make_mmu_pages_available(vcpu) < 0) {
+                       spin_unlock(&vcpu->kvm->mmu_lock);
+                       return 1;
+               }
                sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30, PT32_ROOT_LEVEL,
                                      0, ACC_ALL);
                root = __pa(sp->spt);
@@ -3867,7 +3880,8 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
        spin_lock(&vcpu->kvm->mmu_lock);
        if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
                goto out_unlock;
-       make_mmu_pages_available(vcpu);
+       if (make_mmu_pages_available(vcpu) < 0)
+               goto out_unlock;
        if (likely(!force_pt_level))
                transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level);
        r = __direct_map(vcpu, write, map_writable, level, gfn, pfn, prefault);
@@ -4786,12 +4800,12 @@ int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt);
 
-static void make_mmu_pages_available(struct kvm_vcpu *vcpu)
+static int make_mmu_pages_available(struct kvm_vcpu *vcpu)
 {
        LIST_HEAD(invalid_list);
 
        if (likely(kvm_mmu_available_pages(vcpu->kvm) >= KVM_MIN_FREE_MMU_PAGES))
-               return;
+               return 0;
 
        while (kvm_mmu_available_pages(vcpu->kvm) < KVM_REFILL_PAGES) {
                if (!prepare_zap_oldest_mmu_page(vcpu->kvm, &invalid_list))
@@ -4800,6 +4814,10 @@ static void make_mmu_pages_available(struct kvm_vcpu *vcpu)
                ++vcpu->kvm->stat.mmu_recycled;
        }
        kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
+
+       if (!kvm_mmu_available_pages(vcpu->kvm))
+               return -ENOSPC;
+       return 0;
 }
 
 int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
index b0454c7e4cffe6c99e9619892a82afcd5d7ab70e..3bb90ceeb52dd9a3d4289c3d2f8684ca84fe4175 100644 (file)
@@ -819,7 +819,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
                goto out_unlock;
 
        kvm_mmu_audit(vcpu, AUDIT_PRE_PAGE_FAULT);
-       make_mmu_pages_available(vcpu);
+       if (make_mmu_pages_available(vcpu) < 0)
+               goto out_unlock;
        if (!force_pt_level)
                transparent_hugepage_adjust(vcpu, &walker.gfn, &pfn, &level);
        r = FNAME(fetch)(vcpu, addr, &walker, write_fault,