KVM: MMU: don't write-protect if there is a new mapping to an unsync page
author	Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Sat, 15 May 2010 10:52:34 +0000 (18:52 +0800)
committer	Avi Kivity <avi@redhat.com>
Sun, 1 Aug 2010 07:35:50 +0000 (10:35 +0300)
Two cases can happen in the kvm_mmu_get_page() function:

- in the first case, the target sp is already in the cache.  If the sp
  is unsync, we only need to update it so the mapping is valid; we do
  not mark it sync and do not write-protect sp->gfn, since reusing it
  does not break the unsync rule (one shadow page per gfn)

- in the second case, the target sp does not exist, so we need to
  create a new sp for the gfn while the gfn may already have another
  shadow page; to keep the unsync rule, we must sync (mark sync and
  write-protect) the gfn's unsync shadow page.  After multiple unsync
  shadows are enabled, we sync those shadow pages only when the new sp
  is not allowed to become unsync (the unsync rule then becomes: any
  pte page is allowed to become unsync); see the sketch after this
  list
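
Below is a minimal, self-contained C sketch of this lookup flow.  It is
not the kernel code: struct sp, sync_transient() and sync_full() are
mock stand-ins for kvm_mmu_page, kvm_sync_page_transient() and
kvm_sync_page(), and their return conventions are assumptions chosen
for this sketch only.

#include <stdbool.h>
#include <stddef.h>

struct sp {
	unsigned long gfn;	/* guest frame number this page shadows */
	unsigned int role;	/* simplified stand-in for role.word */
	bool unsync;		/* page may hold stale sptes */
	struct sp *next;	/* hash-bucket chain */
};

/* Mock: refresh an unsync page so its mapping is valid, *without*
 * marking it sync or write-protecting its gfn; returns false if the
 * page had to be dropped instead. */
static bool sync_transient(struct sp *s)
{
	(void)s;
	return true;
}

/* Mock: fully sync the page -- mark it sync and write-protect its gfn. */
static void sync_full(struct sp *s)
{
	s->unsync = false;
}

/* Walk one hash bucket; returns a reusable sp, or NULL on a cache miss. */
struct sp *get_page(struct sp *bucket, unsigned long gfn,
		    unsigned int role, bool direct)
{
	struct sp *s, *unsync_sp = NULL;

	for (s = bucket; s; s = s->next) {
		if (s->gfn != gfn)
			continue;

		if (s->unsync)
			unsync_sp = s;	/* remember the gfn's unsync page */

		if (s->role != role)
			continue;

		/*
		 * Case 1: cache hit.  Refresh the gfn's unsync page so
		 * the mapping is valid, without marking it sync or
		 * write-protecting the gfn.  If the refresh fails, fall
		 * through to the allocation path below.
		 */
		if (!direct && unsync_sp && !sync_transient(unsync_sp)) {
			unsync_sp = NULL;
			break;
		}
		return s;
	}

	/*
	 * Case 2: cache miss.  A new sp will be created for the gfn, so
	 * any unsync page left for it must be fully synced (marked sync
	 * and write-protected) to keep the unsync rule.
	 */
	if (!direct && unsync_sp)
		sync_full(unsync_sp);
	return NULL;		/* caller allocates a fresh sp */
}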

Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
arch/x86/kvm/mmu.c

index ef5d140a2705aa3dc165150f0183f8453d5c40fb..064ddfbde1080862ff469f715350fa9647f2ec91 100644
@@ -1337,7 +1337,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
        unsigned index;
        unsigned quadrant;
        struct hlist_head *bucket;
-       struct kvm_mmu_page *sp;
+       struct kvm_mmu_page *sp, *unsync_sp = NULL;
        struct hlist_node *node, *tmp;
 
        role = vcpu->arch.mmu.base_role;
@@ -1356,20 +1356,30 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
        hlist_for_each_entry_safe(sp, node, tmp, bucket, hash_link)
                if (sp->gfn == gfn) {
                        if (sp->unsync)
-                               if (kvm_sync_page(vcpu, sp))
-                                       continue;
+                               unsync_sp = sp;
 
                        if (sp->role.word != role.word)
                                continue;
 
+                       if (!direct && unsync_sp &&
+                             kvm_sync_page_transient(vcpu, unsync_sp)) {
+                               unsync_sp = NULL;
+                               break;
+                       }
+
                        mmu_page_add_parent_pte(vcpu, sp, parent_pte);
                        if (sp->unsync_children) {
                                set_bit(KVM_REQ_MMU_SYNC, &vcpu->requests);
                                kvm_mmu_mark_parents_unsync(sp);
-                       }
+                       } else if (sp->unsync)
+                               kvm_mmu_mark_parents_unsync(sp);
+
                        trace_kvm_mmu_get_page(sp, false);
                        return sp;
                }
+       if (!direct && unsync_sp)
+               kvm_sync_page(vcpu, unsync_sp);
+
        ++vcpu->kvm->stat.mmu_cache_miss;
        sp = kvm_mmu_alloc_page(vcpu, parent_pte);
        if (!sp)