KVM: MMU: Move accessed/dirty bit checks from rmap_remove() to drop_spte()
author Avi Kivity <avi@redhat.com>
Sun, 6 Jun 2010 11:38:12 +0000 (14:38 +0300)
committer Avi Kivity <avi@redhat.com>
Mon, 2 Aug 2010 03:40:18 +0000 (06:40 +0300)
Since we need to make the check atomic, move it to the place that will
set the new spte.

Signed-off-by: Avi Kivity <avi@redhat.com>
arch/x86/kvm/mmu.c

index 1ad39cf70e18a6592b90c564cfe985f1e78650cd..fbdca08b8d8c960d40610dae2993445831b8f08f 100644 (file)
@@ -612,19 +612,11 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
        struct kvm_rmap_desc *desc;
        struct kvm_rmap_desc *prev_desc;
        struct kvm_mmu_page *sp;
-       pfn_t pfn;
        gfn_t gfn;
        unsigned long *rmapp;
        int i;
 
-       if (!is_rmap_spte(*spte))
-               return;
        sp = page_header(__pa(spte));
-       pfn = spte_to_pfn(*spte);
-       if (*spte & shadow_accessed_mask)
-               kvm_set_pfn_accessed(pfn);
-       if (is_writable_pte(*spte))
-               kvm_set_pfn_dirty(pfn);
        gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt);
        rmapp = gfn_to_rmap(kvm, gfn, sp->role.level);
        if (!*rmapp) {
@@ -660,6 +652,17 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
 
 static void drop_spte(struct kvm *kvm, u64 *sptep, u64 new_spte)
 {
+       pfn_t pfn;
+
+       if (!is_rmap_spte(*sptep)) {
+               __set_spte(sptep, new_spte);
+               return;
+       }
+       pfn = spte_to_pfn(*sptep);
+       if (*sptep & shadow_accessed_mask)
+               kvm_set_pfn_accessed(pfn);
+       if (is_writable_pte(*sptep))
+               kvm_set_pfn_dirty(pfn);
        rmap_remove(kvm, sptep);
        __set_spte(sptep, new_spte);
 }