KVM/x86: Call smp_wmb() before increasing tlbs_dirty
author Lan Tianyu <tianyu.lan@intel.com>
Sun, 13 Mar 2016 03:10:27 +0000 (11:10 +0800)
committer Paolo Bonzini <pbonzini@redhat.com>
Tue, 22 Mar 2016 15:38:32 +0000 (16:38 +0100)
Update spte before increasing tlbs_dirty to make sure no tlb flush
is lost after spte is zapped. This pairs with the barrier in
kvm_flush_remote_tlbs().

Signed-off-by: Lan Tianyu <tianyu.lan@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/paging_tmpl.h

index e0c2254211574c29427be353aa68d4c856606ac4..1d971c7553c3847f0d1335487ce551a19875b709 100644
@@ -960,6 +960,12 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
                        return 0;
 
                if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) {
+                       /*
+                        * Update spte before increasing tlbs_dirty to make
+                        * sure no tlb flush is lost after spte is zapped; see
+                        * the comments in kvm_flush_remote_tlbs().
+                        */
+                       smp_wmb();
                        vcpu->kvm->tlbs_dirty++;
                        continue;
                }
@@ -975,6 +981,11 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 
                if (gfn != sp->gfns[i]) {
                        drop_spte(vcpu->kvm, &sp->spt[i]);
+                       /*
+                        * The same as above where we are doing
+                        * prefetch_invalid_gpte().
+                        */
+                       smp_wmb();
                        vcpu->kvm->tlbs_dirty++;
                        continue;
                }
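
For illustration only, below is a stand-alone user-space sketch of the
ordering this patch enforces; it is not kernel code. C11 release/acquire
atomics stand in for smp_wmb() and for the pairing barrier in
kvm_flush_remote_tlbs(), and the spte/tlbs_dirty variables are simplified
stand-ins for the real fields. The point it shows is that the spte must
already be zapped by the time a remote flusher observes the tlbs_dirty
increment; otherwise the flusher could do its flush while the old spte is
still present, reset tlbs_dirty to zero, and leave the later zap with no
pending flush recorded.

/*
 * User-space sketch only (assumed names, not the kernel implementation).
 * Build: cc -std=c11 -pthread sketch.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned long spte = 0xdeadbeefUL; /* stand-in for a shadow PTE   */
static _Atomic long tlbs_dirty;                   /* stand-in for kvm->tlbs_dirty */

/* sync_page() side: zap the spte, then publish the increment. */
static void *zap_side(void *arg)
{
	(void)arg;
	atomic_store_explicit(&spte, 0, memory_order_relaxed);
	/* release ordering here plays the role of smp_wmb() before tlbs_dirty++ */
	atomic_fetch_add_explicit(&tlbs_dirty, 1, memory_order_release);
	return NULL;
}

/* flush side: sample the counter, flush, then fold back what was flushed. */
static void *flush_side(void *arg)
{
	(void)arg;
	/* acquire ordering plays the role of the pairing barrier on the read */
	long dirty = atomic_load_explicit(&tlbs_dirty, memory_order_acquire);

	/* ... a real flusher would send TLB flush requests here ... */

	/* clear only the count actually covered; a racing increment keeps it nonzero */
	atomic_compare_exchange_strong(&tlbs_dirty, &dirty, 0);
	return NULL;
}

int main(void)
{
	pthread_t a, b;
	pthread_create(&a, NULL, zap_side, NULL);
	pthread_create(&b, NULL, flush_side, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	printf("spte=%#lx tlbs_dirty=%ld\n",
	       atomic_load(&spte), atomic_load(&tlbs_dirty));
	return 0;
}

With the release/acquire pair, a flusher that succeeds in resetting the
counter has necessarily observed the zapped spte before it flushed;
dropping the writer-side barrier would let the increment become visible
first, which is exactly the lost-flush window this patch closes.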