KVM: x86/mmu: Fold mmu_spte_update_no_track() into mmu_spte_update()
author	Sean Christopherson <seanjc@google.com>
Fri, 11 Oct 2024 02:10:39 +0000 (19:10 -0700)
committer	Sean Christopherson <seanjc@google.com>
Wed, 30 Oct 2024 21:46:46 +0000 (14:46 -0700)
Fold the guts of mmu_spte_update_no_track() into mmu_spte_update() now
that the latter doesn't flush when clearing A/D bits, i.e. now that there
is no need to explicitly avoid TLB flushes when aging SPTEs.

Opportunistically WARN if mmu_spte_update() requests a TLB flush when
aging SPTEs, as aging should never modify a SPTE in such a way that KVM
thinks a TLB flush is needed.
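
Condensed from the second hunk below, the access-tracking path in
kvm_rmap_age_gfn_range() then reads (surrounding context unchanged):

	spte = mark_spte_for_access_track(spte);
	WARN_ON_ONCE(mmu_spte_update(sptep, spte));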

Link: https://lore.kernel.org/r/20241011021051.1557902-8-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
arch/x86/kvm/mmu/mmu.c

index cb4f0a8c52e02f28610f550190a23e5eadf806e6..5f7d7db30b21039a4ddbfa8404ce8060f4e1b5b0 100644
@@ -485,32 +485,6 @@ static void mmu_spte_set(u64 *sptep, u64 new_spte)
        __set_spte(sptep, new_spte);
 }
 
-/*
- * Update the SPTE (excluding the PFN), but do not track changes in its
- * accessed/dirty status.
- */
-static u64 mmu_spte_update_no_track(u64 *sptep, u64 new_spte)
-{
-       u64 old_spte = *sptep;
-
-       WARN_ON_ONCE(!is_shadow_present_pte(new_spte));
-       check_spte_writable_invariants(new_spte);
-
-       if (!is_shadow_present_pte(old_spte)) {
-               mmu_spte_set(sptep, new_spte);
-               return old_spte;
-       }
-
-       if (!spte_has_volatile_bits(old_spte))
-               __update_clear_spte_fast(sptep, new_spte);
-       else
-               old_spte = __update_clear_spte_slow(sptep, new_spte);
-
-       WARN_ON_ONCE(spte_to_pfn(old_spte) != spte_to_pfn(new_spte));
-
-       return old_spte;
-}
-
 /* Rules for using mmu_spte_update:
  * Update the state bits, it means the mapped pfn is not changed.
  *
@@ -535,10 +509,23 @@ static u64 mmu_spte_update_no_track(u64 *sptep, u64 new_spte)
  */
 static bool mmu_spte_update(u64 *sptep, u64 new_spte)
 {
-       u64 old_spte = mmu_spte_update_no_track(sptep, new_spte);
+       u64 old_spte = *sptep;
 
-       if (!is_shadow_present_pte(old_spte))
+       WARN_ON_ONCE(!is_shadow_present_pte(new_spte));
+       check_spte_writable_invariants(new_spte);
+
+       if (!is_shadow_present_pte(old_spte)) {
+               mmu_spte_set(sptep, new_spte);
                return false;
+       }
+
+       if (!spte_has_volatile_bits(old_spte))
+               __update_clear_spte_fast(sptep, new_spte);
+       else
+               old_spte = __update_clear_spte_slow(sptep, new_spte);
+
+       WARN_ON_ONCE(!is_shadow_present_pte(old_spte) ||
+                    spte_to_pfn(old_spte) != spte_to_pfn(new_spte));
 
        return is_mmu_writable_spte(old_spte) && !is_mmu_writable_spte(new_spte);
 }
@@ -1598,8 +1585,13 @@ static bool kvm_rmap_age_gfn_range(struct kvm *kvm,
                                clear_bit((ffs(shadow_accessed_mask) - 1),
                                        (unsigned long *)sptep);
                        } else {
+                               /*
+                                * WARN if mmu_spte_update() signals the need
+                                * for a TLB flush, as Access tracking a SPTE
+                                * should never trigger an _immediate_ flush.
+                                */
                                spte = mark_spte_for_access_track(spte);
-                               mmu_spte_update_no_track(sptep, spte);
+                               WARN_ON_ONCE(mmu_spte_update(sptep, spte));
                        }
                        young = true;
                }
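
For reference, mmu_spte_update() as it reads with the helper folded in,
reconstructed from the first hunk above (a sketch of the result, not an
authoritative copy of the tree; the inline comments are added here for
readability and are not part of the patch):

static bool mmu_spte_update(u64 *sptep, u64 new_spte)
{
	u64 old_spte = *sptep;

	WARN_ON_ONCE(!is_shadow_present_pte(new_spte));
	check_spte_writable_invariants(new_spte);

	/* Installing a SPTE where none existed never requires a TLB flush. */
	if (!is_shadow_present_pte(old_spte)) {
		mmu_spte_set(sptep, new_spte);
		return false;
	}

	if (!spte_has_volatile_bits(old_spte))
		__update_clear_spte_fast(sptep, new_spte);
	else
		old_spte = __update_clear_spte_slow(sptep, new_spte);

	WARN_ON_ONCE(!is_shadow_present_pte(old_spte) ||
		     spte_to_pfn(old_spte) != spte_to_pfn(new_spte));

	/* Flush only when a previously MMU-writable SPTE loses writability. */
	return is_mmu_writable_spte(old_spte) && !is_mmu_writable_spte(new_spte);
}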