From 0387d79e24d6cd816ea600f91607bd27c680a897 Mon Sep 17 00:00:00 2001
From: Sean Christopherson <seanjc@google.com>
Date: Thu, 10 Oct 2024 19:10:35 -0700
Subject: [PATCH] KVM: x86/mmu: Fold all of make_spte()'s writable handling
 into one if-else

Now that make_spte() no longer uses a funky goto to bail out for a
special case of its unsync handling, combine all of the unsync vs.
writable logic into a single if-else statement.

No functional change intended.

Link: https://lore.kernel.org/r/20241011021051.1557902-4-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
---
 arch/x86/kvm/mmu/spte.c | 13 ++++---------
 1 file changed, 4 insertions(+), 9 deletions(-)

diff --git a/arch/x86/kvm/mmu/spte.c b/arch/x86/kvm/mmu/spte.c
index 09ce93c4916a..030813781a63 100644
--- a/arch/x86/kvm/mmu/spte.c
+++ b/arch/x86/kvm/mmu/spte.c
@@ -217,8 +217,6 @@ bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 	spte |= (u64)pfn << PAGE_SHIFT;
 
 	if (pte_access & ACC_WRITE_MASK) {
-		spte |= PT_WRITABLE_MASK | shadow_mmu_writable_mask;
-
 		/*
 		 * Unsync shadow pages that are reachable by the new, writable
 		 * SPTE.  Write-protect the SPTE if the page can't be unsync'd,
@@ -233,16 +231,13 @@ bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 		 * guaranteed by both the shadow MMU and the TDP MMU.
 		 */
		if ((!is_last_spte(old_spte, level) || !is_writable_pte(old_spte)) &&
-		    mmu_try_to_unsync_pages(vcpu->kvm, slot, gfn, synchronizing, prefetch)) {
+		    mmu_try_to_unsync_pages(vcpu->kvm, slot, gfn, synchronizing, prefetch))
 			wrprot = true;
-			pte_access &= ~ACC_WRITE_MASK;
-			spte &= ~(PT_WRITABLE_MASK | shadow_mmu_writable_mask);
-		}
+		else
+			spte |= PT_WRITABLE_MASK | shadow_mmu_writable_mask |
+				spte_shadow_dirty_mask(spte);
 	}
 
-	if (pte_access & ACC_WRITE_MASK)
-		spte |= spte_shadow_dirty_mask(spte);
-
 	if (prefetch && !synchronizing)
 		spte = mark_spte_for_access_track(spte);
 
-- 
2.25.1
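
For quick reference, the consolidated writable handling in make_spte() reads
roughly as follows once the patch is applied. This is a condensed sketch
reconstructed from the hunks above, not a verbatim copy of the tree: the long
unsync comment is elided and short orientation comments are added.

	if (pte_access & ACC_WRITE_MASK) {
		/*
		 * Write-protect the new SPTE (leave it read-only) if the
		 * page it maps can't be unsync'd, i.e. if
		 * mmu_try_to_unsync_pages() fails.
		 */
		if ((!is_last_spte(old_spte, level) || !is_writable_pte(old_spte)) &&
		    mmu_try_to_unsync_pages(vcpu->kvm, slot, gfn, synchronizing, prefetch))
			wrprot = true;
		else
			/*
			 * Otherwise make the SPTE writable and dirty in one
			 * shot.
			 */
			spte |= PT_WRITABLE_MASK | shadow_mmu_writable_mask |
				spte_shadow_dirty_mask(spte);
	}

Folding spte_shadow_dirty_mask() into the same else arm is what lets the patch
drop the trailing "if (pte_access & ACC_WRITE_MASK)" recheck, which in turn
means the write-protect path no longer needs to clear ACC_WRITE_MASK from
pte_access or strip the writable bits it had speculatively set.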