Gather pending TLB flushes across both the legacy and TDP MMUs when
zapping collapsible SPTEs to avoid multiple flushes if both the legacy
MMU (for nested guests) and TDP MMU have mappings for the memslot.
Note, this also optimizes the TDP MMU to flush only the relevant range
when running as L1 with Hyper-V enlightenments.
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20210326021957.1424875-4-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
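For context before the hunks: the pattern being adopted is "accumulate,
then flush once". Each zap pass reports whether it zapped any SPTEs, the
results are OR'd together, and a single memslot-scoped flush covers both
MMUs. Below is a minimal sketch of the post-patch caller flow; the
enclosing function's signature is assumed, since the first hunk elides it
along with any other surrounding context lines.

	/*
	 * Sketch only, not verbatim kernel source: shows how the flush
	 * state threads through the two zap passes after this patch.
	 */
	void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
					   struct kvm_memory_slot *slot)
	{
		bool flush;

		write_lock(&kvm->mmu_lock);

		/* Legacy/shadow MMU pass: true if any SPTE was zapped. */
		flush = slot_handle_leaf(kvm, slot, kvm_mmu_zap_collapsible_spte, true);

		/* TDP MMU pass folds its result into the accumulated state. */
		if (is_tdp_mmu_enabled(kvm))
			flush = kvm_tdp_mmu_zap_collapsible_sptes(kvm, slot, flush);

		/* One flush, scoped to the memslot, covers both passes. */
		if (flush)
			kvm_arch_flush_remote_tlbs_memslot(kvm, slot);

		write_unlock(&kvm->mmu_lock);
	}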
 	write_lock(&kvm->mmu_lock);
 	flush = slot_handle_leaf(kvm, slot, kvm_mmu_zap_collapsible_spte, true);
+
+	if (is_tdp_mmu_enabled(kvm))
+		flush = kvm_tdp_mmu_zap_collapsible_sptes(kvm, slot, flush);
+
 	if (flush)
 		kvm_arch_flush_remote_tlbs_memslot(kvm, slot);
 
-	if (is_tdp_mmu_enabled(kvm))
-		kvm_tdp_mmu_zap_collapsible_sptes(kvm, slot);
-
 	write_unlock(&kvm->mmu_lock);
 }
  * Clear non-leaf entries (and free associated page tables) which could
  * be replaced by large mappings, for GFNs within the slot.
  */
-void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
-				       struct kvm_memory_slot *slot)
+bool kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
+				       struct kvm_memory_slot *slot, bool flush)
 {
 	struct kvm_mmu_page *root;
-	bool flush = false;
 	int root_as_id;
 
 	for_each_tdp_mmu_root_yield_safe(kvm, root) {
 		flush = zap_collapsible_spte_range(kvm, root, slot, flush);
 	}
 
-	if (flush)
-		kvm_flush_remote_tlbs(kvm);
+	return flush;
 }
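A note on the contract change in this hunk: kvm_tdp_mmu_zap_collapsible_sptes()
no longer issues its own TLB flush. It accepts the caller's accumulated flush
state, folds in the result of each zap_collapsible_spte_range() pass, and
returns the combined value, leaving the decision of when and how to flush to
the caller. That is what lets the unconditional kvm_flush_remote_tlbs() (a
full remote flush) be replaced by the caller's single
kvm_arch_flush_remote_tlbs_memslot(), which, per the commit message, can flush
only the relevant range when running as L1 with Hyper-V enlightenments.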
 					struct kvm_memory_slot *slot,
 					gfn_t gfn, unsigned long mask,
 					bool wrprot);
-void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
-				       struct kvm_memory_slot *slot);
+bool kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
+				       struct kvm_memory_slot *slot, bool flush);
 bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
 				   struct kvm_memory_slot *slot, gfn_t gfn);