KVM: arm64: Return guest memory from EL2 via dedicated teardown memcache
author Quentin Perret <qperret@google.com>
Thu, 10 Nov 2022 19:02:53 +0000 (19:02 +0000)
committer Marc Zyngier <maz@kernel.org>
Fri, 11 Nov 2022 17:18:58 +0000 (17:18 +0000)
Rather than relying on the host to free the previously-donated pKVM
hypervisor VM pages explicitly on teardown, introduce a dedicated
teardown memcache which allows the host to reclaim guest memory
resources without having to keep track of all of the allocations made by
the pKVM hypervisor at EL2.
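The core trick, sketched below with hypothetical names (the real type is
struct kvm_hyp_memcache and the real helpers are push_hyp_memcache() and
free_hyp_memcache(), as used in the diff; the field layout here is only an
approximation): each page being returned is threaded onto a singly-linked
list through its own first word, using physical addresses, so the host only
needs the list head in order to reclaim everything the hypervisor pushed.

/*
 * Minimal sketch of the teardown memcache idea; hypothetical names,
 * not the kernel's exact definitions.
 */
struct hyp_memcache_sketch {
	unsigned long head;		/* physical address of the last page pushed */
	unsigned long nr_pages;
};

static void memcache_push(struct hyp_memcache_sketch *mc, void *va,
			  unsigned long (*virt_to_pa)(void *va))
{
	/* Link the new page to the previous head through its first word. */
	*(unsigned long *)va = mc->head;
	mc->head = virt_to_pa(va);
	mc->nr_pages++;
}

static void memcache_free_all(struct hyp_memcache_sketch *mc,
			      void *(*pa_to_virt)(unsigned long pa),
			      void (*free_page)(void *va))
{
	/* Walk the physically-linked list and free each page. */
	while (mc->nr_pages) {
		void *va = pa_to_virt(mc->head);

		mc->head = *(unsigned long *)va;
		free_page(va);
		mc->nr_pages--;
	}
}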

Tested-by: Vincent Donnefort <vdonnefort@google.com>
Co-developed-by: Fuad Tabba <tabba@google.com>
Signed-off-by: Fuad Tabba <tabba@google.com>
Signed-off-by: Quentin Perret <qperret@google.com>
Signed-off-by: Will Deacon <will@kernel.org>
[maz: dropped __maybe_unused from unmap_donated_memory_noclear()]
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20221110190259.26861-21-will@kernel.org
arch/arm64/include/asm/kvm_host.h
arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
arch/arm64/kvm/hyp/nvhe/mem_protect.c
arch/arm64/kvm/hyp/nvhe/pkvm.c
arch/arm64/kvm/pkvm.c

diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 57218f0c449e0deec976ff285c7b11bdebc68442..63307e7dc9c5bded4b6c2678799df491c56c7ed8 100644
@@ -176,12 +176,7 @@ typedef unsigned int pkvm_handle_t;
 
 struct kvm_protected_vm {
        pkvm_handle_t handle;
-
-       struct {
-               void *pgd;
-               void *vm;
-               void *vcpus[KVM_MAX_VCPUS];
-       } hyp_donations;
+       struct kvm_hyp_memcache teardown_mc;
 };
 
 struct kvm_arch {
diff --git a/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h b/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
index 420b87e755a42c8d7666203b957f2cefda276361..b7bdbe63deed876f238f97cfd1f03f98497d8aaa 100644
@@ -76,7 +76,7 @@ void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt);
 
 int hyp_pin_shared_mem(void *from, void *to);
 void hyp_unpin_shared_mem(void *from, void *to);
-void reclaim_guest_pages(struct pkvm_hyp_vm *vm);
+void reclaim_guest_pages(struct pkvm_hyp_vm *vm, struct kvm_hyp_memcache *mc);
 int refill_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages,
                    struct kvm_hyp_memcache *host_mc);
 
diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
index 0162afba6dc4807cbf9a2022854aecf068895d7e..94cd48f7850e5210cf991199058dad2ecedaacba 100644
@@ -260,19 +260,24 @@ int kvm_guest_prepare_stage2(struct pkvm_hyp_vm *vm, void *pgd)
        return 0;
 }
 
-void reclaim_guest_pages(struct pkvm_hyp_vm *vm)
+void reclaim_guest_pages(struct pkvm_hyp_vm *vm, struct kvm_hyp_memcache *mc)
 {
-       void *pgd = vm->pgt.pgd;
-       unsigned long nr_pages;
-
-       nr_pages = kvm_pgtable_stage2_pgd_size(vm->kvm.arch.vtcr) >> PAGE_SHIFT;
+       void *addr;
 
+       /* Dump all pgtable pages in the hyp_pool */
        guest_lock_component(vm);
        kvm_pgtable_stage2_destroy(&vm->pgt);
        vm->kvm.arch.mmu.pgd_phys = 0ULL;
        guest_unlock_component(vm);
 
-       WARN_ON(__pkvm_hyp_donate_host(hyp_virt_to_pfn(pgd), nr_pages));
+       /* Drain the hyp_pool into the memcache */
+       addr = hyp_alloc_pages(&vm->pool, 0);
+       while (addr) {
+               memset(hyp_virt_to_page(addr), 0, sizeof(struct hyp_page));
+               push_hyp_memcache(mc, addr, hyp_virt_to_phys);
+               WARN_ON(__pkvm_hyp_donate_host(hyp_virt_to_pfn(addr), 1));
+               addr = hyp_alloc_pages(&vm->pool, 0);
+       }
 }
 
 int __pkvm_prot_finalize(void)
diff --git a/arch/arm64/kvm/hyp/nvhe/pkvm.c b/arch/arm64/kvm/hyp/nvhe/pkvm.c
index 0768307566d42d9cf0d89c1d4c3cc9466c1b262f..81835c2f4c5a0a4b42ac28130dbd46a1174261b4 100644
@@ -393,7 +393,7 @@ static void unmap_donated_memory(void *va, size_t size)
        __unmap_donated_memory(va, size);
 }
 
-static void __maybe_unused unmap_donated_memory_noclear(void *va, size_t size)
+static void unmap_donated_memory_noclear(void *va, size_t size)
 {
        if (!va)
                return;
@@ -527,8 +527,21 @@ unlock:
        return ret;
 }
 
+static void
+teardown_donated_memory(struct kvm_hyp_memcache *mc, void *addr, size_t size)
+{
+       size = PAGE_ALIGN(size);
+       memset(addr, 0, size);
+
+       for (void *start = addr; start < addr + size; start += PAGE_SIZE)
+               push_hyp_memcache(mc, start, hyp_virt_to_phys);
+
+       unmap_donated_memory_noclear(addr, size);
+}
+
 int __pkvm_teardown_vm(pkvm_handle_t handle)
 {
+       struct kvm_hyp_memcache *mc;
        struct pkvm_hyp_vm *hyp_vm;
        struct kvm *host_kvm;
        unsigned int idx;
@@ -547,25 +560,27 @@ int __pkvm_teardown_vm(pkvm_handle_t handle)
                goto err_unlock;
        }
 
+       host_kvm = hyp_vm->host_kvm;
+
        /* Ensure the VMID is clean before it can be reallocated */
        __kvm_tlb_flush_vmid(&hyp_vm->kvm.arch.mmu);
        remove_vm_table_entry(handle);
        hyp_spin_unlock(&vm_table_lock);
 
        /* Reclaim guest pages (including page-table pages) */
-       reclaim_guest_pages(hyp_vm);
+       mc = &host_kvm->arch.pkvm.teardown_mc;
+       reclaim_guest_pages(hyp_vm, mc);
        unpin_host_vcpus(hyp_vm->vcpus, hyp_vm->nr_vcpus);
 
-       /* Return the metadata pages to the host */
+       /* Push the metadata pages to the teardown memcache */
        for (idx = 0; idx < hyp_vm->nr_vcpus; ++idx) {
                struct pkvm_hyp_vcpu *hyp_vcpu = hyp_vm->vcpus[idx];
 
-               unmap_donated_memory(hyp_vcpu, sizeof(*hyp_vcpu));
+               teardown_donated_memory(mc, hyp_vcpu, sizeof(*hyp_vcpu));
        }
 
-       host_kvm = hyp_vm->host_kvm;
        vm_size = pkvm_get_hyp_vm_size(hyp_vm->kvm.created_vcpus);
-       unmap_donated_memory(hyp_vm, vm_size);
+       teardown_donated_memory(mc, hyp_vm, vm_size);
        hyp_unpin_shared_mem(host_kvm, host_kvm + 1);
        return 0;
 
diff --git a/arch/arm64/kvm/pkvm.c b/arch/arm64/kvm/pkvm.c
index 8c443b915e439dfa925b4643cadd19ad3a0efbae..cf56958b1492a486f7e549c95d1f5ec828349a6b 100644
@@ -147,8 +147,6 @@ static int __pkvm_create_hyp_vm(struct kvm *host_kvm)
        handle = ret;
 
        host_kvm->arch.pkvm.handle = handle;
-       host_kvm->arch.pkvm.hyp_donations.pgd = pgd;
-       host_kvm->arch.pkvm.hyp_donations.vm = hyp_vm;
 
        /* Donate memory for the vcpus at hyp and initialize it. */
        hyp_vcpu_sz = PAGE_ALIGN(PKVM_HYP_VCPU_SIZE);
@@ -167,12 +165,12 @@ static int __pkvm_create_hyp_vm(struct kvm *host_kvm)
                        goto destroy_vm;
                }
 
-               host_kvm->arch.pkvm.hyp_donations.vcpus[idx] = hyp_vcpu;
-
                ret = kvm_call_hyp_nvhe(__pkvm_init_vcpu, handle, host_vcpu,
                                        hyp_vcpu);
-               if (ret)
+               if (ret) {
+                       free_pages_exact(hyp_vcpu, hyp_vcpu_sz);
                        goto destroy_vm;
+               }
        }
 
        return 0;
@@ -201,30 +199,13 @@ int pkvm_create_hyp_vm(struct kvm *host_kvm)
 
 void pkvm_destroy_hyp_vm(struct kvm *host_kvm)
 {
-       unsigned long idx, nr_vcpus = host_kvm->created_vcpus;
-       size_t pgd_sz, hyp_vm_sz;
-
-       if (host_kvm->arch.pkvm.handle)
+       if (host_kvm->arch.pkvm.handle) {
                WARN_ON(kvm_call_hyp_nvhe(__pkvm_teardown_vm,
                                          host_kvm->arch.pkvm.handle));
-
-       host_kvm->arch.pkvm.handle = 0;
-
-       for (idx = 0; idx < nr_vcpus; ++idx) {
-               void *hyp_vcpu = host_kvm->arch.pkvm.hyp_donations.vcpus[idx];
-
-               if (!hyp_vcpu)
-                       break;
-
-               free_pages_exact(hyp_vcpu, PAGE_ALIGN(PKVM_HYP_VCPU_SIZE));
        }
 
-       hyp_vm_sz = PAGE_ALIGN(size_add(PKVM_HYP_VM_SIZE,
-                                       size_mul(sizeof(void *), nr_vcpus)));
-       pgd_sz = kvm_pgtable_stage2_pgd_size(host_kvm->arch.vtcr);
-
-       free_pages_exact(host_kvm->arch.pkvm.hyp_donations.vm, hyp_vm_sz);
-       free_pages_exact(host_kvm->arch.pkvm.hyp_donations.pgd, pgd_sz);
+       host_kvm->arch.pkvm.handle = 0;
+       free_hyp_memcache(&host_kvm->arch.pkvm.teardown_mc);
 }
 
 int pkvm_init_host_vm(struct kvm *host_kvm)
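
End to end, the reworked teardown path pairs an EL2 producer with an EL1
consumer. A rough illustration using the hypothetical helpers sketched under
the commit message above (the real code is teardown_donated_memory() and
reclaim_guest_pages() at EL2, and free_hyp_memcache() called from
pkvm_destroy_hyp_vm(), as shown in the diff):

#define SKETCH_PAGE_SIZE	4096UL	/* stand-in for PAGE_SIZE */

/* EL2 side: push every page of a donated range onto the teardown memcache. */
static void el2_return_range(struct hyp_memcache_sketch *mc, char *addr,
			     unsigned long size,
			     unsigned long (*virt_to_pa)(void *va))
{
	unsigned long offset;

	for (offset = 0; offset < size; offset += SKETCH_PAGE_SIZE)
		memcache_push(mc, addr + offset, virt_to_pa);
}

/* Host side: a single call reclaims everything EL2 pushed, no bookkeeping. */
static void host_reclaim(struct hyp_memcache_sketch *mc,
			 void *(*pa_to_virt)(unsigned long pa),
			 void (*free_page)(void *va))
{
	memcache_free_all(mc, pa_to_virt, free_page);
}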