KVM: arm64: Add a range to __pkvm_host_unshare_guest()
author Vincent Donnefort <vdonnefort@google.com>
Wed, 21 May 2025 12:48:28 +0000 (13:48 +0100)
committer Marc Zyngier <maz@kernel.org>
Wed, 21 May 2025 13:33:51 +0000 (14:33 +0100)
In preparation for supporting stage-2 huge mappings for np-guests, add a
nr_pages argument to the __pkvm_host_unshare_guest hypercall. This range
supports only two values: 1 or PMD_SIZE / PAGE_SIZE (that is, 512 on a
4K-pages system).

Signed-off-by: Vincent Donnefort <vdonnefort@google.com>
Link: https://lore.kernel.org/r/20250521124834.1070650-5-vdonnefort@google.com
Signed-off-by: Marc Zyngier <maz@kernel.org>
arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
arch/arm64/kvm/hyp/nvhe/hyp-main.c
arch/arm64/kvm/hyp/nvhe/mem_protect.c
arch/arm64/kvm/pkvm.c

index 47aa7b01114ff7ed8ac6ec67383ea82f5fb79c6f..19671edbe18fac25d7248222cbdbf0155ade9b63 100644 (file)
@@ -41,7 +41,7 @@ int __pkvm_host_share_ffa(u64 pfn, u64 nr_pages);
 int __pkvm_host_unshare_ffa(u64 pfn, u64 nr_pages);
 int __pkvm_host_share_guest(u64 pfn, u64 gfn, u64 nr_pages, struct pkvm_hyp_vcpu *vcpu,
                            enum kvm_pgtable_prot prot);
-int __pkvm_host_unshare_guest(u64 gfn, struct pkvm_hyp_vm *hyp_vm);
+int __pkvm_host_unshare_guest(u64 gfn, u64 nr_pages, struct pkvm_hyp_vm *hyp_vm);
 int __pkvm_host_relax_perms_guest(u64 gfn, struct pkvm_hyp_vcpu *vcpu, enum kvm_pgtable_prot prot);
 int __pkvm_host_wrprotect_guest(u64 gfn, struct pkvm_hyp_vm *hyp_vm);
 int __pkvm_host_test_clear_young_guest(u64 gfn, bool mkold, struct pkvm_hyp_vm *vm);
index 4d3d215955c3252ce2d343ae1f7877b6f7c9fdad..5c03bd1db87373eff169423751730f592cbc1a6d 100644 (file)
@@ -270,6 +270,7 @@ static void handle___pkvm_host_unshare_guest(struct kvm_cpu_context *host_ctxt)
 {
        DECLARE_REG(pkvm_handle_t, handle, host_ctxt, 1);
        DECLARE_REG(u64, gfn, host_ctxt, 2);
+       DECLARE_REG(u64, nr_pages, host_ctxt, 3);
        struct pkvm_hyp_vm *hyp_vm;
        int ret = -EINVAL;
 
@@ -280,7 +281,7 @@ static void handle___pkvm_host_unshare_guest(struct kvm_cpu_context *host_ctxt)
        if (!hyp_vm)
                goto out;
 
-       ret = __pkvm_host_unshare_guest(gfn, hyp_vm);
+       ret = __pkvm_host_unshare_guest(gfn, nr_pages, hyp_vm);
        put_pkvm_hyp_vm(hyp_vm);
 out:
        cpu_reg(host_ctxt, 1) =  ret;
index 8e0847aa090d26612759495f12f8b528f859fef9..884e2316aa48e863a49c4f42a45c758379f386d1 100644 (file)
@@ -980,10 +980,9 @@ unlock:
        return ret;
 }
 
-static int __check_host_shared_guest(struct pkvm_hyp_vm *vm, u64 *__phys, u64 ipa)
+static int __check_host_shared_guest(struct pkvm_hyp_vm *vm, u64 *__phys, u64 ipa, u64 size)
 {
        enum pkvm_page_state state;
-       struct hyp_page *page;
        kvm_pte_t pte;
        u64 phys;
        s8 level;
@@ -994,7 +993,7 @@ static int __check_host_shared_guest(struct pkvm_hyp_vm *vm, u64 *__phys, u64 ip
                return ret;
        if (!kvm_pte_valid(pte))
                return -ENOENT;
-       if (level != KVM_PGTABLE_LAST_LEVEL)
+       if (kvm_granule_size(level) != size)
                return -E2BIG;
 
        state = guest_get_page_state(pte, ipa);
@@ -1002,43 +1001,49 @@ static int __check_host_shared_guest(struct pkvm_hyp_vm *vm, u64 *__phys, u64 ip
                return -EPERM;
 
        phys = kvm_pte_to_phys(pte);
-       ret = check_range_allowed_memory(phys, phys + PAGE_SIZE);
+       ret = check_range_allowed_memory(phys, phys + size);
        if (WARN_ON(ret))
                return ret;
 
-       page = hyp_phys_to_page(phys);
-       if (get_host_state(page) != PKVM_PAGE_SHARED_OWNED)
-               return -EPERM;
-       if (WARN_ON(!page->host_share_guest_count))
-               return -EINVAL;
+       for_each_hyp_page(page, phys, size) {
+               if (get_host_state(page) != PKVM_PAGE_SHARED_OWNED)
+                       return -EPERM;
+               if (WARN_ON(!page->host_share_guest_count))
+                       return -EINVAL;
+       }
 
        *__phys = phys;
 
        return 0;
 }
 
-int __pkvm_host_unshare_guest(u64 gfn, struct pkvm_hyp_vm *vm)
+int __pkvm_host_unshare_guest(u64 gfn, u64 nr_pages, struct pkvm_hyp_vm *vm)
 {
        u64 ipa = hyp_pfn_to_phys(gfn);
-       struct hyp_page *page;
-       u64 phys;
+       u64 size, phys;
        int ret;
 
+       ret = __guest_check_transition_size(0, ipa, nr_pages, &size);
+       if (ret)
+               return ret;
+
        host_lock_component();
        guest_lock_component(vm);
 
-       ret = __check_host_shared_guest(vm, &phys, ipa);
+       ret = __check_host_shared_guest(vm, &phys, ipa, size);
        if (ret)
                goto unlock;
 
-       ret = kvm_pgtable_stage2_unmap(&vm->pgt, ipa, PAGE_SIZE);
+       ret = kvm_pgtable_stage2_unmap(&vm->pgt, ipa, size);
        if (ret)
                goto unlock;
 
-       page = hyp_phys_to_page(phys);
-       page->host_share_guest_count--;
-       if (!page->host_share_guest_count)
-               WARN_ON(__host_set_page_state_range(phys, PAGE_SIZE, PKVM_PAGE_OWNED));
+       for_each_hyp_page(page, phys, size) {
+               /* __check_host_shared_guest() protects against underflow */
+               page->host_share_guest_count--;
+               if (!page->host_share_guest_count)
+                       set_host_state(page, PKVM_PAGE_OWNED);
+       }
 
 unlock:
        guest_unlock_component(vm);
@@ -1058,7 +1063,7 @@ static void assert_host_shared_guest(struct pkvm_hyp_vm *vm, u64 ipa)
        host_lock_component();
        guest_lock_component(vm);
 
-       ret = __check_host_shared_guest(vm, &phys, ipa);
+       ret = __check_host_shared_guest(vm, &phys, ipa, PAGE_SIZE);
 
        guest_unlock_component(vm);
        host_unlock_component();
@@ -1245,7 +1250,7 @@ void pkvm_ownership_selftest(void *base)
        assert_transition_res(-EPERM,   __pkvm_host_unshare_ffa, pfn, 1);
        assert_transition_res(-EPERM,   hyp_pin_shared_mem, virt, virt + size);
        assert_transition_res(-EPERM,   __pkvm_host_share_guest, pfn, gfn, 1, vcpu, prot);
-       assert_transition_res(-ENOENT,  __pkvm_host_unshare_guest, gfn, vm);
+       assert_transition_res(-ENOENT,  __pkvm_host_unshare_guest, gfn, 1, vm);
 
        selftest_state.host = PKVM_PAGE_OWNED;
        selftest_state.hyp = PKVM_NOPAGE;
@@ -1253,7 +1258,7 @@ void pkvm_ownership_selftest(void *base)
        assert_transition_res(-EPERM,   __pkvm_hyp_donate_host, pfn, 1);
        assert_transition_res(-EPERM,   __pkvm_host_unshare_hyp, pfn);
        assert_transition_res(-EPERM,   __pkvm_host_unshare_ffa, pfn, 1);
-       assert_transition_res(-ENOENT,  __pkvm_host_unshare_guest, gfn, vm);
+       assert_transition_res(-ENOENT,  __pkvm_host_unshare_guest, gfn, 1, vm);
        assert_transition_res(-EPERM,   hyp_pin_shared_mem, virt, virt + size);
 
        selftest_state.host = PKVM_PAGE_SHARED_OWNED;
@@ -1264,7 +1269,7 @@ void pkvm_ownership_selftest(void *base)
        assert_transition_res(-EPERM,   __pkvm_host_share_ffa, pfn, 1);
        assert_transition_res(-EPERM,   __pkvm_hyp_donate_host, pfn, 1);
        assert_transition_res(-EPERM,   __pkvm_host_share_guest, pfn, gfn, 1, vcpu, prot);
-       assert_transition_res(-ENOENT,  __pkvm_host_unshare_guest, gfn, vm);
+       assert_transition_res(-ENOENT,  __pkvm_host_unshare_guest, gfn, 1, vm);
 
        assert_transition_res(0,        hyp_pin_shared_mem, virt, virt + size);
        assert_transition_res(0,        hyp_pin_shared_mem, virt, virt + size);
@@ -1276,7 +1281,7 @@ void pkvm_ownership_selftest(void *base)
        assert_transition_res(-EPERM,   __pkvm_host_share_ffa, pfn, 1);
        assert_transition_res(-EPERM,   __pkvm_hyp_donate_host, pfn, 1);
        assert_transition_res(-EPERM,   __pkvm_host_share_guest, pfn, gfn, 1, vcpu, prot);
-       assert_transition_res(-ENOENT,  __pkvm_host_unshare_guest, gfn, vm);
+       assert_transition_res(-ENOENT,  __pkvm_host_unshare_guest, gfn, 1, vm);
 
        hyp_unpin_shared_mem(virt, virt + size);
        assert_page_state();
@@ -1295,7 +1300,7 @@ void pkvm_ownership_selftest(void *base)
        assert_transition_res(-EPERM,   __pkvm_host_unshare_hyp, pfn);
        assert_transition_res(-EPERM,   __pkvm_hyp_donate_host, pfn, 1);
        assert_transition_res(-EPERM,   __pkvm_host_share_guest, pfn, gfn, 1, vcpu, prot);
-       assert_transition_res(-ENOENT,  __pkvm_host_unshare_guest, gfn, vm);
+       assert_transition_res(-ENOENT,  __pkvm_host_unshare_guest, gfn, 1, vm);
        assert_transition_res(-EPERM,   hyp_pin_shared_mem, virt, virt + size);
 
        selftest_state.host = PKVM_PAGE_OWNED;
@@ -1319,11 +1324,11 @@ void pkvm_ownership_selftest(void *base)
        WARN_ON(hyp_virt_to_page(virt)->host_share_guest_count != 2);
 
        selftest_state.guest[0] = PKVM_NOPAGE;
-       assert_transition_res(0,        __pkvm_host_unshare_guest, gfn, vm);
+       assert_transition_res(0,        __pkvm_host_unshare_guest, gfn, 1, vm);
 
        selftest_state.guest[1] = PKVM_NOPAGE;
        selftest_state.host = PKVM_PAGE_OWNED;
-       assert_transition_res(0,        __pkvm_host_unshare_guest, gfn + 1, vm);
+       assert_transition_res(0,        __pkvm_host_unshare_guest, gfn + 1, 1, vm);
 
        selftest_state.host = PKVM_NOPAGE;
        selftest_state.hyp = PKVM_PAGE_OWNED;
index 987bc5fb18f9fe8ca6cf4600937e40f7232d6da5..0c5733be6bf400ebb44fbbe33779948baa248a69 100644 (file)
@@ -390,7 +390,7 @@ int pkvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size)
 
        lockdep_assert_held_write(&kvm->mmu_lock);
        for_each_mapping_in_range_safe(pgt, addr, addr + size, mapping) {
-               ret = kvm_call_hyp_nvhe(__pkvm_host_unshare_guest, handle, mapping->gfn);
+               ret = kvm_call_hyp_nvhe(__pkvm_host_unshare_guest, handle, mapping->gfn, 1);
                if (WARN_ON(ret))
                        break;
                rb_erase(&mapping->node, &pgt->pkvm_mappings);