KVM: arm64: Introduce for_each_hyp_page
author: Vincent Donnefort <vdonnefort@google.com>
Wed, 21 May 2025 12:48:26 +0000 (13:48 +0100)
committer: Marc Zyngier <maz@kernel.org>
Wed, 21 May 2025 13:33:51 +0000 (14:33 +0100)
Add a helper to iterate over the hypervisor vmemmap. This will be
particularly handy with the introduction of huge mapping support
for the np-guest stage-2.

Signed-off-by: Vincent Donnefort <vdonnefort@google.com>
Link: https://lore.kernel.org/r/20250521124834.1070650-3-vdonnefort@google.com
Signed-off-by: Marc Zyngier <maz@kernel.org>
arch/arm64/kvm/hyp/include/nvhe/memory.h
arch/arm64/kvm/hyp/nvhe/mem_protect.c
arch/arm64/kvm/hyp/nvhe/setup.c

index eb0c2ebd174321fc88821dc0259775855cc22ea3..dee1a406b0c28c6eb959bdf7c858d48a784442f5 100644 (file)
@@ -96,24 +96,24 @@ static inline struct hyp_page *hyp_phys_to_page(phys_addr_t phys)
 #define hyp_page_to_virt(page) __hyp_va(hyp_page_to_phys(page))
 #define hyp_page_to_pool(page) (((struct hyp_page *)page)->pool)
 
-static inline enum pkvm_page_state get_host_state(phys_addr_t phys)
+static inline enum pkvm_page_state get_host_state(struct hyp_page *p)
 {
-       return (enum pkvm_page_state)hyp_phys_to_page(phys)->__host_state;
+       return p->__host_state;
 }
 
-static inline void set_host_state(phys_addr_t phys, enum pkvm_page_state state)
+static inline void set_host_state(struct hyp_page *p, enum pkvm_page_state state)
 {
-       hyp_phys_to_page(phys)->__host_state = state;
+       p->__host_state = state;
 }
 
-static inline enum pkvm_page_state get_hyp_state(phys_addr_t phys)
+static inline enum pkvm_page_state get_hyp_state(struct hyp_page *p)
 {
-       return hyp_phys_to_page(phys)->__hyp_state_comp ^ PKVM_PAGE_STATE_MASK;
+       return p->__hyp_state_comp ^ PKVM_PAGE_STATE_MASK;
 }
 
-static inline void set_hyp_state(phys_addr_t phys, enum pkvm_page_state state)
+static inline void set_hyp_state(struct hyp_page *p, enum pkvm_page_state state)
 {
-       hyp_phys_to_page(phys)->__hyp_state_comp = state ^ PKVM_PAGE_STATE_MASK;
+       p->__hyp_state_comp = state ^ PKVM_PAGE_STATE_MASK;
 }
 
 /*
index be4f7c5612f8c27404c071597dedc90e7fd0ca95..1018a6f66359171c6179f64c5becef88fa3388aa 100644 (file)
@@ -60,6 +60,11 @@ static void hyp_unlock_component(void)
        hyp_spin_unlock(&pkvm_pgd_lock);
 }
 
+#define for_each_hyp_page(__p, __st, __sz)                             \
+       for (struct hyp_page *__p = hyp_phys_to_page(__st),             \
+                            *__e = __p + ((__sz) >> PAGE_SHIFT);       \
+            __p < __e; __p++)
+
 static void *host_s2_zalloc_pages_exact(size_t size)
 {
        void *addr = hyp_alloc_pages(&host_s2_pool, get_order(size));
@@ -485,7 +490,8 @@ static int host_stage2_adjust_range(u64 addr, struct kvm_mem_range *range)
                return -EAGAIN;
 
        if (pte) {
-               WARN_ON(addr_is_memory(addr) && get_host_state(addr) != PKVM_NOPAGE);
+               WARN_ON(addr_is_memory(addr) &&
+                       get_host_state(hyp_phys_to_page(addr)) != PKVM_NOPAGE);
                return -EPERM;
        }
 
@@ -511,10 +517,8 @@ int host_stage2_idmap_locked(phys_addr_t addr, u64 size,
 
 static void __host_update_page_state(phys_addr_t addr, u64 size, enum pkvm_page_state state)
 {
-       phys_addr_t end = addr + size;
-
-       for (; addr < end; addr += PAGE_SIZE)
-               set_host_state(addr, state);
+       for_each_hyp_page(page, addr, size)
+               set_host_state(page, state);
 }
 
 int host_stage2_set_owner_locked(phys_addr_t addr, u64 size, u8 owner_id)
@@ -636,16 +640,16 @@ static int check_page_state_range(struct kvm_pgtable *pgt, u64 addr, u64 size,
 static int __host_check_page_state_range(u64 addr, u64 size,
                                         enum pkvm_page_state state)
 {
-       u64 end = addr + size;
        int ret;
 
-       ret = check_range_allowed_memory(addr, end);
+       ret = check_range_allowed_memory(addr, addr + size);
        if (ret)
                return ret;
 
        hyp_assert_lock_held(&host_mmu.lock);
-       for (; addr < end; addr += PAGE_SIZE) {
-               if (get_host_state(addr) != state)
+
+       for_each_hyp_page(page, addr, size) {
+               if (get_host_state(page) != state)
                        return -EPERM;
        }
 
@@ -655,7 +659,7 @@ static int __host_check_page_state_range(u64 addr, u64 size,
 static int __host_set_page_state_range(u64 addr, u64 size,
                                       enum pkvm_page_state state)
 {
-       if (get_host_state(addr) == PKVM_NOPAGE) {
+       if (get_host_state(hyp_phys_to_page(addr)) == PKVM_NOPAGE) {
                int ret = host_stage2_idmap_locked(addr, size, PKVM_HOST_MEM_PROT);
 
                if (ret)
@@ -669,18 +673,14 @@ static int __host_set_page_state_range(u64 addr, u64 size,
 
 static void __hyp_set_page_state_range(phys_addr_t phys, u64 size, enum pkvm_page_state state)
 {
-       phys_addr_t end = phys + size;
-
-       for (; phys < end; phys += PAGE_SIZE)
-               set_hyp_state(phys, state);
+       for_each_hyp_page(page, phys, size)
+               set_hyp_state(page, state);
 }
 
 static int __hyp_check_page_state_range(phys_addr_t phys, u64 size, enum pkvm_page_state state)
 {
-       phys_addr_t end = phys + size;
-
-       for (; phys < end; phys += PAGE_SIZE) {
-               if (get_hyp_state(phys) != state)
+       for_each_hyp_page(page, phys, size) {
+               if (get_hyp_state(page) != state)
                        return -EPERM;
        }
 
@@ -931,7 +931,7 @@ int __pkvm_host_share_guest(u64 pfn, u64 gfn, struct pkvm_hyp_vcpu *vcpu,
                goto unlock;
 
        page = hyp_phys_to_page(phys);
-       switch (get_host_state(phys)) {
+       switch (get_host_state(page)) {
        case PKVM_PAGE_OWNED:
                WARN_ON(__host_set_page_state_range(phys, PAGE_SIZE, PKVM_PAGE_SHARED_OWNED));
                break;
@@ -983,9 +983,9 @@ static int __check_host_shared_guest(struct pkvm_hyp_vm *vm, u64 *__phys, u64 ip
        if (WARN_ON(ret))
                return ret;
 
-       if (get_host_state(phys) != PKVM_PAGE_SHARED_OWNED)
-               return -EPERM;
        page = hyp_phys_to_page(phys);
+       if (get_host_state(page) != PKVM_PAGE_SHARED_OWNED)
+               return -EPERM;
        if (WARN_ON(!page->host_share_guest_count))
                return -EINVAL;
 
index 6d513a4b3763caf1f01422f9169f29e3dcab26e4..c19860fc818362e5cfd335d1677fa4780f696a5e 100644 (file)
@@ -190,6 +190,7 @@ static int fix_host_ownership_walker(const struct kvm_pgtable_visit_ctx *ctx,
                                     enum kvm_pgtable_walk_flags visit)
 {
        enum pkvm_page_state state;
+       struct hyp_page *page;
        phys_addr_t phys;
 
        if (!kvm_pte_valid(ctx->old))
@@ -202,6 +203,8 @@ static int fix_host_ownership_walker(const struct kvm_pgtable_visit_ctx *ctx,
        if (!addr_is_memory(phys))
                return -EINVAL;
 
+       page = hyp_phys_to_page(phys);
+
        /*
         * Adjust the host stage-2 mappings to match the ownership attributes
         * configured in the hypervisor stage-1, and make sure to propagate them
@@ -210,15 +213,15 @@ static int fix_host_ownership_walker(const struct kvm_pgtable_visit_ctx *ctx,
        state = pkvm_getstate(kvm_pgtable_hyp_pte_prot(ctx->old));
        switch (state) {
        case PKVM_PAGE_OWNED:
-               set_hyp_state(phys, PKVM_PAGE_OWNED);
+               set_hyp_state(page, PKVM_PAGE_OWNED);
                return host_stage2_set_owner_locked(phys, PAGE_SIZE, PKVM_ID_HYP);
        case PKVM_PAGE_SHARED_OWNED:
-               set_hyp_state(phys, PKVM_PAGE_SHARED_OWNED);
-               set_host_state(phys, PKVM_PAGE_SHARED_BORROWED);
+               set_hyp_state(page, PKVM_PAGE_SHARED_OWNED);
+               set_host_state(page, PKVM_PAGE_SHARED_BORROWED);
                break;
        case PKVM_PAGE_SHARED_BORROWED:
-               set_hyp_state(phys, PKVM_PAGE_SHARED_BORROWED);
-               set_host_state(phys, PKVM_PAGE_SHARED_OWNED);
+               set_hyp_state(page, PKVM_PAGE_SHARED_BORROWED);
+               set_host_state(page, PKVM_PAGE_SHARED_OWNED);
                break;
        default:
                return -EINVAL;