1 // SPDX-License-Identifier: GPL-2.0-only
3 * Stand-alone page-table allocator for hyp stage-1 and guest stage-2.
4 * No Bombay mix was harmed in the writing of this file.
6 * Copyright (C) 2020 Google LLC
7 * Author: Will Deacon <will@kernel.org>
10 #include <linux/bitfield.h>
11 #include <asm/kvm_pgtable.h>
12 #include <asm/stage2_pgtable.h>
15 #define KVM_PTE_TYPE BIT(1)
16 #define KVM_PTE_TYPE_BLOCK 0
17 #define KVM_PTE_TYPE_PAGE 1
18 #define KVM_PTE_TYPE_TABLE 1
20 #define KVM_PTE_LEAF_ATTR_LO GENMASK(11, 2)
22 #define KVM_PTE_LEAF_ATTR_LO_S1_ATTRIDX GENMASK(4, 2)
23 #define KVM_PTE_LEAF_ATTR_LO_S1_AP GENMASK(7, 6)
24 #define KVM_PTE_LEAF_ATTR_LO_S1_AP_RO \
25 ({ cpus_have_final_cap(ARM64_KVM_HVHE) ? 2 : 3; })
26 #define KVM_PTE_LEAF_ATTR_LO_S1_AP_RW \
27 ({ cpus_have_final_cap(ARM64_KVM_HVHE) ? 0 : 1; })
28 #define KVM_PTE_LEAF_ATTR_LO_S1_SH GENMASK(9, 8)
29 #define KVM_PTE_LEAF_ATTR_LO_S1_SH_IS 3
30 #define KVM_PTE_LEAF_ATTR_LO_S1_AF BIT(10)
32 #define KVM_PTE_LEAF_ATTR_LO_S2_MEMATTR GENMASK(5, 2)
33 #define KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R BIT(6)
34 #define KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W BIT(7)
35 #define KVM_PTE_LEAF_ATTR_LO_S2_SH GENMASK(9, 8)
36 #define KVM_PTE_LEAF_ATTR_LO_S2_SH_IS 3
37 #define KVM_PTE_LEAF_ATTR_LO_S2_AF BIT(10)
39 #define KVM_PTE_LEAF_ATTR_HI GENMASK(63, 50)
41 #define KVM_PTE_LEAF_ATTR_HI_SW GENMASK(58, 55)
43 #define KVM_PTE_LEAF_ATTR_HI_S1_XN BIT(54)
45 #define KVM_PTE_LEAF_ATTR_HI_S2_XN BIT(54)
47 #define KVM_PTE_LEAF_ATTR_HI_S1_GP BIT(50)
49 #define KVM_PTE_LEAF_ATTR_S2_PERMS (KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R | \
50 KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W | \
51 KVM_PTE_LEAF_ATTR_HI_S2_XN)
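/*
 * Note: the masks above mirror the VMSAv8-64 descriptor layout. Bits[1:0]
 * encode validity and the block/page/table type, bits[11:2] hold the lower
 * attributes (AttrIndx or MemAttr, access permissions, shareability and the
 * access flag) and bits[63:50] hold the upper attributes (XN, the
 * software-reserved bits and, for stage-1 with BTI, the GP bit). When LPA2
 * is enabled the descriptors carry no shareability field, which is why the
 * SH attributes are only programmed when !kvm_lpa2_is_enabled() below.
 */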
53 #define KVM_INVALID_PTE_OWNER_MASK GENMASK(9, 2)
54 #define KVM_MAX_OWNER_ID 1
57 * Used to indicate a pte for which a 'break-before-make' sequence is in
60 #define KVM_INVALID_PTE_LOCKED BIT(10)
62 struct kvm_pgtable_walk_data {
63 struct kvm_pgtable_walker *walker;
70 static bool kvm_pgtable_walk_skip_bbm_tlbi(const struct kvm_pgtable_visit_ctx *ctx)
72 return unlikely(ctx->flags & KVM_PGTABLE_WALK_SKIP_BBM_TLBI);
75 static bool kvm_pgtable_walk_skip_cmo(const struct kvm_pgtable_visit_ctx *ctx)
77 return unlikely(ctx->flags & KVM_PGTABLE_WALK_SKIP_CMO);
80 static bool kvm_phys_is_valid(u64 phys)
82 u64 parange_max = kvm_get_parange_max();
83 u8 shift = id_aa64mmfr0_parange_to_phys_shift(parange_max);
85 return phys < BIT(shift);
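/*
 * For example, on a system whose PARange limits output addresses to 44
 * bits, any PA at or above BIT(44) is considered invalid here. The map
 * walkers rely on this by passing an out-of-range PA (KVM_PHYS_INVALID)
 * when a mapping carries no output address, e.g. ownership annotations.
 */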
88 static bool kvm_block_mapping_supported(const struct kvm_pgtable_visit_ctx *ctx, u64 phys)
90 u64 granule = kvm_granule_size(ctx->level);
92 if (!kvm_level_supports_block_mapping(ctx->level))
95 if (granule > (ctx->end - ctx->addr))
98 if (kvm_phys_is_valid(phys) && !IS_ALIGNED(phys, granule))
101 return IS_ALIGNED(ctx->addr, granule);
104 static u32 kvm_pgtable_idx(struct kvm_pgtable_walk_data *data, s8 level)
106 u64 shift = kvm_granule_shift(level);
107 u64 mask = BIT(PAGE_SHIFT - 3) - 1;
109 return (data->addr >> shift) & mask;
112 static u32 kvm_pgd_page_idx(struct kvm_pgtable *pgt, u64 addr)
114 u64 shift = kvm_granule_shift(pgt->start_level - 1); /* May underflow */
115 u64 mask = BIT(pgt->ia_bits) - 1;
117 return (addr & mask) >> shift;
120 static u32 kvm_pgd_pages(u32 ia_bits, s8 start_level)
122 struct kvm_pgtable pgt = {
124 .start_level = start_level,
127 return kvm_pgd_page_idx(&pgt, -1ULL) + 1;
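/*
 * The stage-2 PGD may be built from several physically contiguous pages
 * when the IPA space is wider than a single start-level table can cover.
 * For example, assuming 4K pages, a 40-bit IPA space with start_level 1
 * gives kvm_granule_shift(0) == 39, so kvm_pgd_page_idx(&pgt, -1ULL) == 1
 * and kvm_pgd_pages() reports two concatenated PGD pages.
 */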
130 static bool kvm_pte_table(kvm_pte_t pte, s8 level)
132 if (level == KVM_PGTABLE_LAST_LEVEL)
135 if (!kvm_pte_valid(pte))
138 return FIELD_GET(KVM_PTE_TYPE, pte) == KVM_PTE_TYPE_TABLE;
141 static kvm_pte_t *kvm_pte_follow(kvm_pte_t pte, struct kvm_pgtable_mm_ops *mm_ops)
143 return mm_ops->phys_to_virt(kvm_pte_to_phys(pte));
146 static void kvm_clear_pte(kvm_pte_t *ptep)
148 WRITE_ONCE(*ptep, 0);
151 static kvm_pte_t kvm_init_table_pte(kvm_pte_t *childp, struct kvm_pgtable_mm_ops *mm_ops)
153 kvm_pte_t pte = kvm_phys_to_pte(mm_ops->virt_to_phys(childp));
155 pte |= FIELD_PREP(KVM_PTE_TYPE, KVM_PTE_TYPE_TABLE);
156 pte |= KVM_PTE_VALID;
160 static kvm_pte_t kvm_init_valid_leaf_pte(u64 pa, kvm_pte_t attr, s8 level)
162 kvm_pte_t pte = kvm_phys_to_pte(pa);
163 u64 type = (level == KVM_PGTABLE_LAST_LEVEL) ? KVM_PTE_TYPE_PAGE :
166 pte |= attr & (KVM_PTE_LEAF_ATTR_LO | KVM_PTE_LEAF_ATTR_HI);
167 pte |= FIELD_PREP(KVM_PTE_TYPE, type);
168 pte |= KVM_PTE_VALID;
173 static kvm_pte_t kvm_init_invalid_leaf_owner(u8 owner_id)
175 return FIELD_PREP(KVM_INVALID_PTE_OWNER_MASK, owner_id);
178 static int kvm_pgtable_visitor_cb(struct kvm_pgtable_walk_data *data,
179 const struct kvm_pgtable_visit_ctx *ctx,
180 enum kvm_pgtable_walk_flags visit)
182 struct kvm_pgtable_walker *walker = data->walker;
184 /* Ensure the appropriate lock is held (e.g. RCU lock for stage-2 MMU) */
185 WARN_ON_ONCE(kvm_pgtable_walk_shared(ctx) && !kvm_pgtable_walk_lock_held());
186 return walker->cb(ctx, visit);
189 static bool kvm_pgtable_walk_continue(const struct kvm_pgtable_walker *walker,
193 * Visitor callbacks return EAGAIN when the conditions that led to a
194 * fault are no longer reflected in the page tables due to a race to
195 * update a PTE. In the context of a fault handler this is interpreted
196 * as a signal to retry guest execution.
198 * Ignore the return code altogether for walkers outside a fault handler
199 * (e.g. write protecting a range of memory) and chug along with the
203 return !(walker->flags & KVM_PGTABLE_WALK_HANDLE_FAULT);
208 static int __kvm_pgtable_walk(struct kvm_pgtable_walk_data *data,
209 struct kvm_pgtable_mm_ops *mm_ops, kvm_pteref_t pgtable, s8 level);
211 static inline int __kvm_pgtable_visit(struct kvm_pgtable_walk_data *data,
212 struct kvm_pgtable_mm_ops *mm_ops,
213 kvm_pteref_t pteref, s8 level)
215 enum kvm_pgtable_walk_flags flags = data->walker->flags;
216 kvm_pte_t *ptep = kvm_dereference_pteref(data->walker, pteref);
217 struct kvm_pgtable_visit_ctx ctx = {
219 .old = READ_ONCE(*ptep),
220 .arg = data->walker->arg,
222 .start = data->start,
231 bool table = kvm_pte_table(ctx.old, level);
233 if (table && (ctx.flags & KVM_PGTABLE_WALK_TABLE_PRE)) {
234 ret = kvm_pgtable_visitor_cb(data, &ctx, KVM_PGTABLE_WALK_TABLE_PRE);
238 if (!table && (ctx.flags & KVM_PGTABLE_WALK_LEAF)) {
239 ret = kvm_pgtable_visitor_cb(data, &ctx, KVM_PGTABLE_WALK_LEAF);
244 * Reload the page table after invoking the walker callback for leaf
245 * entries or after pre-order traversal, to allow the walker to descend
246 * into a newly installed or replaced table.
249 ctx.old = READ_ONCE(*ptep);
250 table = kvm_pte_table(ctx.old, level);
253 if (!kvm_pgtable_walk_continue(data->walker, ret))
257 data->addr = ALIGN_DOWN(data->addr, kvm_granule_size(level));
258 data->addr += kvm_granule_size(level);
262 childp = (kvm_pteref_t)kvm_pte_follow(ctx.old, mm_ops);
263 ret = __kvm_pgtable_walk(data, mm_ops, childp, level + 1);
264 if (!kvm_pgtable_walk_continue(data->walker, ret))
267 if (ctx.flags & KVM_PGTABLE_WALK_TABLE_POST)
268 ret = kvm_pgtable_visitor_cb(data, &ctx, KVM_PGTABLE_WALK_TABLE_POST);
271 if (kvm_pgtable_walk_continue(data->walker, ret))
277 static int __kvm_pgtable_walk(struct kvm_pgtable_walk_data *data,
278 struct kvm_pgtable_mm_ops *mm_ops, kvm_pteref_t pgtable, s8 level)
283 if (WARN_ON_ONCE(level < KVM_PGTABLE_FIRST_LEVEL ||
284 level > KVM_PGTABLE_LAST_LEVEL))
287 for (idx = kvm_pgtable_idx(data, level); idx < PTRS_PER_PTE; ++idx) {
288 kvm_pteref_t pteref = &pgtable[idx];
290 if (data->addr >= data->end)
293 ret = __kvm_pgtable_visit(data, mm_ops, pteref, level);
301 static int _kvm_pgtable_walk(struct kvm_pgtable *pgt, struct kvm_pgtable_walk_data *data)
305 u64 limit = BIT(pgt->ia_bits);
307 if (data->addr > limit || data->end > limit)
313 for (idx = kvm_pgd_page_idx(pgt, data->addr); data->addr < data->end; ++idx) {
314 kvm_pteref_t pteref = &pgt->pgd[idx * PTRS_PER_PTE];
316 ret = __kvm_pgtable_walk(data, pgt->mm_ops, pteref, pgt->start_level);
324 int kvm_pgtable_walk(struct kvm_pgtable *pgt, u64 addr, u64 size,
325 struct kvm_pgtable_walker *walker)
327 struct kvm_pgtable_walk_data walk_data = {
328 .start = ALIGN_DOWN(addr, PAGE_SIZE),
329 .addr = ALIGN_DOWN(addr, PAGE_SIZE),
330 .end = PAGE_ALIGN(walk_data.addr + size),
335 r = kvm_pgtable_walk_begin(walker);
339 r = _kvm_pgtable_walk(pgt, &walk_data);
340 kvm_pgtable_walk_end(walker);
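/*
 * Typical usage, as an illustrative sketch only (count_valid_walker is a
 * made-up example, not an in-tree walker): the caller bundles a callback,
 * an optional argument and the visit flags into a kvm_pgtable_walker and
 * hands it to kvm_pgtable_walk():
 *
 *	static int count_valid_walker(const struct kvm_pgtable_visit_ctx *ctx,
 *				      enum kvm_pgtable_walk_flags visit)
 *	{
 *		u64 *nr_valid = ctx->arg;
 *
 *		if (kvm_pte_valid(ctx->old))
 *			(*nr_valid)++;
 *
 *		return 0;
 *	}
 *
 *	u64 nr_valid = 0;
 *	struct kvm_pgtable_walker walker = {
 *		.cb	= count_valid_walker,
 *		.arg	= &nr_valid,
 *		.flags	= KVM_PGTABLE_WALK_LEAF,
 *	};
 *
 *	kvm_pgtable_walk(pgt, addr, size, &walker);
 */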
345 struct leaf_walk_data {
350 static int leaf_walker(const struct kvm_pgtable_visit_ctx *ctx,
351 enum kvm_pgtable_walk_flags visit)
353 struct leaf_walk_data *data = ctx->arg;
355 data->pte = ctx->old;
356 data->level = ctx->level;
361 int kvm_pgtable_get_leaf(struct kvm_pgtable *pgt, u64 addr,
362 kvm_pte_t *ptep, s8 *level)
364 struct leaf_walk_data data;
365 struct kvm_pgtable_walker walker = {
367 .flags = KVM_PGTABLE_WALK_LEAF,
372 ret = kvm_pgtable_walk(pgt, ALIGN_DOWN(addr, PAGE_SIZE),
384 struct hyp_map_data {
389 static int hyp_set_prot_attr(enum kvm_pgtable_prot prot, kvm_pte_t *ptep)
391 bool device = prot & KVM_PGTABLE_PROT_DEVICE;
392 u32 mtype = device ? MT_DEVICE_nGnRE : MT_NORMAL;
393 kvm_pte_t attr = FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S1_ATTRIDX, mtype);
394 u32 sh = KVM_PTE_LEAF_ATTR_LO_S1_SH_IS;
395 u32 ap = (prot & KVM_PGTABLE_PROT_W) ? KVM_PTE_LEAF_ATTR_LO_S1_AP_RW :
396 KVM_PTE_LEAF_ATTR_LO_S1_AP_RO;
398 if (!(prot & KVM_PGTABLE_PROT_R))
401 if (prot & KVM_PGTABLE_PROT_X) {
402 if (prot & KVM_PGTABLE_PROT_W)
408 if (system_supports_bti_kernel())
409 attr |= KVM_PTE_LEAF_ATTR_HI_S1_GP;
411 attr |= KVM_PTE_LEAF_ATTR_HI_S1_XN;
414 attr |= FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S1_AP, ap);
415 if (!kvm_lpa2_is_enabled())
416 attr |= FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S1_SH, sh);
417 attr |= KVM_PTE_LEAF_ATTR_LO_S1_AF;
418 attr |= prot & KVM_PTE_LEAF_ATTR_HI_SW;
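/*
 * As a concrete example, requesting KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_W
 * on normal memory produces AttrIndx = MT_NORMAL, AP = RW, Inner Shareable
 * (unless LPA2 is enabled, in which case no SH field is written), AF set
 * and XN set, since execute permission was not requested.
 */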
424 enum kvm_pgtable_prot kvm_pgtable_hyp_pte_prot(kvm_pte_t pte)
426 enum kvm_pgtable_prot prot = pte & KVM_PTE_LEAF_ATTR_HI_SW;
429 if (!kvm_pte_valid(pte))
432 if (!(pte & KVM_PTE_LEAF_ATTR_HI_S1_XN))
433 prot |= KVM_PGTABLE_PROT_X;
435 ap = FIELD_GET(KVM_PTE_LEAF_ATTR_LO_S1_AP, pte);
436 if (ap == KVM_PTE_LEAF_ATTR_LO_S1_AP_RO)
437 prot |= KVM_PGTABLE_PROT_R;
438 else if (ap == KVM_PTE_LEAF_ATTR_LO_S1_AP_RW)
439 prot |= KVM_PGTABLE_PROT_RW;
444 static bool hyp_map_walker_try_leaf(const struct kvm_pgtable_visit_ctx *ctx,
445 struct hyp_map_data *data)
447 u64 phys = data->phys + (ctx->addr - ctx->start);
450 if (!kvm_block_mapping_supported(ctx, phys))
453 new = kvm_init_valid_leaf_pte(phys, data->attr, ctx->level);
456 if (!kvm_pte_valid(ctx->old))
457 ctx->mm_ops->get_page(ctx->ptep);
458 else if (WARN_ON((ctx->old ^ new) & ~KVM_PTE_LEAF_ATTR_HI_SW))
461 smp_store_release(ctx->ptep, new);
465 static int hyp_map_walker(const struct kvm_pgtable_visit_ctx *ctx,
466 enum kvm_pgtable_walk_flags visit)
468 kvm_pte_t *childp, new;
469 struct hyp_map_data *data = ctx->arg;
470 struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
472 if (hyp_map_walker_try_leaf(ctx, data))
475 if (WARN_ON(ctx->level == KVM_PGTABLE_LAST_LEVEL))
478 childp = (kvm_pte_t *)mm_ops->zalloc_page(NULL);
482 new = kvm_init_table_pte(childp, mm_ops);
483 mm_ops->get_page(ctx->ptep);
484 smp_store_release(ctx->ptep, new);
489 int kvm_pgtable_hyp_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys,
490 enum kvm_pgtable_prot prot)
493 struct hyp_map_data map_data = {
494 .phys = ALIGN_DOWN(phys, PAGE_SIZE),
496 struct kvm_pgtable_walker walker = {
497 .cb = hyp_map_walker,
498 .flags = KVM_PGTABLE_WALK_LEAF,
502 ret = hyp_set_prot_attr(prot, &map_data.attr);
506 ret = kvm_pgtable_walk(pgt, addr, size, &walker);
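/*
 * Illustrative call (hypothetical addresses): mapping a single page of
 * normal memory read/write into the hyp stage-1 tables looks like
 *
 *	ret = kvm_pgtable_hyp_map(pgt, hyp_va, PAGE_SIZE, phys,
 *				  KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_W);
 *
 * Unaligned @addr and @phys are aligned down to the page boundary by the
 * walker and by .phys above.
 */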
512 static int hyp_unmap_walker(const struct kvm_pgtable_visit_ctx *ctx,
513 enum kvm_pgtable_walk_flags visit)
515 kvm_pte_t *childp = NULL;
516 u64 granule = kvm_granule_size(ctx->level);
517 u64 *unmapped = ctx->arg;
518 struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
520 if (!kvm_pte_valid(ctx->old))
523 if (kvm_pte_table(ctx->old, ctx->level)) {
524 childp = kvm_pte_follow(ctx->old, mm_ops);
526 if (mm_ops->page_count(childp) != 1)
529 kvm_clear_pte(ctx->ptep);
531 __tlbi_level(vae2is, __TLBI_VADDR(ctx->addr, 0), ctx->level);
533 if (ctx->end - ctx->addr < granule)
536 kvm_clear_pte(ctx->ptep);
538 __tlbi_level(vale2is, __TLBI_VADDR(ctx->addr, 0), ctx->level);
539 *unmapped += granule;
544 mm_ops->put_page(ctx->ptep);
547 mm_ops->put_page(childp);
552 u64 kvm_pgtable_hyp_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size)
555 struct kvm_pgtable_walker walker = {
556 .cb = hyp_unmap_walker,
558 .flags = KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST,
561 if (!pgt->mm_ops->page_count)
564 kvm_pgtable_walk(pgt, addr, size, &walker);
568 int kvm_pgtable_hyp_init(struct kvm_pgtable *pgt, u32 va_bits,
569 struct kvm_pgtable_mm_ops *mm_ops)
571 s8 start_level = KVM_PGTABLE_LAST_LEVEL + 1 -
572 ARM64_HW_PGTABLE_LEVELS(va_bits);
574 if (start_level < KVM_PGTABLE_FIRST_LEVEL ||
575 start_level > KVM_PGTABLE_LAST_LEVEL)
578 pgt->pgd = (kvm_pteref_t)mm_ops->zalloc_page(NULL);
582 pgt->ia_bits = va_bits;
583 pgt->start_level = start_level;
584 pgt->mm_ops = mm_ops;
586 pgt->force_pte_cb = NULL;
591 static int hyp_free_walker(const struct kvm_pgtable_visit_ctx *ctx,
592 enum kvm_pgtable_walk_flags visit)
594 struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
596 if (!kvm_pte_valid(ctx->old))
599 mm_ops->put_page(ctx->ptep);
601 if (kvm_pte_table(ctx->old, ctx->level))
602 mm_ops->put_page(kvm_pte_follow(ctx->old, mm_ops));
607 void kvm_pgtable_hyp_destroy(struct kvm_pgtable *pgt)
609 struct kvm_pgtable_walker walker = {
610 .cb = hyp_free_walker,
611 .flags = KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST,
614 WARN_ON(kvm_pgtable_walk(pgt, 0, BIT(pgt->ia_bits), &walker));
615 pgt->mm_ops->put_page(kvm_dereference_pteref(&walker, pgt->pgd));
619 struct stage2_map_data {
627 struct kvm_s2_mmu *mmu;
630 /* Force mappings to page granularity */
634 u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift)
636 u64 vtcr = VTCR_EL2_FLAGS;
639 vtcr |= kvm_get_parange(mmfr0) << VTCR_EL2_PS_SHIFT;
640 vtcr |= VTCR_EL2_T0SZ(phys_shift);
642 * Use a minimum 2 level page table to prevent splitting
643 * host PMD huge pages at stage2.
645 lvls = stage2_pgtable_levels(phys_shift);
650 * When LPA2 is enabled, the HW supports an extra level of translation
651 * (for 5 in total) when using 4K pages. It also introduces VTCR_EL2.SL2
652 * as an addition to SL0 to enable encoding this extra start level.
653 * However, since we always use concatenated pages for the first level
654 * lookup, we will never need this extra level and therefore do not need
657 vtcr |= VTCR_EL2_LVLS_TO_SL0(lvls);
659 #ifdef CONFIG_ARM64_HW_AFDBM
661 * Enable the Hardware Access Flag management, unconditionally
662 * on all CPUs. In systems that have asymmetric support for the feature
663 * this allows KVM to leverage hardware support on the subset of cores
664 * that implement the feature.
666 * The architecture requires VTCR_EL2.HA to be RES0 (thus ignored by
667 * hardware) on implementations that do not advertise support for the
668 * feature. As such, setting HA unconditionally is safe, unless you
669 * happen to be running on a design that has unadvertised support for
670 * HAFDBS. Here be dragons.
672 if (!cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38))
674 #endif /* CONFIG_ARM64_HW_AFDBM */
676 if (kvm_lpa2_is_enabled())
679 /* Set the vmid bits */
680 vtcr |= (get_vmid_bits(mmfr1) == 16) ?
687 static bool stage2_has_fwb(struct kvm_pgtable *pgt)
689 if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
692 return !(pgt->flags & KVM_PGTABLE_S2_NOFWB);
695 void kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
696 phys_addr_t addr, size_t size)
698 unsigned long pages, inval_pages;
700 if (!system_supports_tlb_range()) {
701 kvm_call_hyp(__kvm_tlb_flush_vmid, mmu);
705 pages = size >> PAGE_SHIFT;
707 inval_pages = min(pages, MAX_TLBI_RANGE_PAGES);
708 kvm_call_hyp(__kvm_tlb_flush_vmid_range, mmu, addr, inval_pages);
710 addr += inval_pages << PAGE_SHIFT;
711 pages -= inval_pages;
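/*
 * When FEAT_TLBIRANGE is available, this loop batches the invalidation into
 * at most MAX_TLBI_RANGE_PAGES pages per __kvm_tlb_flush_vmid_range() call;
 * without the feature the whole VMID is flushed above. For example,
 * invalidating 1GiB with 4K pages covers 262144 pages, issued in one or
 * more ranged calls depending on that limit.
 */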
715 #define KVM_S2_MEMATTR(pgt, attr) PAGE_S2_MEMATTR(attr, stage2_has_fwb(pgt))
717 static int stage2_set_prot_attr(struct kvm_pgtable *pgt, enum kvm_pgtable_prot prot,
721 u32 sh = KVM_PTE_LEAF_ATTR_LO_S2_SH_IS;
723 switch (prot & (KVM_PGTABLE_PROT_DEVICE |
724 KVM_PGTABLE_PROT_NORMAL_NC)) {
725 case KVM_PGTABLE_PROT_DEVICE | KVM_PGTABLE_PROT_NORMAL_NC:
727 case KVM_PGTABLE_PROT_DEVICE:
728 if (prot & KVM_PGTABLE_PROT_X)
730 attr = KVM_S2_MEMATTR(pgt, DEVICE_nGnRE);
732 case KVM_PGTABLE_PROT_NORMAL_NC:
733 if (prot & KVM_PGTABLE_PROT_X)
735 attr = KVM_S2_MEMATTR(pgt, NORMAL_NC);
738 attr = KVM_S2_MEMATTR(pgt, NORMAL);
741 if (!(prot & KVM_PGTABLE_PROT_X))
742 attr |= KVM_PTE_LEAF_ATTR_HI_S2_XN;
744 if (prot & KVM_PGTABLE_PROT_R)
745 attr |= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R;
747 if (prot & KVM_PGTABLE_PROT_W)
748 attr |= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W;
750 if (!kvm_lpa2_is_enabled())
751 attr |= FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S2_SH, sh);
753 attr |= KVM_PTE_LEAF_ATTR_LO_S2_AF;
754 attr |= prot & KVM_PTE_LEAF_ATTR_HI_SW;
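/*
 * For instance, requesting read, write and execute on normal memory yields
 * MemAttr = NORMAL (taking FWB into account via KVM_S2_MEMATTR()), both
 * S2AP bits set, XN clear, Inner Shareable (unless LPA2 is enabled) and AF
 * set.
 */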
760 enum kvm_pgtable_prot kvm_pgtable_stage2_pte_prot(kvm_pte_t pte)
762 enum kvm_pgtable_prot prot = pte & KVM_PTE_LEAF_ATTR_HI_SW;
764 if (!kvm_pte_valid(pte))
767 if (pte & KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R)
768 prot |= KVM_PGTABLE_PROT_R;
769 if (pte & KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W)
770 prot |= KVM_PGTABLE_PROT_W;
771 if (!(pte & KVM_PTE_LEAF_ATTR_HI_S2_XN))
772 prot |= KVM_PGTABLE_PROT_X;
777 static bool stage2_pte_needs_update(kvm_pte_t old, kvm_pte_t new)
779 if (!kvm_pte_valid(old) || !kvm_pte_valid(new))
782 return ((old ^ new) & (~KVM_PTE_LEAF_ATTR_S2_PERMS));
785 static bool stage2_pte_is_counted(kvm_pte_t pte)
788 * The refcount tracks valid entries as well as invalid entries that
789 * encode ownership of a page by an entity other than the page-table
790 * owner, whose id is 0.
795 static bool stage2_pte_is_locked(kvm_pte_t pte)
797 return !kvm_pte_valid(pte) && (pte & KVM_INVALID_PTE_LOCKED);
800 static bool stage2_try_set_pte(const struct kvm_pgtable_visit_ctx *ctx, kvm_pte_t new)
802 if (!kvm_pgtable_walk_shared(ctx)) {
803 WRITE_ONCE(*ctx->ptep, new);
807 return cmpxchg(ctx->ptep, ctx->old, new) == ctx->old;
811 * stage2_try_break_pte() - Invalidates a pte according to the
812 * 'break-before-make' requirements of the
815 * @ctx: context of the visited pte.
818 * Returns: true if the pte was successfully broken.
820 * If the removed pte was valid, performs the necessary serialization and TLB
821 * invalidation for the old value. For counted ptes, drops the reference count
822 * on the containing table page.
824 static bool stage2_try_break_pte(const struct kvm_pgtable_visit_ctx *ctx,
825 struct kvm_s2_mmu *mmu)
827 struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
829 if (stage2_pte_is_locked(ctx->old)) {
831 * Should never occur if this walker has exclusive access to the
834 WARN_ON(!kvm_pgtable_walk_shared(ctx));
838 if (!stage2_try_set_pte(ctx, KVM_INVALID_PTE_LOCKED))
841 if (!kvm_pgtable_walk_skip_bbm_tlbi(ctx)) {
843 * Perform the appropriate TLB invalidation based on the
844 * evicted pte value (if any).
846 if (kvm_pte_table(ctx->old, ctx->level))
847 kvm_tlb_flush_vmid_range(mmu, ctx->addr,
848 kvm_granule_size(ctx->level));
849 else if (kvm_pte_valid(ctx->old))
850 kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu,
851 ctx->addr, ctx->level);
854 if (stage2_pte_is_counted(ctx->old))
855 mm_ops->put_page(ctx->ptep);
860 static void stage2_make_pte(const struct kvm_pgtable_visit_ctx *ctx, kvm_pte_t new)
862 struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
864 WARN_ON(!stage2_pte_is_locked(*ctx->ptep));
866 if (stage2_pte_is_counted(new))
867 mm_ops->get_page(ctx->ptep);
869 smp_store_release(ctx->ptep, new);
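/*
 * Together, stage2_try_break_pte() and stage2_make_pte() give the
 * break-before-make pattern used by walkers that may race with one another
 * (illustrative sketch of the caller's flow, see stage2_map_walker_try_leaf()):
 *
 *	if (!stage2_try_break_pte(ctx, mmu))	// old -> LOCKED, TLBI, put_page
 *		return -EAGAIN;
 *
 *	// ...perform CMOs / build the replacement entry...
 *
 *	stage2_make_pte(ctx, new);		// get_page + smp_store_release()
 */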
872 static bool stage2_unmap_defer_tlb_flush(struct kvm_pgtable *pgt)
875 * If FEAT_TLBIRANGE is implemented, defer the individual
876 * TLB invalidations until the entire walk is finished, and
877 * then use the range-based TLBI instructions to do the
878 * invalidations. Condition deferred TLB invalidation on the
879 * system supporting FWB as the optimization is entirely
880 * pointless when the unmap walker needs to perform CMOs.
882 return system_supports_tlb_range() && stage2_has_fwb(pgt);
885 static void stage2_unmap_put_pte(const struct kvm_pgtable_visit_ctx *ctx,
886 struct kvm_s2_mmu *mmu,
887 struct kvm_pgtable_mm_ops *mm_ops)
889 struct kvm_pgtable *pgt = ctx->arg;
892 * Clear the existing PTE, and perform break-before-make if it was
893 * valid. Depending on the system support, defer the TLB maintenance
894 * for the same until the entire unmap walk is completed.
896 if (kvm_pte_valid(ctx->old)) {
897 kvm_clear_pte(ctx->ptep);
899 if (!stage2_unmap_defer_tlb_flush(pgt))
900 kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu,
901 ctx->addr, ctx->level);
904 mm_ops->put_page(ctx->ptep);
907 static bool stage2_pte_cacheable(struct kvm_pgtable *pgt, kvm_pte_t pte)
909 u64 memattr = pte & KVM_PTE_LEAF_ATTR_LO_S2_MEMATTR;
910 return memattr == KVM_S2_MEMATTR(pgt, NORMAL);
913 static bool stage2_pte_executable(kvm_pte_t pte)
915 return !(pte & KVM_PTE_LEAF_ATTR_HI_S2_XN);
918 static u64 stage2_map_walker_phys_addr(const struct kvm_pgtable_visit_ctx *ctx,
919 const struct stage2_map_data *data)
921 u64 phys = data->phys;
924 * Stage-2 walks to update ownership data are communicated to the map
925 * walker using an invalid PA. Avoid offsetting an already invalid PA,
926 * which could overflow and make the address valid again.
928 if (!kvm_phys_is_valid(phys))
932 * Otherwise, work out the correct PA based on how far the walk has
935 return phys + (ctx->addr - ctx->start);
938 static bool stage2_leaf_mapping_allowed(const struct kvm_pgtable_visit_ctx *ctx,
939 struct stage2_map_data *data)
941 u64 phys = stage2_map_walker_phys_addr(ctx, data);
943 if (data->force_pte && ctx->level < KVM_PGTABLE_LAST_LEVEL)
946 return kvm_block_mapping_supported(ctx, phys);
949 static int stage2_map_walker_try_leaf(const struct kvm_pgtable_visit_ctx *ctx,
950 struct stage2_map_data *data)
953 u64 phys = stage2_map_walker_phys_addr(ctx, data);
954 u64 granule = kvm_granule_size(ctx->level);
955 struct kvm_pgtable *pgt = data->mmu->pgt;
956 struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
958 if (!stage2_leaf_mapping_allowed(ctx, data))
961 if (kvm_phys_is_valid(phys))
962 new = kvm_init_valid_leaf_pte(phys, data->attr, ctx->level);
964 new = kvm_init_invalid_leaf_owner(data->owner_id);
967 * Skip updating the PTE if we are trying to recreate the exact
968 * same mapping or only change the access permissions. Instead,
969 * the vCPU will take another fault from the guest if the update is
970 * still needed and then go through the permission-relaxation path.
972 if (!stage2_pte_needs_update(ctx->old, new))
975 if (!stage2_try_break_pte(ctx, data->mmu))
978 /* Perform CMOs before installation of the guest stage-2 PTE */
979 if (!kvm_pgtable_walk_skip_cmo(ctx) && mm_ops->dcache_clean_inval_poc &&
980 stage2_pte_cacheable(pgt, new))
981 mm_ops->dcache_clean_inval_poc(kvm_pte_follow(new, mm_ops),
984 if (!kvm_pgtable_walk_skip_cmo(ctx) && mm_ops->icache_inval_pou &&
985 stage2_pte_executable(new))
986 mm_ops->icache_inval_pou(kvm_pte_follow(new, mm_ops), granule);
988 stage2_make_pte(ctx, new);
993 static int stage2_map_walk_table_pre(const struct kvm_pgtable_visit_ctx *ctx,
994 struct stage2_map_data *data)
996 struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
997 kvm_pte_t *childp = kvm_pte_follow(ctx->old, mm_ops);
1000 if (!stage2_leaf_mapping_allowed(ctx, data))
1003 ret = stage2_map_walker_try_leaf(ctx, data);
1007 mm_ops->free_unlinked_table(childp, ctx->level);
1011 static int stage2_map_walk_leaf(const struct kvm_pgtable_visit_ctx *ctx,
1012 struct stage2_map_data *data)
1014 struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
1015 kvm_pte_t *childp, new;
1018 ret = stage2_map_walker_try_leaf(ctx, data);
1022 if (WARN_ON(ctx->level == KVM_PGTABLE_LAST_LEVEL))
1025 if (!data->memcache)
1028 childp = mm_ops->zalloc_page(data->memcache);
1032 if (!stage2_try_break_pte(ctx, data->mmu)) {
1033 mm_ops->put_page(childp);
1038 * If we've run into an existing block mapping then replace it with
1039 * a table. Accesses beyond 'end' that fall within the new table
1040 * will be mapped lazily.
1042 new = kvm_init_table_pte(childp, mm_ops);
1043 stage2_make_pte(ctx, new);
1049 * The TABLE_PRE callback runs for table entries on the way down, looking
1050 * for table entries which we could conceivably replace with a block entry
1051 * for this mapping. If it finds one it replaces the entry and calls
1052 * kvm_pgtable_mm_ops::free_unlinked_table() to tear down the detached table.
1054 * Otherwise, the LEAF callback performs the mapping at the existing leaves
1057 static int stage2_map_walker(const struct kvm_pgtable_visit_ctx *ctx,
1058 enum kvm_pgtable_walk_flags visit)
1060 struct stage2_map_data *data = ctx->arg;
1063 case KVM_PGTABLE_WALK_TABLE_PRE:
1064 return stage2_map_walk_table_pre(ctx, data);
1065 case KVM_PGTABLE_WALK_LEAF:
1066 return stage2_map_walk_leaf(ctx, data);
1072 int kvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size,
1073 u64 phys, enum kvm_pgtable_prot prot,
1074 void *mc, enum kvm_pgtable_walk_flags flags)
1077 struct stage2_map_data map_data = {
1078 .phys = ALIGN_DOWN(phys, PAGE_SIZE),
1081 .force_pte = pgt->force_pte_cb && pgt->force_pte_cb(addr, addr + size, prot),
1083 struct kvm_pgtable_walker walker = {
1084 .cb = stage2_map_walker,
1086 KVM_PGTABLE_WALK_TABLE_PRE |
1087 KVM_PGTABLE_WALK_LEAF,
1091 if (WARN_ON((pgt->flags & KVM_PGTABLE_S2_IDMAP) && (addr != phys)))
1094 ret = stage2_set_prot_attr(pgt, prot, &map_data.attr);
1098 ret = kvm_pgtable_walk(pgt, addr, size, &walker);
1103 int kvm_pgtable_stage2_set_owner(struct kvm_pgtable *pgt, u64 addr, u64 size,
1104 void *mc, u8 owner_id)
1107 struct stage2_map_data map_data = {
1108 .phys = KVM_PHYS_INVALID,
1111 .owner_id = owner_id,
1114 struct kvm_pgtable_walker walker = {
1115 .cb = stage2_map_walker,
1116 .flags = KVM_PGTABLE_WALK_TABLE_PRE |
1117 KVM_PGTABLE_WALK_LEAF,
1121 if (owner_id > KVM_MAX_OWNER_ID)
1124 ret = kvm_pgtable_walk(pgt, addr, size, &walker);
1128 static int stage2_unmap_walker(const struct kvm_pgtable_visit_ctx *ctx,
1129 enum kvm_pgtable_walk_flags visit)
1131 struct kvm_pgtable *pgt = ctx->arg;
1132 struct kvm_s2_mmu *mmu = pgt->mmu;
1133 struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
1134 kvm_pte_t *childp = NULL;
1135 bool need_flush = false;
1137 if (!kvm_pte_valid(ctx->old)) {
1138 if (stage2_pte_is_counted(ctx->old)) {
1139 kvm_clear_pte(ctx->ptep);
1140 mm_ops->put_page(ctx->ptep);
1145 if (kvm_pte_table(ctx->old, ctx->level)) {
1146 childp = kvm_pte_follow(ctx->old, mm_ops);
1148 if (mm_ops->page_count(childp) != 1)
1150 } else if (stage2_pte_cacheable(pgt, ctx->old)) {
1151 need_flush = !stage2_has_fwb(pgt);
1155 * This is similar to the map() path in that we unmap the entire
1156 * block entry and rely on the remaining portions being faulted
1159 stage2_unmap_put_pte(ctx, mmu, mm_ops);
1161 if (need_flush && mm_ops->dcache_clean_inval_poc)
1162 mm_ops->dcache_clean_inval_poc(kvm_pte_follow(ctx->old, mm_ops),
1163 kvm_granule_size(ctx->level));
1166 mm_ops->put_page(childp);
1171 int kvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size)
1174 struct kvm_pgtable_walker walker = {
1175 .cb = stage2_unmap_walker,
1177 .flags = KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST,
1180 ret = kvm_pgtable_walk(pgt, addr, size, &walker);
1181 if (stage2_unmap_defer_tlb_flush(pgt))
1182 /* Perform the deferred TLB invalidations */
1183 kvm_tlb_flush_vmid_range(pgt->mmu, addr, size);
1188 struct stage2_attr_data {
1195 static int stage2_attr_walker(const struct kvm_pgtable_visit_ctx *ctx,
1196 enum kvm_pgtable_walk_flags visit)
1198 kvm_pte_t pte = ctx->old;
1199 struct stage2_attr_data *data = ctx->arg;
1200 struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
1202 if (!kvm_pte_valid(ctx->old))
1205 data->level = ctx->level;
1207 pte &= ~data->attr_clr;
1208 pte |= data->attr_set;
1211 * We may race with the CPU trying to set the access flag here,
1212 * but worst-case the access flag update gets lost and will be
1213 * set on the next access instead.
1215 if (data->pte != pte) {
1217 * Invalidate instruction cache before updating the guest
1218 * stage-2 PTE if we are going to add executable permission.
1220 if (mm_ops->icache_inval_pou &&
1221 stage2_pte_executable(pte) && !stage2_pte_executable(ctx->old))
1222 mm_ops->icache_inval_pou(kvm_pte_follow(pte, mm_ops),
1223 kvm_granule_size(ctx->level));
1225 if (!stage2_try_set_pte(ctx, pte))
1232 static int stage2_update_leaf_attrs(struct kvm_pgtable *pgt, u64 addr,
1233 u64 size, kvm_pte_t attr_set,
1234 kvm_pte_t attr_clr, kvm_pte_t *orig_pte,
1235 s8 *level, enum kvm_pgtable_walk_flags flags)
1238 kvm_pte_t attr_mask = KVM_PTE_LEAF_ATTR_LO | KVM_PTE_LEAF_ATTR_HI;
1239 struct stage2_attr_data data = {
1240 .attr_set = attr_set & attr_mask,
1241 .attr_clr = attr_clr & attr_mask,
1243 struct kvm_pgtable_walker walker = {
1244 .cb = stage2_attr_walker,
1246 .flags = flags | KVM_PGTABLE_WALK_LEAF,
1249 ret = kvm_pgtable_walk(pgt, addr, size, &walker);
1254 *orig_pte = data.pte;
1257 *level = data.level;
1261 int kvm_pgtable_stage2_wrprotect(struct kvm_pgtable *pgt, u64 addr, u64 size)
1263 return stage2_update_leaf_attrs(pgt, addr, size, 0,
1264 KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W,
1268 kvm_pte_t kvm_pgtable_stage2_mkyoung(struct kvm_pgtable *pgt, u64 addr)
1273 ret = stage2_update_leaf_attrs(pgt, addr, 1, KVM_PTE_LEAF_ATTR_LO_S2_AF, 0,
1275 KVM_PGTABLE_WALK_HANDLE_FAULT |
1276 KVM_PGTABLE_WALK_SHARED);
1283 struct stage2_age_data {
1288 static int stage2_age_walker(const struct kvm_pgtable_visit_ctx *ctx,
1289 enum kvm_pgtable_walk_flags visit)
1291 kvm_pte_t new = ctx->old & ~KVM_PTE_LEAF_ATTR_LO_S2_AF;
1292 struct stage2_age_data *data = ctx->arg;
1294 if (!kvm_pte_valid(ctx->old) || new == ctx->old)
1300 * stage2_age_walker() is always called while holding the MMU lock for
1301 * write, so this will always succeed. Nonetheless, this deliberately
1302 * follows the race detection pattern of the other stage-2 walkers in
1303 * case the locking mechanics of the MMU notifiers is ever changed.
1305 if (data->mkold && !stage2_try_set_pte(ctx, new))
1309 * "But where's the TLBI?!", you scream.
1310 * "Over in the core code", I sigh.
1312 * See the '->clear_flush_young()' callback on the KVM mmu notifier.
1317 bool kvm_pgtable_stage2_test_clear_young(struct kvm_pgtable *pgt, u64 addr,
1318 u64 size, bool mkold)
1320 struct stage2_age_data data = {
1323 struct kvm_pgtable_walker walker = {
1324 .cb = stage2_age_walker,
1326 .flags = KVM_PGTABLE_WALK_LEAF,
1329 WARN_ON(kvm_pgtable_walk(pgt, addr, size, &walker));
1333 int kvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr,
1334 enum kvm_pgtable_prot prot)
1338 kvm_pte_t set = 0, clr = 0;
1340 if (prot & KVM_PTE_LEAF_ATTR_HI_SW)
1343 if (prot & KVM_PGTABLE_PROT_R)
1344 set |= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R;
1346 if (prot & KVM_PGTABLE_PROT_W)
1347 set |= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W;
1349 if (prot & KVM_PGTABLE_PROT_X)
1350 clr |= KVM_PTE_LEAF_ATTR_HI_S2_XN;
1352 ret = stage2_update_leaf_attrs(pgt, addr, 1, set, clr, NULL, &level,
1353 KVM_PGTABLE_WALK_HANDLE_FAULT |
1354 KVM_PGTABLE_WALK_SHARED);
1355 if (!ret || ret == -EAGAIN)
1356 kvm_call_hyp(__kvm_tlb_flush_vmid_ipa_nsh, pgt->mmu, addr, level);
1360 static int stage2_flush_walker(const struct kvm_pgtable_visit_ctx *ctx,
1361 enum kvm_pgtable_walk_flags visit)
1363 struct kvm_pgtable *pgt = ctx->arg;
1364 struct kvm_pgtable_mm_ops *mm_ops = pgt->mm_ops;
1366 if (!kvm_pte_valid(ctx->old) || !stage2_pte_cacheable(pgt, ctx->old))
1369 if (mm_ops->dcache_clean_inval_poc)
1370 mm_ops->dcache_clean_inval_poc(kvm_pte_follow(ctx->old, mm_ops),
1371 kvm_granule_size(ctx->level));
1375 int kvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size)
1377 struct kvm_pgtable_walker walker = {
1378 .cb = stage2_flush_walker,
1379 .flags = KVM_PGTABLE_WALK_LEAF,
1383 if (stage2_has_fwb(pgt))
1386 return kvm_pgtable_walk(pgt, addr, size, &walker);
1389 kvm_pte_t *kvm_pgtable_stage2_create_unlinked(struct kvm_pgtable *pgt,
1391 enum kvm_pgtable_prot prot,
1392 void *mc, bool force_pte)
1394 struct stage2_map_data map_data = {
1398 .force_pte = force_pte,
1400 struct kvm_pgtable_walker walker = {
1401 .cb = stage2_map_walker,
1402 .flags = KVM_PGTABLE_WALK_LEAF |
1403 KVM_PGTABLE_WALK_SKIP_BBM_TLBI |
1404 KVM_PGTABLE_WALK_SKIP_CMO,
1408 * The input address (.addr) is irrelevant for walking an
1409 * unlinked table. Construct an arbitrary IA range to map
1410 * kvm_granule_size(level) worth of memory.
1412 struct kvm_pgtable_walk_data data = {
1415 .end = kvm_granule_size(level),
1417 struct kvm_pgtable_mm_ops *mm_ops = pgt->mm_ops;
1421 if (!IS_ALIGNED(phys, kvm_granule_size(level)))
1422 return ERR_PTR(-EINVAL);
1424 ret = stage2_set_prot_attr(pgt, prot, &map_data.attr);
1426 return ERR_PTR(ret);
1428 pgtable = mm_ops->zalloc_page(mc);
1430 return ERR_PTR(-ENOMEM);
1432 ret = __kvm_pgtable_walk(&data, mm_ops, (kvm_pteref_t)pgtable,
1435 kvm_pgtable_stage2_free_unlinked(mm_ops, pgtable, level);
1436 return ERR_PTR(ret);
1443 * Get the number of page-tables needed to replace a block with a
1444 * fully populated tree up to the PTE entries. Note that @level is
1445 * the level of the block entry being replaced (i.e. a "level @level entry").
1447 static int stage2_block_get_nr_page_tables(s8 level)
1451 return PTRS_PER_PTE + 1;
1457 WARN_ON_ONCE(level < KVM_PGTABLE_MIN_BLOCK_LEVEL ||
1458 level > KVM_PGTABLE_LAST_LEVEL);
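/*
 * Concretely, with 4K pages: splitting a level-1 block (1GiB) requires one
 * page for the level-2 table plus PTRS_PER_PTE pages of PTEs, hence
 * PTRS_PER_PTE + 1 above, while a level-2 block (2MiB) needs only a single
 * page of PTEs and a last-level entry needs none.
 */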
1463 static int stage2_split_walker(const struct kvm_pgtable_visit_ctx *ctx,
1464 enum kvm_pgtable_walk_flags visit)
1466 struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
1467 struct kvm_mmu_memory_cache *mc = ctx->arg;
1468 struct kvm_s2_mmu *mmu;
1469 kvm_pte_t pte = ctx->old, new, *childp;
1470 enum kvm_pgtable_prot prot;
1471 s8 level = ctx->level;
1476 /* No huge-pages exist at the last level */
1477 if (level == KVM_PGTABLE_LAST_LEVEL)
1480 /* We only split valid block mappings */
1481 if (!kvm_pte_valid(pte))
1484 nr_pages = stage2_block_get_nr_page_tables(level);
1488 if (mc->nobjs >= nr_pages) {
1489 /* Build a tree mapped down to the PTE granularity. */
1493 * Don't force PTEs, so create_unlinked() below does
1494 * not populate the tree up to the PTE level. The
1495 * consequence is that the call will require a single
1496 * page of level 2 entries at level 1, or a single
1497 * page of PTEs at level 2. If we are at level 1, the
1498 * PTEs will be created recursively.
1504 if (mc->nobjs < nr_pages)
1507 mmu = container_of(mc, struct kvm_s2_mmu, split_page_cache);
1508 phys = kvm_pte_to_phys(pte);
1509 prot = kvm_pgtable_stage2_pte_prot(pte);
1511 childp = kvm_pgtable_stage2_create_unlinked(mmu->pgt, phys,
1512 level, prot, mc, force_pte);
1514 return PTR_ERR(childp);
1516 if (!stage2_try_break_pte(ctx, mmu)) {
1517 kvm_pgtable_stage2_free_unlinked(mm_ops, childp, level);
1522 * Note, the contents of the page table are guaranteed to be made
1523 * visible before the new PTE is assigned because stage2_make_pte()
1524 * writes the PTE using smp_store_release().
1526 new = kvm_init_table_pte(childp, mm_ops);
1527 stage2_make_pte(ctx, new);
1532 int kvm_pgtable_stage2_split(struct kvm_pgtable *pgt, u64 addr, u64 size,
1533 struct kvm_mmu_memory_cache *mc)
1535 struct kvm_pgtable_walker walker = {
1536 .cb = stage2_split_walker,
1537 .flags = KVM_PGTABLE_WALK_LEAF,
1541 return kvm_pgtable_walk(pgt, addr, size, &walker);
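/*
 * This is the entry point for eager page splitting (e.g. when dirty logging
 * is enabled): the caller pre-fills @mc, typically the per-MMU
 * split_page_cache referenced in stage2_split_walker(), and walks the
 * memslot range so that subsequent write-protection operates on PTE-granule
 * mappings.
 */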
1544 int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
1545 struct kvm_pgtable_mm_ops *mm_ops,
1546 enum kvm_pgtable_stage2_flags flags,
1547 kvm_pgtable_force_pte_cb_t force_pte_cb)
1550 u64 vtcr = mmu->vtcr;
1551 u32 ia_bits = VTCR_EL2_IPA(vtcr);
1552 u32 sl0 = FIELD_GET(VTCR_EL2_SL0_MASK, vtcr);
1553 s8 start_level = VTCR_EL2_TGRAN_SL0_BASE - sl0;
1555 pgd_sz = kvm_pgd_pages(ia_bits, start_level) * PAGE_SIZE;
1556 pgt->pgd = (kvm_pteref_t)mm_ops->zalloc_pages_exact(pgd_sz);
1560 pgt->ia_bits = ia_bits;
1561 pgt->start_level = start_level;
1562 pgt->mm_ops = mm_ops;
1565 pgt->force_pte_cb = force_pte_cb;
1567 /* Ensure zeroed PGD pages are visible to the hardware walker */
1572 size_t kvm_pgtable_stage2_pgd_size(u64 vtcr)
1574 u32 ia_bits = VTCR_EL2_IPA(vtcr);
1575 u32 sl0 = FIELD_GET(VTCR_EL2_SL0_MASK, vtcr);
1576 s8 start_level = VTCR_EL2_TGRAN_SL0_BASE - sl0;
1578 return kvm_pgd_pages(ia_bits, start_level) * PAGE_SIZE;
1581 static int stage2_free_walker(const struct kvm_pgtable_visit_ctx *ctx,
1582 enum kvm_pgtable_walk_flags visit)
1584 struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
1586 if (!stage2_pte_is_counted(ctx->old))
1589 mm_ops->put_page(ctx->ptep);
1591 if (kvm_pte_table(ctx->old, ctx->level))
1592 mm_ops->put_page(kvm_pte_follow(ctx->old, mm_ops));
1597 void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt)
1600 struct kvm_pgtable_walker walker = {
1601 .cb = stage2_free_walker,
1602 .flags = KVM_PGTABLE_WALK_LEAF |
1603 KVM_PGTABLE_WALK_TABLE_POST,
1606 WARN_ON(kvm_pgtable_walk(pgt, 0, BIT(pgt->ia_bits), &walker));
1607 pgd_sz = kvm_pgd_pages(pgt->ia_bits, pgt->start_level) * PAGE_SIZE;
1608 pgt->mm_ops->free_pages_exact(kvm_dereference_pteref(&walker, pgt->pgd), pgd_sz);
1612 void kvm_pgtable_stage2_free_unlinked(struct kvm_pgtable_mm_ops *mm_ops, void *pgtable, s8 level)
1614 kvm_pteref_t ptep = (kvm_pteref_t)pgtable;
1615 struct kvm_pgtable_walker walker = {
1616 .cb = stage2_free_walker,
1617 .flags = KVM_PGTABLE_WALK_LEAF |
1618 KVM_PGTABLE_WALK_TABLE_POST,
1620 struct kvm_pgtable_walk_data data = {
1624 * At this point the IPA really doesn't matter, as the page
1625 * table being traversed has already been removed from the stage
1626 * 2. Set an appropriate range to cover the entire page table.
1629 .end = kvm_granule_size(level),
1632 WARN_ON(__kvm_pgtable_walk(&data, mm_ops, ptep, level + 1));
1634 WARN_ON(mm_ops->page_count(pgtable) != 1);
1635 mm_ops->put_page(pgtable);