KVM: arm64: Rework kvm_pgtable initialisation
authorMarc Zyngier <maz@kernel.org>
Mon, 29 Nov 2021 20:00:45 +0000 (20:00 +0000)
committerMarc Zyngier <maz@kernel.org>
Thu, 16 Dec 2021 17:01:05 +0000 (17:01 +0000)
Ganapatrao reported that the kvm_pgtable->mmu pointer is more or
less hardcoded to the main S2 mmu structure, while the nested
code needs it to point to other instances (as we have one instance
per nested context).

Rework the initialisation of the kvm_pgtable structure so that
this assumption doesn't hold true anymore. This requires some
minor changes to the order in which things are initialised
(the mmu->arch pointer being the critical one).

Reported-by: Ganapatrao Kulkarni <gankulkarni@os.amperecomputing.com>
Reviewed-by: Ganapatrao Kulkarni <gankulkarni@os.amperecomputing.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20211129200150.351436-5-maz@kernel.org
arch/arm64/include/asm/kvm_pgtable.h
arch/arm64/kvm/hyp/nvhe/mem_protect.c
arch/arm64/kvm/hyp/pgtable.c
arch/arm64/kvm/mmu.c

index 0277838295840c6ee2c4d4b79040034f2abc37fb..8b808e70c2b2d4366206bb579ad9b479b791635d 100644 (file)
@@ -270,8 +270,7 @@ u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift);
 /**
  * __kvm_pgtable_stage2_init() - Initialise a guest stage-2 page-table.
  * @pgt:       Uninitialised page-table structure to initialise.
- * @arch:      Arch-specific KVM structure representing the guest virtual
- *             machine.
+ * @mmu:       S2 MMU context for this S2 translation
  * @mm_ops:    Memory management callbacks.
  * @flags:     Stage-2 configuration flags.
  * @force_pte_cb: Function that returns true if page level mappings must
@@ -279,13 +278,13 @@ u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift);
  *
  * Return: 0 on success, negative error code on failure.
  */
-int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_arch *arch,
+int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
                              struct kvm_pgtable_mm_ops *mm_ops,
                              enum kvm_pgtable_stage2_flags flags,
                              kvm_pgtable_force_pte_cb_t force_pte_cb);
 
-#define kvm_pgtable_stage2_init(pgt, arch, mm_ops) \
-       __kvm_pgtable_stage2_init(pgt, arch, mm_ops, 0, NULL)
+#define kvm_pgtable_stage2_init(pgt, mmu, mm_ops) \
+       __kvm_pgtable_stage2_init(pgt, mmu, mm_ops, 0, NULL)
 
 /**
  * kvm_pgtable_stage2_destroy() - Destroy an unused guest stage-2 page-table.
index c1a90dd022b8c151bf30152e7ddbfccf12676d19..7d7b7037dc68c8a9774e69493d9b401d6d38e938 100644 (file)
@@ -103,19 +103,19 @@ int kvm_host_prepare_stage2(void *pgt_pool_base)
 
        prepare_host_vtcr();
        hyp_spin_lock_init(&host_kvm.lock);
+       mmu->arch = &host_kvm.arch;
 
        ret = prepare_s2_pool(pgt_pool_base);
        if (ret)
                return ret;
 
-       ret = __kvm_pgtable_stage2_init(&host_kvm.pgt, &host_kvm.arch,
+       ret = __kvm_pgtable_stage2_init(&host_kvm.pgt, mmu,
                                        &host_kvm.mm_ops, KVM_HOST_S2_FLAGS,
                                        host_stage2_force_pte_cb);
        if (ret)
                return ret;
 
        mmu->pgd_phys = __hyp_pa(host_kvm.pgt.pgd);
-       mmu->arch = &host_kvm.arch;
        mmu->pgt = &host_kvm.pgt;
        WRITE_ONCE(mmu->vmid.vmid_gen, 0);
        WRITE_ONCE(mmu->vmid.vmid, 0);
index f8ceebe4982eb130b884800bfea771cd1353d4db..8cdbc43fa6514ec3df820a4d5fa0fbd7a98cbbd2 100644 (file)
@@ -1116,13 +1116,13 @@ int kvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size)
 }
 
 
-int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_arch *arch,
+int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
                              struct kvm_pgtable_mm_ops *mm_ops,
                              enum kvm_pgtable_stage2_flags flags,
                              kvm_pgtable_force_pte_cb_t force_pte_cb)
 {
        size_t pgd_sz;
-       u64 vtcr = arch->vtcr;
+       u64 vtcr = mmu->arch->vtcr;
        u32 ia_bits = VTCR_EL2_IPA(vtcr);
        u32 sl0 = FIELD_GET(VTCR_EL2_SL0_MASK, vtcr);
        u32 start_level = VTCR_EL2_TGRAN_SL0_BASE - sl0;
@@ -1135,7 +1135,7 @@ int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_arch *arch,
        pgt->ia_bits            = ia_bits;
        pgt->start_level        = start_level;
        pgt->mm_ops             = mm_ops;
-       pgt->mmu                = &arch->mmu;
+       pgt->mmu                = mmu;
        pgt->flags              = flags;
        pgt->force_pte_cb       = force_pte_cb;
 
index 326cdfec74a15df906d437773b40ea7a1ae7342d..9eec548fccd19695d6c1fb6624261706b5ee30ec 100644 (file)
@@ -516,7 +516,8 @@ int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu)
        if (!pgt)
                return -ENOMEM;
 
-       err = kvm_pgtable_stage2_init(pgt, &kvm->arch, &kvm_s2_mm_ops);
+       mmu->arch = &kvm->arch;
+       err = kvm_pgtable_stage2_init(pgt, mmu, &kvm_s2_mm_ops);
        if (err)
                goto out_free_pgtable;
 
@@ -529,7 +530,6 @@ int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu)
        for_each_possible_cpu(cpu)
                *per_cpu_ptr(mmu->last_vcpu_ran, cpu) = -1;
 
-       mmu->arch = &kvm->arch;
        mmu->pgt = pgt;
        mmu->pgd_phys = __pa(pgt->pgd);
        WRITE_ONCE(mmu->vmid.vmid_gen, 0);