KVM: arm64: nv: Handle TLB invalidation targeting L2 stage-1
authorMarc Zyngier <maz@kernel.org>
Fri, 14 Jun 2024 14:45:43 +0000 (15:45 +0100)
committerOliver Upton <oliver.upton@linux.dev>
Wed, 19 Jun 2024 08:14:37 +0000 (08:14 +0000)
While dealing with TLB invalidation targeting the guest hypervisor's
own stage-1 was easy, doing the same thing for its own guests is
a bit more involved.

Since such an invalidation is scoped by VMID, it needs to apply to
all s2_mmu contexts that have been tagged by that VMID, irrespective
of the value of VTTBR_EL2.BADDR.

So for each s2_mmu context matching that VMID, we invalidate the
corresponding TLBs, each context having its own "physical" VMID.

Co-developed-by: Jintack Lim <jintack.lim@linaro.org>
Co-developed-by: Christoffer Dall <christoffer.dall@arm.com>
Signed-off-by: Jintack Lim <jintack.lim@linaro.org>
Signed-off-by: Christoffer Dall <christoffer.dall@arm.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20240614144552.2773592-8-maz@kernel.org
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
arch/arm64/include/asm/kvm_nested.h
arch/arm64/kvm/nested.c
arch/arm64/kvm/sys_regs.c

index 76b88c6406022036bd339e10ebfa8ae4da024730..9b7c92ab87cf221f481aa2453a3443683ef80271 100644 (file)
@@ -65,6 +65,13 @@ extern void kvm_init_nested(struct kvm *kvm);
 extern int kvm_vcpu_init_nested(struct kvm_vcpu *vcpu);
 extern void kvm_init_nested_s2_mmu(struct kvm_s2_mmu *mmu);
 extern struct kvm_s2_mmu *lookup_s2_mmu(struct kvm_vcpu *vcpu);
+
+union tlbi_info;
+
+/*
+ * Apply the given callback to every valid nested S2 MMU context whose
+ * VMID matches @vmid, irrespective of VTTBR_EL2.BADDR.
+ */
+extern void kvm_s2_mmu_iterate_by_vmid(struct kvm *kvm, u16 vmid,
+                                      const union tlbi_info *info,
+                                      void (*)(struct kvm_s2_mmu *,
+                                               const union tlbi_info *));
 extern void kvm_vcpu_load_hw_mmu(struct kvm_vcpu *vcpu);
 extern void kvm_vcpu_put_hw_mmu(struct kvm_vcpu *vcpu);
 
index 114a3f59c28b218937fa6b9f0a483335ac307c23..a2734d135211c261d493b19dac1db9df615b5d83 100644 (file)
@@ -364,6 +364,41 @@ int kvm_walk_nested_s2(struct kvm_vcpu *vcpu, phys_addr_t gipa,
        return ret;
 }
 
+/*
+ * We can have multiple *different* MMU contexts with the same VMID:
+ *
+ * - S2 being enabled or not, hence differing by the HCR_EL2.VM bit
+ *
+ * - Multiple vcpus using private S2s (huh huh...), hence differing by the
+ *   VTTBR_EL2.BADDR address
+ *
+ * - A combination of the above...
+ *
+ * We can always identify which MMU context to pick at run-time.  However,
+ * TLB invalidation involving a VMID must take action on all the TLBs using
+ * this particular VMID. This translates into applying the same invalidation
+ * operation to all the contexts that are using this VMID. Moar phun!
+ */
+void kvm_s2_mmu_iterate_by_vmid(struct kvm *kvm, u16 vmid,
+                               const union tlbi_info *info,
+                               void (*tlbi_callback)(struct kvm_s2_mmu *,
+                                                     const union tlbi_info *))
+{
+       /* Serialise against concurrent changes to the nested MMU array */
+       write_lock(&kvm->mmu_lock);
+
+       for (int i = 0; i < kvm->arch.nested_mmus_size; i++) {
+               struct kvm_s2_mmu *mmu = &kvm->arch.nested_mmus[i];
+
+               if (!kvm_s2_mmu_valid(mmu))
+                       continue;
+
+               /* Match on the guest VMID alone, ignoring VTTBR_EL2.BADDR */
+               if (vmid == get_vmid(mmu->tlb_vttbr))
+                       tlbi_callback(mmu, info);
+       }
+
+       write_unlock(&kvm->mmu_lock);
+}
+
 struct kvm_s2_mmu *lookup_s2_mmu(struct kvm_vcpu *vcpu)
 {
        struct kvm *kvm = vcpu->kvm;
index 22b45a15d06881ce25ee124f8b69a20491b5df3d..b22309fca3a78c705e7688ed5eed2239e7f991cf 100644 (file)
@@ -2741,6 +2741,73 @@ static const struct sys_reg_desc sys_reg_descs[] = {
        EL2_REG(SP_EL2, NULL, reset_unknown, 0),
 };
 
+/* Only defined here as this is an internal "abstraction" */
+union tlbi_info {
+       struct {
+               u64     start;
+               u64     size;
+       } range;
+
+       struct {
+               u64     addr;
+       } ipa;
+
+       struct {
+               u64     addr;
+               u32     encoding;
+       } va;
+};
+
+/*
+ * Per-MMU callback for kvm_s2_mmu_iterate_by_vmid(): replay a trapped
+ * EL1 TLBI (VA and instruction encoding in info->va) against a single
+ * shadow S2 context. A non-zero return from the hyp helper is
+ * unexpected, hence the WARN.
+ */
+static void s2_mmu_tlbi_s1e1(struct kvm_s2_mmu *mmu,
+                            const union tlbi_info *info)
+{
+       WARN_ON(__kvm_tlbi_s1e2(mmu, info->va.addr, info->va.encoding));
+}
+
+/*
+ * Emulate a trapped EL1 TLBI issued from vEL2: inject UNDEF if the
+ * encoding isn't supported, otherwise apply it to every shadow S2 MMU
+ * context tagged with the guest hypervisor's VMID.
+ *
+ * Returns false when an exception was injected, true otherwise.
+ */
+static bool handle_tlbi_el1(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+                           const struct sys_reg_desc *r)
+{
+       u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
+       u64 vttbr = vcpu_read_sys_reg(vcpu, VTTBR_EL2);
+
+       /*
+        * If we're here, this is because we've trapped on a EL1 TLBI
+        * instruction that affects the EL1 translation regime while
+        * we're running in a context that doesn't allow us to let the
+        * HW do its thing (aka vEL2):
+        *
+        * - HCR_EL2.E2H == 0 : a non-VHE guest
+        * - HCR_EL2.{E2H,TGE} == { 1, 0 } : a VHE guest in guest mode
+        *
+        * We don't expect these helpers to ever be called when running
+        * in a vEL1 context.
+        */
+
+       WARN_ON(!vcpu_is_el2(vcpu));
+
+       if (!kvm_supported_tlbi_s1e1_op(vcpu, sys_encoding)) {
+               kvm_inject_undefined(vcpu);
+               return false;
+       }
+
+       /*
+        * The invalidation must hit all contexts using this VMID,
+        * irrespective of VTTBR_EL2.BADDR, hence the VMID-wide walk.
+        */
+       kvm_s2_mmu_iterate_by_vmid(vcpu->kvm, get_vmid(vttbr),
+                                  &(union tlbi_info) {
+                                          .va = {
+                                                  .addr = p->regval,
+                                                  .encoding = sys_encoding,
+                                          },
+                                  },
+                                  s2_mmu_tlbi_s1e1);
+
+       return true;
+}
+
+/* Build a sys_reg_desc entry binding a trapped system instruction to a handler */
+#define SYS_INSN(insn, access_fn)                                      \
+       {                                                               \
+               SYS_DESC(OP_##insn),                                    \
+               .access = (access_fn),                                  \
+       }
+
 static struct sys_reg_desc sys_insn_descs[] = {
        { SYS_DESC(SYS_DC_ISW), access_dcsw },
        { SYS_DESC(SYS_DC_IGSW), access_dcgsw },
@@ -2751,6 +2818,19 @@ static struct sys_reg_desc sys_insn_descs[] = {
        { SYS_DESC(SYS_DC_CISW), access_dcsw },
        { SYS_DESC(SYS_DC_CIGSW), access_dcgsw },
        { SYS_DESC(SYS_DC_CIGDSW), access_dcgsw },
+
+       SYS_INSN(TLBI_VMALLE1IS, handle_tlbi_el1),
+       SYS_INSN(TLBI_VAE1IS, handle_tlbi_el1),
+       SYS_INSN(TLBI_ASIDE1IS, handle_tlbi_el1),
+       SYS_INSN(TLBI_VAAE1IS, handle_tlbi_el1),
+       SYS_INSN(TLBI_VALE1IS, handle_tlbi_el1),
+       SYS_INSN(TLBI_VAALE1IS, handle_tlbi_el1),
+       SYS_INSN(TLBI_VMALLE1, handle_tlbi_el1),
+       SYS_INSN(TLBI_VAE1, handle_tlbi_el1),
+       SYS_INSN(TLBI_ASIDE1, handle_tlbi_el1),
+       SYS_INSN(TLBI_VAAE1, handle_tlbi_el1),
+       SYS_INSN(TLBI_VALE1, handle_tlbi_el1),
+       SYS_INSN(TLBI_VAALE1, handle_tlbi_el1),
 };
 
 static const struct sys_reg_desc *first_idreg;