KVM: arm64: Implement __kvm_tlb_flush_vmid_range()
Author:     Raghavendra Rao Ananta <rananta@google.com>
AuthorDate: Fri, 11 Aug 2023 04:51:22 +0000 (04:51 +0000)
Commit:     Marc Zyngier <maz@kernel.org>
CommitDate: Thu, 17 Aug 2023 08:40:35 +0000 (09:40 +0100)
Define __kvm_tlb_flush_vmid_range() (for VHE and nVHE)
to flush a range of stage-2 translations by IPA in one go.
On systems that support FEAT_TLBIRANGE, subsequent patches
will replace global TLBIs such as vmalls12e1is in the map,
unmap, and dirty-logging paths with ripas2e1is.

Signed-off-by: Raghavendra Rao Ananta <rananta@google.com>
Reviewed-by: Gavin Shan <gshan@redhat.com>
Reviewed-by: Shaoqin Huang <shahuang@redhat.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20230811045127.3308641-10-rananta@google.com
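
[Editor's note] For context: a follow-up patch in this series adds the
host-side wrapper that reaches the handlers below via kvm_call_hyp(). The
sketch here is a simplified rendering of that caller, not part of this
commit; MAX_TLBI_RANGE_PAGES (from tlbflush.h) caps how many pages a single
range TLBI can describe, so larger requests are split across hypercalls.

static void kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
				     phys_addr_t addr, size_t size)
{
	unsigned long pages, inval_pages;

	/* Without FEAT_TLBIRANGE, fall back to a full-VMID flush. */
	if (!system_supports_tlb_range()) {
		kvm_call_hyp(__kvm_tlb_flush_vmid, mmu);
		return;
	}

	pages = size >> PAGE_SHIFT;
	while (pages > 0) {
		/* One hypercall per MAX_TLBI_RANGE_PAGES chunk. */
		inval_pages = min(pages, MAX_TLBI_RANGE_PAGES);
		kvm_call_hyp(__kvm_tlb_flush_vmid_range, mmu, addr, inval_pages);

		addr += inval_pages << PAGE_SHIFT;
		pages -= inval_pages;
	}
}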
arch/arm64/include/asm/kvm_asm.h
arch/arm64/kvm/hyp/nvhe/hyp-main.c
arch/arm64/kvm/hyp/nvhe/tlb.c
arch/arm64/kvm/hyp/vhe/tlb.c

diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 7d170aaa2db4195b5405a46caf6bcbcb22f0fe38..2c27cb8cf442df904b643fec3f7b5cfb15409361 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -70,6 +70,7 @@ enum __kvm_host_smccc_func {
        __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_ipa,
        __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_ipa_nsh,
        __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid,
+       __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_range,
        __KVM_HOST_SMCCC_FUNC___kvm_flush_cpu_context,
        __KVM_HOST_SMCCC_FUNC___kvm_timer_set_cntvoff,
        __KVM_HOST_SMCCC_FUNC___vgic_v3_read_vmcr,
@@ -229,6 +230,8 @@ extern void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa,
 extern void __kvm_tlb_flush_vmid_ipa_nsh(struct kvm_s2_mmu *mmu,
                                         phys_addr_t ipa,
                                         int level);
+extern void __kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
+                                       phys_addr_t start, unsigned long pages);
 extern void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu);
 
 extern void __kvm_timer_set_cntvoff(u64 cntvoff);
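
[Editor's note] The new enum entry's position is what becomes the
hypercall's SMCCC function ID under nVHE, so it must stay in sync with the
hyp dispatch table edited below. Roughly, per the KVM_HOST_SMCCC_* macros
already in this header (reproduced here in simplified form):

/* Simplified from kvm_asm.h: build a vendor-hyp SMCCC fast-call ID. */
#define KVM_HOST_SMCCC_ID(id)						\
	ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_64,	\
			   ARM_SMCCC_OWNER_VENDOR_HYP, (id))

#define KVM_HOST_SMCCC_FUNC(name) \
	KVM_HOST_SMCCC_ID(__KVM_HOST_SMCCC_FUNC_##name)

/* e.g. the ID the host passes in x0 for this patch's hypercall: */
u64 id = KVM_HOST_SMCCC_FUNC(__kvm_tlb_flush_vmid_range);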
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
index a169c619db60b85e92f33c631ceb84d27173c674..857d9bc04fd48a58bedaa8ab457088bfd7817600 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@ -135,6 +135,16 @@ static void handle___kvm_tlb_flush_vmid_ipa_nsh(struct kvm_cpu_context *host_ctx
        __kvm_tlb_flush_vmid_ipa_nsh(kern_hyp_va(mmu), ipa, level);
 }
 
+static void
+handle___kvm_tlb_flush_vmid_range(struct kvm_cpu_context *host_ctxt)
+{
+       DECLARE_REG(struct kvm_s2_mmu *, mmu, host_ctxt, 1);
+       DECLARE_REG(phys_addr_t, start, host_ctxt, 2);
+       DECLARE_REG(unsigned long, pages, host_ctxt, 3);
+
+       __kvm_tlb_flush_vmid_range(kern_hyp_va(mmu), start, pages);
+}
+
 static void handle___kvm_tlb_flush_vmid(struct kvm_cpu_context *host_ctxt)
 {
        DECLARE_REG(struct kvm_s2_mmu *, mmu, host_ctxt, 1);
@@ -327,6 +337,7 @@ static const hcall_t host_hcall[] = {
        HANDLE_FUNC(__kvm_tlb_flush_vmid_ipa),
        HANDLE_FUNC(__kvm_tlb_flush_vmid_ipa_nsh),
        HANDLE_FUNC(__kvm_tlb_flush_vmid),
+       HANDLE_FUNC(__kvm_tlb_flush_vmid_range),
        HANDLE_FUNC(__kvm_flush_cpu_context),
        HANDLE_FUNC(__kvm_timer_set_cntvoff),
        HANDLE_FUNC(__vgic_v3_read_vmcr),
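
[Editor's note] The handler follows the usual hyp-main.c pattern:
DECLARE_REG() pulls each argument back out of the host context's saved
general-purpose registers, in the order kvm_call_hyp() passed them (x0
carries the SMCCC function ID, so arguments start at x1). In effect, the
new handler's body expands to something like:

/*
 * DECLARE_REG(type, name, ctxt, reg) is type name = (type)cpu_reg(ctxt, reg),
 * and cpu_reg(ctxt, r) reads (ctxt)->regs.regs[r]; so, approximately:
 */
struct kvm_s2_mmu *mmu = (struct kvm_s2_mmu *)host_ctxt->regs.regs[1];
phys_addr_t start = (phys_addr_t)host_ctxt->regs.regs[2];
unsigned long pages = (unsigned long)host_ctxt->regs.regs[3];

__kvm_tlb_flush_vmid_range(kern_hyp_va(mmu), start, pages);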
diff --git a/arch/arm64/kvm/hyp/nvhe/tlb.c b/arch/arm64/kvm/hyp/nvhe/tlb.c
index b9991bbd8e3fd47baf911ef84dd7402173603879..1b265713d6bede228b47a423d0f1d81a83070a5c 100644
--- a/arch/arm64/kvm/hyp/nvhe/tlb.c
+++ b/arch/arm64/kvm/hyp/nvhe/tlb.c
@@ -182,6 +182,36 @@ void __kvm_tlb_flush_vmid_ipa_nsh(struct kvm_s2_mmu *mmu,
        __tlb_switch_to_host(&cxt);
 }
 
+void __kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
+                               phys_addr_t start, unsigned long pages)
+{
+       struct tlb_inv_context cxt;
+       unsigned long stride;
+
+       /*
+        * Since the range of addresses may not be mapped at
+        * the same level, assume the worst case as PAGE_SIZE
+        */
+       stride = PAGE_SIZE;
+       start = round_down(start, stride);
+
+       /* Switch to requested VMID */
+       __tlb_switch_to_guest(mmu, &cxt, false);
+
+       __flush_s2_tlb_range_op(ipas2e1is, start, pages, stride, 0);
+
+       dsb(ish);
+       __tlbi(vmalle1is);
+       dsb(ish);
+       isb();
+
+       /* See the comment in __kvm_tlb_flush_vmid_ipa() */
+       if (icache_is_vpipt())
+               icache_inval_all_pou();
+
+       __tlb_switch_to_host(&cxt);
+}
+
 void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
 {
        struct tlb_inv_context cxt;
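
[Editor's note] Two details of the nVHE body are worth spelling out. First,
TLBI IPAS2E1IS invalidates only entries holding stage-2-only translations;
the trailing vmalle1is then drops any cached combined stage-1+2 entries for
the VMID, mirroring what __kvm_tlb_flush_vmid_ipa() already does (hence the
comment reference). Second, each range TLBI issued by
__flush_s2_tlb_range_op() covers (NUM + 1) << (5 * SCALE + 1) pages of the
given stride, per the FEAT_TLBIRANGE encoding. A minimal sketch of that
arithmetic (illustrative only; the real loop is __flush_tlb_range_op() in
tlbflush.h):

/* Pages covered by one range TLBI: SCALE is 2 bits, NUM is 5 bits. */
static inline unsigned long tlbi_range_pages(unsigned int scale,
					     unsigned int num)
{
	return (unsigned long)(num + 1) << (5 * scale + 1);
}

/*
 * Worst case per instruction: tlbi_range_pages(3, 31) = 32 << 16,
 * i.e. 2M pages, or 8GiB at a 4KiB stride; anything larger takes
 * several instructions (or several hypercalls from the host side).
 */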
diff --git a/arch/arm64/kvm/hyp/vhe/tlb.c b/arch/arm64/kvm/hyp/vhe/tlb.c
index e69da550cdc5b50b5dce1de0283608ac063e7d27..46bd43f61d76f5dc1676cfad45a64830e138a526 100644
--- a/arch/arm64/kvm/hyp/vhe/tlb.c
+++ b/arch/arm64/kvm/hyp/vhe/tlb.c
@@ -143,6 +143,34 @@ void __kvm_tlb_flush_vmid_ipa_nsh(struct kvm_s2_mmu *mmu,
        __tlb_switch_to_host(&cxt);
 }
 
+void __kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
+                               phys_addr_t start, unsigned long pages)
+{
+       struct tlb_inv_context cxt;
+       unsigned long stride;
+
+       /*
+        * Since the range of addresses may not be mapped at
+        * the same level, assume the worst case as PAGE_SIZE
+        */
+       stride = PAGE_SIZE;
+       start = round_down(start, stride);
+
+       dsb(ishst);
+
+       /* Switch to requested VMID */
+       __tlb_switch_to_guest(mmu, &cxt);
+
+       __flush_s2_tlb_range_op(ipas2e1is, start, pages, stride, 0);
+
+       dsb(ish);
+       __tlbi(vmalle1is);
+       dsb(ish);
+       isb();
+
+       __tlb_switch_to_host(&cxt);
+}
+
 void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
 {
        struct tlb_inv_context cxt;
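
[Editor's note] The VHE variant differs from the nVHE one in two small
ways: __tlb_switch_to_guest() takes no 'nsh' argument here, and the
dsb(ishst) is explicit because, on the nVHE side, that store barrier is
issued inside __tlb_switch_to_guest() itself; in both cases prior
page-table updates must be visible before the invalidation starts. With
both variants in place, later patches can narrow call sites from a
full-VMID flush to a ranged one. A hypothetical before/after ('addr' and
'size' are assumed locals at the call site):

/* Before: invalidate every stage-2 entry for this VMID. */
kvm_call_hyp(__kvm_tlb_flush_vmid, mmu);

/* After, when FEAT_TLBIRANGE is available: only the affected range. */
kvm_call_hyp(__kvm_tlb_flush_vmid_range, mmu, addr, size >> PAGE_SHIFT);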