riscv: mm: Add support for Svinval extension
author: Mayuresh Chitale <mchitale@ventanamicro.com>
Tue, 2 Jul 2024 10:26:37 +0000 (15:56 +0530)
committer: Palmer Dabbelt <palmer@dabbelt.com>
Thu, 5 Jun 2025 21:03:06 +0000 (14:03 -0700)
The Svinval extension splits SFENCE.VMA instruction into finer-grained
invalidation and ordering operations and is mandatory for RVA23S64 profile.
When Svinval is enabled the local_flush_tlb_range_threshold_asid function
should use the following sequence to optimize the tlb flushes instead of
a simple sfence.vma:

sfence.w.inval
svinval.vma
  .
  .
svinval.vma
sfence.inval.ir

The maximum number of consecutive svinval.vma instructions that
can be executed in local_flush_tlb_range_threshold_asid function
is limited to 64. This is required to avoid soft lockups and the
approach is similar to that used in arm64.

Signed-off-by: Mayuresh Chitale <mchitale@ventanamicro.com>
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
Reviewed-by: Alexandre Ghiti <alexghiti@rivosinc.com>
Link: https://lore.kernel.org/r/20240702102637.9074-1-mchitale@ventanamicro.com
Signed-off-by: Alexandre Ghiti <alexghiti@rivosinc.com>
Signed-off-by: Palmer Dabbelt <palmer@dabbelt.com>
arch/riscv/mm/tlbflush.c

index f9e27ba1df99ffeb5d4d450e933c15eb4aeeb92e..6289ed5c7eb45732a18d915480d5c62bee9db5fc 100644 (file)
@@ -7,6 +7,27 @@
 #include <linux/mmu_notifier.h>
 #include <asm/sbi.h>
 #include <asm/mmu_context.h>
+#include <asm/cpufeature.h>
+
+#define has_svinval()  riscv_has_extension_unlikely(RISCV_ISA_EXT_SVINVAL)
+
+static inline void local_sfence_inval_ir(void)
+{
+       asm volatile(SFENCE_INVAL_IR() ::: "memory");
+}
+
+static inline void local_sfence_w_inval(void)
+{
+       asm volatile(SFENCE_W_INVAL() ::: "memory");
+}
+
+static inline void local_sinval_vma(unsigned long vma, unsigned long asid)
+{
+       if (asid != FLUSH_TLB_NO_ASID)
+               asm volatile(SINVAL_VMA(%0, %1) : : "r" (vma), "r" (asid) : "memory");
+       else
+               asm volatile(SINVAL_VMA(%0, zero) : : "r" (vma) : "memory");
+}
 
 /*
  * Flush entire TLB if number of entries to be flushed is greater
@@ -27,6 +48,16 @@ static void local_flush_tlb_range_threshold_asid(unsigned long start,
                return;
        }
 
+       if (has_svinval()) {
+               local_sfence_w_inval();
+               for (i = 0; i < nr_ptes_in_range; ++i) {
+                       local_sinval_vma(start, asid);
+                       start += stride;
+               }
+               local_sfence_inval_ir();
+               return;
+       }
+
        for (i = 0; i < nr_ptes_in_range; ++i) {
                local_flush_tlb_page_asid(start, asid);
                start += stride;