// SPDX-License-Identifier: GPL-2.0

#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/hugetlb.h>
#include <asm/sbi.h>
#include <asm/mmu_context.h>

/*
 * Flush entire TLB if number of entries to be flushed is greater
 * than the threshold below.
 */
unsigned long tlb_flush_all_threshold __read_mostly = 64;

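/*
 * Flush @size bytes at @start for @asid on the local hart, one
 * @stride-sized entry at a time.  Ranges with more entries than
 * tlb_flush_all_threshold are turned into a single full-ASID flush
 * instead of a long run of per-page sfence.vma instructions.
 */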
static void local_flush_tlb_range_threshold_asid(unsigned long start,
		unsigned long size, unsigned long stride, unsigned long asid)
{
	unsigned long nr_ptes_in_range = DIV_ROUND_UP(size, stride);
	int i;

	if (nr_ptes_in_range > tlb_flush_all_threshold) {
		local_flush_tlb_all_asid(asid);
		return;
	}

	for (i = 0; i < nr_ptes_in_range; ++i) {
		local_flush_tlb_page_asid(start, asid);
		start += stride;
	}
}

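/*
 * Local (non-broadcast) range flush: a single page flush when the range
 * fits in one stride, a full-ASID flush when the caller passes
 * FLUSH_TLB_MAX_SIZE, and the threshold-based loop above otherwise.
 */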
static inline void local_flush_tlb_range_asid(unsigned long start,
		unsigned long size, unsigned long stride, unsigned long asid)
{
	if (size <= stride)
		local_flush_tlb_page_asid(start, asid);
	else if (size == FLUSH_TLB_MAX_SIZE)
		local_flush_tlb_all_asid(asid);
	else
		local_flush_tlb_range_threshold_asid(start, size, stride, asid);
}

/* Flush a range of kernel pages without broadcasting */
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	local_flush_tlb_range_asid(start, end - start, PAGE_SIZE, FLUSH_TLB_NO_ASID);
}

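/* IPI callback: perform the requested flush on the receiving CPU. */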
static void __ipi_flush_tlb_all(void *info)
{
	local_flush_tlb_all();
}

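/*
 * Flush the whole TLB: locally when this is the only online CPU, via an
 * SBI remote fence when the firmware path is in use, or by sending an
 * IPI to every online CPU otherwise.
 */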
void flush_tlb_all(void)
{
	if (num_online_cpus() < 2)
		local_flush_tlb_all();
	else if (riscv_use_sbi_for_rfence())
		sbi_remote_sfence_vma_asid(NULL, 0, FLUSH_TLB_MAX_SIZE, FLUSH_TLB_NO_ASID);
	else
		on_each_cpu(__ipi_flush_tlb_all, NULL, 1);
}

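/* Arguments of a range flush, handed to remote CPUs through an IPI. */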
struct flush_tlb_range_data {
	unsigned long asid;
	unsigned long start;
	unsigned long size;
	unsigned long stride;
};

static void __ipi_flush_tlb_range_asid(void *info)
{
	struct flush_tlb_range_data *d = info;

	local_flush_tlb_range_asid(d->start, d->size, d->stride, d->asid);
}

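/*
 * Common helper for all range flushes: flush only locally when no other
 * CPU in @cmask needs it, otherwise broadcast either through the SBI or
 * by sending the arguments to the other CPUs as an IPI payload.
 */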
static void __flush_tlb_range(const struct cpumask *cmask, unsigned long asid,
			      unsigned long start, unsigned long size,
			      unsigned long stride)
{
	unsigned int cpu;

	if (cpumask_empty(cmask))
		return;

	cpu = get_cpu();

	/* Check if the TLB flush needs to be sent to other CPUs. */
	if (cpumask_any_but(cmask, cpu) >= nr_cpu_ids) {
		local_flush_tlb_range_asid(start, size, stride, asid);
	} else if (riscv_use_sbi_for_rfence()) {
		sbi_remote_sfence_vma_asid(cmask, start, size, asid);
	} else {
		struct flush_tlb_range_data ftd;

		ftd.asid = asid;
		ftd.start = start;
		ftd.size = size;
		ftd.stride = stride;
		on_each_cpu_mask(cmask, __ipi_flush_tlb_range_asid, &ftd, 1);
	}

	put_cpu();
}

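/* Extract the hardware ASID assigned to @mm's context. */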
static inline unsigned long get_mm_asid(struct mm_struct *mm)
{
	return cntx2asid(atomic_long_read(&mm->context.id));
}

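/* Flush all user mappings of @mm on every CPU recorded in its cpumask. */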
void flush_tlb_mm(struct mm_struct *mm)
{
	__flush_tlb_range(mm_cpumask(mm), get_mm_asid(mm),
			  0, FLUSH_TLB_MAX_SIZE, PAGE_SIZE);
}

void flush_tlb_mm_range(struct mm_struct *mm,
			unsigned long start, unsigned long end,
			unsigned int page_size)
{
	__flush_tlb_range(mm_cpumask(mm), get_mm_asid(mm),
			  start, end - start, page_size);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	__flush_tlb_range(mm_cpumask(vma->vm_mm), get_mm_asid(vma->vm_mm),
			  addr, PAGE_SIZE, PAGE_SIZE);
}

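/*
 * Flush a user address range, deriving the flush stride from the VMA:
 * PAGE_SIZE for regular mappings, the huge page size for hugetlb
 * mappings, clamped to a native page-table level when Svnapot is in use
 * so that every PTE of a NAPOT region is invalidated.
 */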
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	unsigned long stride_size;

	if (!is_vm_hugetlb_page(vma)) {
		stride_size = PAGE_SIZE;
	} else {
		stride_size = huge_page_size(hstate_vma(vma));

		/*
		 * As stated in the privileged specification, every PTE in a
		 * NAPOT region must be invalidated, so reset the stride in that
		 * case.
		 */
		if (has_svnapot()) {
			if (stride_size >= PGDIR_SIZE)
				stride_size = PGDIR_SIZE;
			else if (stride_size >= P4D_SIZE)
				stride_size = P4D_SIZE;
			else if (stride_size >= PUD_SIZE)
				stride_size = PUD_SIZE;
			else if (stride_size >= PMD_SIZE)
				stride_size = PMD_SIZE;
			else
				stride_size = PAGE_SIZE;
		}
	}

	__flush_tlb_range(mm_cpumask(vma->vm_mm), get_mm_asid(vma->vm_mm),
			  start, end - start, stride_size);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	__flush_tlb_range(cpu_online_mask, FLUSH_TLB_NO_ASID,
			  start, end - start, PAGE_SIZE);
}

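/*
 * With transparent huge pages enabled, PMD-level mappings are flushed
 * using a PMD_SIZE stride.
 */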
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
			 unsigned long end)
{
	__flush_tlb_range(mm_cpumask(vma->vm_mm), get_mm_asid(vma->vm_mm),
			  start, end - start, PMD_SIZE);
}
#endif

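/*
 * Batched TLB flushing: callers such as page reclaim may defer the flush
 * while unmapping, accumulate the CPUs of every affected mm in a batch,
 * and issue one flush over the whole batch at the end.
 */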
bool arch_tlbbatch_should_defer(struct mm_struct *mm)
{
	return true;
}

void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
			       struct mm_struct *mm,
			       unsigned long uaddr)
{
	cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
}

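/*
 * Called when deferred flushes recorded for @mm must take effect before
 * the caller can proceed; flushing the whole mm is the conservative
 * answer here.
 */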
void arch_flush_tlb_batched_pending(struct mm_struct *mm)
{
	flush_tlb_mm(mm);
}

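/* Flush everything for every CPU accumulated in the batch, then reset it. */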
void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
{
	__flush_tlb_range(&batch->cpumask, FLUSH_TLB_NO_ASID, 0,
			  FLUSH_TLB_MAX_SIZE, PAGE_SIZE);
	cpumask_clear(&batch->cpumask);
}