1 // SPDX-License-Identifier: GPL-2.0
2 // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
4 #include <linux/init.h>
6 #include <linux/module.h>
7 #include <linux/sched.h>
9 #include <asm/mmu_context.h>
10 #include <asm/pgtable.h>
11 #include <asm/setup.h>
/*
 * One C-SKY MMU TLB entry contain two PFN/page entry, ie:
 * 1VPN -> 2PFN
 */
#define TLB_ENTRY_SIZE		(PAGE_SIZE * 2)
#define TLB_ENTRY_SIZE_MASK	(PAGE_MASK << 1)
/* Invalidate every entry in the jTLB, regardless of ASID. */
void flush_tlb_all(void)
{
	tlb_invalid_all();
}
/*
 * Invalidate all TLB entries belonging to mm's ASID.  With hardware
 * TLBI support a single tlbi.asids does the per-ASID invalidation;
 * without it the only tool available is a full TLB flush.
 */
void flush_tlb_mm(struct mm_struct *mm)
{
#ifdef CONFIG_CPU_HAS_TLBI
	asm volatile("tlbi.asids %0"::"r"(cpu_asid(mm)));
#else
	tlb_invalid_all();
#endif
}
/*
 * MMU operation regs only could invalid tlb entry in jtlb and we
 * need change asid field to invalid I-utlb & D-utlb.
 *
 * Writing a *different* ASID into entryhi is what forces the micro-TLBs
 * to drop their cached translations; if oldpid == newpid we briefly
 * write oldpid + 1 so the final write-back of oldpid is seen as a
 * change.  Callers run with IRQs disabled around this sequence.
 */
#ifndef CONFIG_CPU_HAS_TLBI
#define restore_asid_inv_utlb(oldpid, newpid) \
do { \
	if (oldpid == newpid) \
		write_mmu_entryhi(oldpid + 1); \
	write_mmu_entryhi(oldpid); \
} while (0)
#endif
47 void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
50 unsigned long newpid = cpu_asid(vma->vm_mm);
52 start &= TLB_ENTRY_SIZE_MASK;
53 end += TLB_ENTRY_SIZE - 1;
54 end &= TLB_ENTRY_SIZE_MASK;
56 #ifdef CONFIG_CPU_HAS_TLBI
58 asm volatile("tlbi.vas %0"::"r"(start | newpid));
64 unsigned long flags, oldpid;
66 local_irq_save(flags);
67 oldpid = read_mmu_entryhi() & ASID_MASK;
71 write_mmu_entryhi(start | newpid);
74 idx = read_mmu_index();
76 tlb_invalid_indexed();
78 restore_asid_inv_utlb(oldpid, newpid);
79 local_irq_restore(flags);
84 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
86 start &= TLB_ENTRY_SIZE_MASK;
87 end += TLB_ENTRY_SIZE - 1;
88 end &= TLB_ENTRY_SIZE_MASK;
90 #ifdef CONFIG_CPU_HAS_TLBI
92 asm volatile("tlbi.vaas %0"::"r"(start));
98 unsigned long flags, oldpid;
100 local_irq_save(flags);
101 oldpid = read_mmu_entryhi() & ASID_MASK;
102 while (start < end) {
105 write_mmu_entryhi(start | oldpid);
106 start += 2*PAGE_SIZE;
108 idx = read_mmu_index();
110 tlb_invalid_indexed();
112 restore_asid_inv_utlb(oldpid, oldpid);
113 local_irq_restore(flags);
118 void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
120 int newpid = cpu_asid(vma->vm_mm);
122 addr &= TLB_ENTRY_SIZE_MASK;
124 #ifdef CONFIG_CPU_HAS_TLBI
125 asm volatile("tlbi.vas %0"::"r"(addr | newpid));
132 local_irq_save(flags);
133 oldpid = read_mmu_entryhi() & ASID_MASK;
134 write_mmu_entryhi(addr | newpid);
136 idx = read_mmu_index();
138 tlb_invalid_indexed();
140 restore_asid_inv_utlb(oldpid, newpid);
141 local_irq_restore(flags);
146 void flush_tlb_one(unsigned long addr)
148 addr &= TLB_ENTRY_SIZE_MASK;
150 #ifdef CONFIG_CPU_HAS_TLBI
151 asm volatile("tlbi.vaas %0"::"r"(addr));
158 local_irq_save(flags);
159 oldpid = read_mmu_entryhi() & ASID_MASK;
160 write_mmu_entryhi(addr | oldpid);
162 idx = read_mmu_index();
164 tlb_invalid_indexed();
166 restore_asid_inv_utlb(oldpid, oldpid);
167 local_irq_restore(flags);
171 EXPORT_SYMBOL(flush_tlb_one);