// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/sh/mm/cache.c
 *
 * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
 * Copyright (C) 2002 - 2010  Paul Mundt
 */
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

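/*
 * Per-CPU cache flush hooks. Each starts out as cache_noop and is
 * repointed at the real implementation by the CPU-family init code run
 * from cpu_cache_init(); the flush_*() wrappers further down broadcast
 * these to every online CPU via cacheop_on_each_cpu().
 */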
void (*local_flush_cache_all)(void *args) = cache_noop;
void (*local_flush_cache_mm)(void *args) = cache_noop;
void (*local_flush_cache_dup_mm)(void *args) = cache_noop;
void (*local_flush_cache_page)(void *args) = cache_noop;
void (*local_flush_cache_range)(void *args) = cache_noop;
void (*local_flush_dcache_folio)(void *args) = cache_noop;
void (*local_flush_icache_range)(void *args) = cache_noop;
void (*local_flush_icache_folio)(void *args) = cache_noop;
void (*local_flush_cache_sigtramp)(void *args) = cache_noop;

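/*
 * Low-level region operations (writeback, writeback + invalidate, and
 * invalidate only), likewise selected at boot and exported for use by
 * modules that need explicit control over cache lines.
 */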
void (*__flush_wback_region)(void *start, int size);
EXPORT_SYMBOL(__flush_wback_region);
void (*__flush_purge_region)(void *start, int size);
EXPORT_SYMBOL(__flush_purge_region);
void (*__flush_invalidate_region)(void *start, int size);
EXPORT_SYMBOL(__flush_invalidate_region);

static inline void noop__flush_region(void *start, int size)
{
}

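/*
 * Run a cache operation on every online CPU: IPI the others where the
 * hardware requires it (SH-X3), then run it locally, with preemption
 * disabled so we can't migrate between the two steps.
 */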
static inline void cacheop_on_each_cpu(void (*func) (void *info), void *info,
                                   int wait)
{
        preempt_disable();

        /* Needing IPI for cross-core flush is SHX3-specific. */
#ifdef CONFIG_CPU_SHX3
        /*
         * It's possible that this gets called early on when IRQs are
         * still disabled due to ioremapping by the boot CPU, so don't
         * even attempt IPIs unless there are other CPUs online.
         */
        if (num_online_cpus() > 1)
                smp_call_function(func, info, wait);
#endif

        func(info);

        preempt_enable();
}

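/*
 * Write into a user page from a kernel buffer (ptrace and friends),
 * keeping the aliasing D-cache coherent: if the folio is mapped and
 * known clean, write through a kernel mapping with the same cache
 * colour as the user's vaddr; otherwise write the kernel copy and mark
 * the folio as no longer D-cache clean. Executable mappings also get
 * an I-cache flush.
 */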
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
                       unsigned long vaddr, void *dst, const void *src,
                       unsigned long len)
{
        struct folio *folio = page_folio(page);

        if (boot_cpu_data.dcache.n_aliases && folio_mapped(folio) &&
            test_bit(PG_dcache_clean, &folio->flags)) {
                void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
                memcpy(vto, src, len);
                kunmap_coherent(vto);
        } else {
                memcpy(dst, src, len);
                if (boot_cpu_data.dcache.n_aliases)
                        clear_bit(PG_dcache_clean, &folio->flags);
        }

        if (vma->vm_flags & VM_EXEC)
                flush_cache_page(vma, vaddr, page_to_pfn(page));
}

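/*
 * The read-side counterpart: pull bytes out of a user page through a
 * colour-matched mapping when the folio is known D-cache clean, and
 * fall back to a plain kernel-side copy otherwise.
 */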
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
                         unsigned long vaddr, void *dst, const void *src,
                         unsigned long len)
{
        struct folio *folio = page_folio(page);

        if (boot_cpu_data.dcache.n_aliases && folio_mapped(folio) &&
            test_bit(PG_dcache_clean, &folio->flags)) {
                void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
                memcpy(dst, vfrom, len);
                kunmap_coherent(vfrom);
        } else {
                memcpy(dst, src, len);
                if (boot_cpu_data.dcache.n_aliases)
                        clear_bit(PG_dcache_clean, &folio->flags);
        }
}

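/*
 * COW page copy: read the source through a coherent (colour-matched)
 * mapping when that is safe, then purge the destination's kernel alias
 * if it could alias the user address or the mapping is executable.
 */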
void copy_user_highpage(struct page *to, struct page *from,
                        unsigned long vaddr, struct vm_area_struct *vma)
{
        struct folio *src = page_folio(from);
        void *vfrom, *vto;

        vto = kmap_atomic(to);

        if (boot_cpu_data.dcache.n_aliases && folio_mapped(src) &&
            test_bit(PG_dcache_clean, &src->flags)) {
                vfrom = kmap_coherent(from, vaddr);
                copy_page(vto, vfrom);
                kunmap_coherent(vfrom);
        } else {
                vfrom = kmap_atomic(from);
                copy_page(vto, vfrom);
                kunmap_atomic(vfrom);
        }

        if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK) ||
            (vma->vm_flags & VM_EXEC))
                __flush_purge_region(vto, PAGE_SIZE);

        kunmap_atomic(vto);
        /* Make sure this page is cleared on other CPUs too before using it */
        smp_wmb();
}
EXPORT_SYMBOL(copy_user_highpage);

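/*
 * Zero a page destined for userspace, purging the kernel-side cache
 * lines if they could alias the eventual user mapping.
 */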
void clear_user_highpage(struct page *page, unsigned long vaddr)
{
        void *kaddr = kmap_atomic(page);

        clear_page(kaddr);

        if (pages_do_alias((unsigned long)kaddr, vaddr & PAGE_MASK))
                __flush_purge_region(kaddr, PAGE_SIZE);

        kunmap_atomic(kaddr);
}
EXPORT_SYMBOL(clear_user_highpage);

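/*
 * Called on PTE update: the first time a freshly faulted-in folio is
 * seen (PG_dcache_clean not yet set), purge any stale kernel-side
 * cache lines so the new user mapping starts out coherent.
 */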
void __update_cache(struct vm_area_struct *vma,
                    unsigned long address, pte_t pte)
{
        unsigned long pfn = pte_pfn(pte);

        if (!boot_cpu_data.dcache.n_aliases)
                return;

        if (pfn_valid(pfn)) {
                struct folio *folio = page_folio(pfn_to_page(pfn));
                int dirty = !test_and_set_bit(PG_dcache_clean, &folio->flags);
                if (dirty)
                        __flush_purge_region(folio_address(folio),
                                                folio_size(folio));
        }
}

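/*
 * Flush an anonymous page before the kernel touches it through its own
 * mapping while userspace maps it at a potentially different colour.
 */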
void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
        struct folio *folio = page_folio(page);
        unsigned long addr = (unsigned long) page_address(page);

        if (pages_do_alias(addr, vmaddr)) {
                if (boot_cpu_data.dcache.n_aliases && folio_mapped(folio) &&
                    test_bit(PG_dcache_clean, &folio->flags)) {
                        void *kaddr;

                        kaddr = kmap_coherent(page, vmaddr);
                        /* XXX.. For now kunmap_coherent() does a purge */
                        /* __flush_purge_region((void *)kaddr, PAGE_SIZE); */
                        kunmap_coherent(kaddr);
                } else
                        __flush_purge_region(folio_address(folio),
                                                folio_size(folio));
        }
}

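/*
 * The generic cacheflush entry points. Each packs its arguments into a
 * struct flusher_data where needed and broadcasts the corresponding
 * local_flush_*() hook to all online CPUs.
 */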
void flush_cache_all(void)
{
        cacheop_on_each_cpu(local_flush_cache_all, NULL, 1);
}
EXPORT_SYMBOL(flush_cache_all);

void flush_cache_mm(struct mm_struct *mm)
{
        if (boot_cpu_data.dcache.n_aliases == 0)
                return;

        cacheop_on_each_cpu(local_flush_cache_mm, mm, 1);
}

void flush_cache_dup_mm(struct mm_struct *mm)
{
        if (boot_cpu_data.dcache.n_aliases == 0)
                return;

        cacheop_on_each_cpu(local_flush_cache_dup_mm, mm, 1);
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long addr,
                      unsigned long pfn)
{
        struct flusher_data data;

        data.vma = vma;
        data.addr1 = addr;
        data.addr2 = pfn;

        cacheop_on_each_cpu(local_flush_cache_page, (void *)&data, 1);
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
                       unsigned long end)
{
        struct flusher_data data;

        data.vma = vma;
        data.addr1 = start;
        data.addr2 = end;

        cacheop_on_each_cpu(local_flush_cache_range, (void *)&data, 1);
}
EXPORT_SYMBOL(flush_cache_range);

void flush_dcache_folio(struct folio *folio)
{
        cacheop_on_each_cpu(local_flush_dcache_folio, folio, 1);
}
EXPORT_SYMBOL(flush_dcache_folio);

void flush_icache_range(unsigned long start, unsigned long end)
{
        struct flusher_data data;

        data.vma = NULL;
        data.addr1 = start;
        data.addr2 = end;

        cacheop_on_each_cpu(local_flush_icache_range, (void *)&data, 1);
}
EXPORT_SYMBOL(flush_icache_range);

void flush_icache_pages(struct vm_area_struct *vma, struct page *page,
                unsigned int nr)
{
        /* Nothing uses the VMA, so just pass the folio along */
        cacheop_on_each_cpu(local_flush_icache_folio, page_folio(page), 1);
}

void flush_cache_sigtramp(unsigned long address)
{
        cacheop_on_each_cpu(local_flush_cache_sigtramp, (void *)address, 1);
}

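/*
 * Derive the alias geometry of one cache: alias_mask covers the index
 * bits above the page offset, and n_aliases is the number of page
 * colours those bits produce. As a worked example (numbers assumed
 * here, not taken from this file): 512 sets of 32-byte lines
 * (entry_shift = 5) index 16KiB, so with 4KiB pages
 * alias_mask = ((512 - 1) << 5) & ~0xfff = 0x3000 and
 * n_aliases = (0x3000 >> 12) + 1 = 4 page colours.
 */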
static void compute_alias(struct cache_info *c)
{
#ifdef CONFIG_MMU
        c->alias_mask = ((c->sets - 1) << c->entry_shift) & ~(PAGE_SIZE - 1);
#else
        c->alias_mask = 0;
#endif
        c->n_aliases = c->alias_mask ? (c->alias_mask >> PAGE_SHIFT) + 1 : 0;
}

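/* Dump the probed cache geometry to the kernel log at boot. */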
static void __init emit_cache_params(void)
{
        printk(KERN_NOTICE "I-cache : n_ways=%d n_sets=%d way_incr=%d\n",
                boot_cpu_data.icache.ways,
                boot_cpu_data.icache.sets,
                boot_cpu_data.icache.way_incr);
        printk(KERN_NOTICE "I-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
                boot_cpu_data.icache.entry_mask,
                boot_cpu_data.icache.alias_mask,
                boot_cpu_data.icache.n_aliases);
        printk(KERN_NOTICE "D-cache : n_ways=%d n_sets=%d way_incr=%d\n",
                boot_cpu_data.dcache.ways,
                boot_cpu_data.dcache.sets,
                boot_cpu_data.dcache.way_incr);
        printk(KERN_NOTICE "D-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
                boot_cpu_data.dcache.entry_mask,
                boot_cpu_data.dcache.alias_mask,
                boot_cpu_data.dcache.n_aliases);

        /*
         * Emit Secondary Cache parameters if the CPU has a probed L2.
         */
        if (boot_cpu_data.flags & CPU_HAS_L2_CACHE) {
                printk(KERN_NOTICE "S-cache : n_ways=%d n_sets=%d way_incr=%d\n",
                        boot_cpu_data.scache.ways,
                        boot_cpu_data.scache.sets,
                        boot_cpu_data.scache.way_incr);
                printk(KERN_NOTICE "S-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
                        boot_cpu_data.scache.entry_mask,
                        boot_cpu_data.scache.alias_mask,
                        boot_cpu_data.scache.n_aliases);
        }
}

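/*
 * Boot-time cache setup: compute the alias geometry, default every
 * flush hook to a no-op, and, unless the CCR says the cache is
 * disabled, hand over to the matching CPU-family init routine to
 * install the real flush operations. The init routines are declared
 * __weak so the image still links when a given family's implementation
 * isn't built; the family/type checks ensure only the present one is
 * ever called.
 */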
void __init cpu_cache_init(void)
{
        unsigned int cache_disabled = 0;

#ifdef SH_CCR
        cache_disabled = !(__raw_readl(SH_CCR) & CCR_CACHE_ENABLE);
#endif

        compute_alias(&boot_cpu_data.icache);
        compute_alias(&boot_cpu_data.dcache);
        compute_alias(&boot_cpu_data.scache);

        __flush_wback_region            = noop__flush_region;
        __flush_purge_region            = noop__flush_region;
        __flush_invalidate_region       = noop__flush_region;

        /*
         * No flushing is necessary in the disabled cache case so we can
         * just keep the noop functions in local_flush_..() and __flush_..()
         */
        if (unlikely(cache_disabled))
                goto skip;

        if (boot_cpu_data.type == CPU_J2) {
                extern void __weak j2_cache_init(void);

                j2_cache_init();
        } else if (boot_cpu_data.family == CPU_FAMILY_SH2) {
                extern void __weak sh2_cache_init(void);

                sh2_cache_init();
        }

        if (boot_cpu_data.family == CPU_FAMILY_SH2A) {
                extern void __weak sh2a_cache_init(void);

                sh2a_cache_init();
        }

        if (boot_cpu_data.family == CPU_FAMILY_SH3) {
                extern void __weak sh3_cache_init(void);

                sh3_cache_init();

                if ((boot_cpu_data.type == CPU_SH7705) &&
                    (boot_cpu_data.dcache.sets == 512)) {
                        extern void __weak sh7705_cache_init(void);

                        sh7705_cache_init();
                }
        }

        if ((boot_cpu_data.family == CPU_FAMILY_SH4) ||
            (boot_cpu_data.family == CPU_FAMILY_SH4A) ||
            (boot_cpu_data.family == CPU_FAMILY_SH4AL_DSP)) {
                extern void __weak sh4_cache_init(void);

                sh4_cache_init();

                if ((boot_cpu_data.type == CPU_SH7786) ||
                    (boot_cpu_data.type == CPU_SHX3)) {
                        extern void __weak shx3_cache_init(void);

                        shx3_cache_init();
                }
        }

skip:
        emit_cache_params();
}