arch/arc/mm/cache.c

/*
 * ARC Cache Management
 *
 * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/cache.h>
#include <linux/mmu_context.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/pagemap.h>
#include <asm/cacheflush.h>
#include <asm/cachectl.h>
#include <asm/setup.h>

char *arc_cache_mumbojumbo(int c, char *buf, int len)
{
	int n = 0;

#define PR_CACHE(p, cfg, str)						\
	if (!(p)->ver)							\
		n += scnprintf(buf + n, len - n, str"\t\t: N/A\n");	\
	else								\
		n += scnprintf(buf + n, len - n,			\
			str"\t\t: %uK, %dway/set, %uB Line, %s%s%s\n",	\
			(p)->sz_k, (p)->assoc, (p)->line_len,		\
			(p)->vipt ? "VIPT" : "PIPT",			\
			(p)->alias ? " aliasing" : "",			\
			IS_ENABLED(cfg) ? "" : " (not used)");

	PR_CACHE(&cpuinfo_arc700[c].icache, CONFIG_ARC_HAS_ICACHE, "I-Cache");
	PR_CACHE(&cpuinfo_arc700[c].dcache, CONFIG_ARC_HAS_DCACHE, "D-Cache");

	return buf;
}
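
/*
 * Example output (illustrative, for a hypothetical 32K/2-way I$ and
 * 64K/4-way aliasing D$ config; matches the PR_CACHE format above):
 *
 *	I-Cache		: 32K, 2way/set, 64B Line, VIPT
 *	D-Cache		: 64K, 4way/set, 64B Line, VIPT aliasing
 */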

/*
 * Read the Cache Build Configuration Registers, decode them and save into
 * the cpuinfo structure for later use.
 * No validation done here, simply read/convert the BCRs
 */
void read_decode_cache_bcr(void)
{
	struct cpuinfo_arc_cache *p_ic, *p_dc;
	unsigned int cpu = smp_processor_id();
	struct bcr_cache {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int pad:12, line_len:4, sz:4, config:4, ver:8;
#else
		unsigned int ver:8, config:4, sz:4, line_len:4, pad:12;
#endif
	} ibcr, dbcr;

	p_ic = &cpuinfo_arc700[cpu].icache;
	READ_BCR(ARC_REG_IC_BCR, ibcr);

	if (!ibcr.ver)
		goto dc_chk;

	BUG_ON(ibcr.config != 3);
	p_ic->assoc = 2;		/* Fixed to 2w set assoc */
	p_ic->line_len = 8 << ibcr.line_len;
	p_ic->sz_k = 1 << (ibcr.sz - 1);
	p_ic->ver = ibcr.ver;
	p_ic->vipt = 1;
	p_ic->alias = p_ic->sz_k/p_ic->assoc/TO_KB(PAGE_SIZE) > 1;

dc_chk:
	p_dc = &cpuinfo_arc700[cpu].dcache;
	READ_BCR(ARC_REG_DC_BCR, dbcr);

	if (!dbcr.ver)
		return;

	BUG_ON(dbcr.config != 2);
	p_dc->assoc = 4;		/* Fixed to 4w set assoc */
	p_dc->line_len = 16 << dbcr.line_len;
	p_dc->sz_k = 1 << (dbcr.sz - 1);
	p_dc->ver = dbcr.ver;
	p_dc->vipt = 1;
	p_dc->alias = p_dc->sz_k/p_dc->assoc/TO_KB(PAGE_SIZE) > 1;
}
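
/*
 * Worked example (illustrative, not from the original source): dbcr.sz = 7
 * decodes to sz_k = 1 << 6 = 64K. With the fixed 4-way assoc and 8K pages,
 * way-size = 64K / 4 = 16K, so sz_k/assoc/TO_KB(PAGE_SIZE) = 2 > 1 and the
 * cache is flagged as aliasing (the same physical line can sit at 2 indices).
 */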

/*
 * Line Operation on {I,D}-Cache
 */

#define OP_INV		0x1
#define OP_FLUSH	0x2
#define OP_FLUSH_N_INV	0x3
#define OP_INV_IC	0x4

/*
 * I-Cache Aliasing in ARC700 VIPT caches (MMU v1-v3)
 *
 * ARC VIPT I-cache uses vaddr to index into cache and paddr to match the tag.
 * The orig Cache Management Module "CDU" only required paddr to invalidate a
 * certain line since it sufficed as index in Non-Aliasing VIPT cache-geometry.
 * In fact for distinct V1, V2, P: all of {V1-P}, {V2-P}, {P-P} would end up
 * fetching the exact same line.
 *
 * However for larger Caches (way-size > page-size) - i.e. in Aliasing config,
 * paddr alone could not be used to correctly index the cache.
 *
 * ------------------
 * MMU v1/v2 (Fixed Page Size 8k)
 * ------------------
 * The solution was to provide CDU with these additional vaddr bits. These
 * would be bits [x:13], x would depend on cache-geometry, 13 comes from
 * standard page size of 8k.
 * H/w folks chose [17:13] to be a future safe range, and more so these 5 bits
 * of vaddr could easily be "stuffed" in the paddr as bits [4:0] since the
 * orig 5 bits of paddr were anyways ignored by CDU line ops, as they
 * represent the offset within cache-line. The adv of using this "clumsy"
 * interface for additional info was no new reg was needed in CDU programming
 * model.
 *
 * 17:13 represented the max num of bits passable, actual bits needed were
 * fewer, based on the num-of-aliases possible.
 *  -for 2 alias possibility, only bit 13 needed (32K cache)
 *  -for 4 alias possibility, bits 14:13 needed (64K cache)
 *
 * ------------------
 * MMU v3
 * ------------------
 * This ver of MMU supports variable page sizes (1k-16k): although Linux will
 * only support 8k (default), 16k and 4k.
 * However from hardware perspective, smaller page sizes aggravate aliasing,
 * meaning more vaddr bits needed to disambiguate the cache-line-op;
 * the existing scheme of piggybacking won't work for certain configurations.
 * Two new registers IC_PTAG and DC_PTAG introduced.
 * "tag" bits are provided in PTAG, index bits in existing IVIL/IVDL/FLDL regs
 */
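
/*
 * Illustrative sketch (not part of the original file): how the MMU v1/v2
 * "stuffing" folds vaddr bits [17:13] into paddr bits [4:0]. The helper name
 * is hypothetical; the same computation appears inline in
 * __cache_line_loop_v2() below.
 */
#if 0	/* example only */
static inline unsigned long cdu_stuff_vaddr(unsigned long paddr,
					    unsigned long vaddr)
{
	/*
	 * vaddr >> PAGE_SHIFT drops the 13 page-offset bits (8K page);
	 * & 0x1F keeps the next 5 bits, i.e. vaddr[17:13] lands in
	 * paddr[4:0], which CDU line ops otherwise ignore as line-offset.
	 */
	return paddr | ((vaddr >> PAGE_SHIFT) & 0x1F);
}
#endif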

static inline
void __cache_line_loop_v2(unsigned long paddr, unsigned long vaddr,
			  unsigned long sz, const int op)
{
	unsigned int aux_cmd;
	int num_lines;
	const int full_page = __builtin_constant_p(sz) && sz == PAGE_SIZE;

	if (op == OP_INV_IC) {
		aux_cmd = ARC_REG_IC_IVIL;
	} else {
		/* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
		aux_cmd = op & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
	}

	/* Ensure we properly floor/ceil the non-line aligned/sized requests
	 * and have @paddr - aligned to cache line and integral @num_lines.
	 * This however can be avoided for page sized since:
	 *  -@paddr will be cache-line aligned already (being page aligned)
	 *  -@sz will be integral multiple of line size (being page sized).
	 */
	if (!full_page) {
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;
		vaddr &= CACHE_LINE_MASK;
	}

	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);

	/* MMUv2 and before: paddr contains stuffed vaddr bits */
	paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;

	while (num_lines-- > 0) {
		write_aux_reg(aux_cmd, paddr);
		paddr += L1_CACHE_BYTES;
	}
}
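
/*
 * Worked example of the floor/ceil above (illustrative, assuming 64B lines):
 * for paddr = 0x1234 and sz = 0x40, the line-offset 0x34 is added to sz
 * (sz = 0x74), paddr is floored to 0x1200, and DIV_ROUND_UP(0x74, 0x40) = 2
 * lines cover the original byte range [0x1234, 0x1274).
 */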

static inline
void __cache_line_loop_v3(unsigned long paddr, unsigned long vaddr,
			  unsigned long sz, const int op)
{
	unsigned int aux_cmd, aux_tag;
	int num_lines;
	const int full_page = __builtin_constant_p(sz) && sz == PAGE_SIZE;

	if (op == OP_INV_IC) {
		aux_cmd = ARC_REG_IC_IVIL;
		aux_tag = ARC_REG_IC_PTAG;
	} else {
		aux_cmd = op & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
		aux_tag = ARC_REG_DC_PTAG;
	}

	/* Ensure we properly floor/ceil the non-line aligned/sized requests
	 * and have @paddr - aligned to cache line and integral @num_lines.
	 * This however can be avoided for page sized since:
	 *  -@paddr will be cache-line aligned already (being page aligned)
	 *  -@sz will be integral multiple of line size (being page sized).
	 */
	if (!full_page) {
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;
		vaddr &= CACHE_LINE_MASK;
	}
	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);

	/*
	 * MMUv3, cache ops require paddr in PTAG reg
	 * if V-P const for loop, PTAG can be written once outside loop
	 */
	if (full_page)
		write_aux_reg(aux_tag, paddr);

	while (num_lines-- > 0) {
		if (!full_page) {
			write_aux_reg(aux_tag, paddr);
			paddr += L1_CACHE_BYTES;
		}

		write_aux_reg(aux_cmd, vaddr);
		vaddr += L1_CACHE_BYTES;
	}
}
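
/*
 * Illustrative note: for a full-page op on an 8K page with 64B lines, the
 * loop above issues 128 line commands but only a single PTAG write, since
 * every line within the page shares the same physical tag.
 */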

#if (CONFIG_ARC_MMU_VER < 3)
#define __cache_line_loop	__cache_line_loop_v2
#elif (CONFIG_ARC_MMU_VER == 3)
#define __cache_line_loop	__cache_line_loop_v3
#endif

#ifdef CONFIG_ARC_HAS_DCACHE

/***************************************************************
 * Machine specific helpers for Entire D-Cache or Per Line ops
 */

static inline void __before_dc_op(const int op)
{
	if (op == OP_FLUSH_N_INV) {
		/* Dcache provides 2 cmd: FLUSH or INV
		 * INV in turn has sub-modes: DISCARD or FLUSH-BEFORE
		 * flush-n-inv is achieved by INV cmd but with IM=1
		 * So toggle INV sub-mode depending on op request and default
		 */
		const unsigned int ctl = ARC_REG_DC_CTRL;
		write_aux_reg(ctl, read_aux_reg(ctl) | DC_CTRL_INV_MODE_FLUSH);
	}
}

static inline void __after_dc_op(const int op)
{
	if (op & OP_FLUSH) {
		const unsigned int ctl = ARC_REG_DC_CTRL;
		unsigned int reg;

		/* flush / flush-n-inv both wait */
		while ((reg = read_aux_reg(ctl)) & DC_CTRL_FLUSH_STATUS)
			;

		/* Switch back to default Invalidate mode */
		if (op == OP_FLUSH_N_INV)
			write_aux_reg(ctl, reg & ~DC_CTRL_INV_MODE_FLUSH);
	}
}
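
/*
 * Usage sketch (illustrative): a flush-n-inv of a line range is bracketed as
 *
 *	__before_dc_op(OP_FLUSH_N_INV);	// set DC_CTRL.IM so INV also flushes
 *	__cache_line_loop(paddr, vaddr, sz, OP_FLUSH_N_INV);
 *	__after_dc_op(OP_FLUSH_N_INV);	// wait for completion, restore IM=0
 *
 * which is exactly what __dc_line_op() below does, with IRQs disabled.
 */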

/*
 * Operation on Entire D-Cache
 * @op = {OP_INV, OP_FLUSH, OP_FLUSH_N_INV}
 * Note that constant propagation ensures all the checks are gone
 * in generated code
 */
static inline void __dc_entire_op(const int op)
{
	int aux;

	__before_dc_op(op);

	if (op & OP_INV)	/* Inv or flush-n-inv use same cmd reg */
		aux = ARC_REG_DC_IVDC;
	else
		aux = ARC_REG_DC_FLSH;

	write_aux_reg(aux, 0x1);

	__after_dc_op(op);
}

/* For kernel mappings cache operation: index is same as paddr */
#define __dc_line_op_k(p, sz, op)	__dc_line_op(p, p, sz, op)

/*
 * D-Cache Line ops: Per Line INV (discard or wback+discard) or FLUSH (wback)
 */
static inline void __dc_line_op(unsigned long paddr, unsigned long vaddr,
				unsigned long sz, const int op)
{
	unsigned long flags;

	local_irq_save(flags);

	__before_dc_op(op);

	__cache_line_loop(paddr, vaddr, sz, op);

	__after_dc_op(op);

	local_irq_restore(flags);
}

#else

#define __dc_entire_op(op)
#define __dc_line_op(paddr, vaddr, sz, op)
#define __dc_line_op_k(paddr, sz, op)

#endif /* CONFIG_ARC_HAS_DCACHE */

#ifdef CONFIG_ARC_HAS_ICACHE

static inline void __ic_entire_inv(void)
{
	write_aux_reg(ARC_REG_IC_IVIC, 1);
	read_aux_reg(ARC_REG_IC_CTRL);	/* blocks */
}

static inline void
__ic_line_inv_vaddr_local(unsigned long paddr, unsigned long vaddr,
			  unsigned long sz)
{
	unsigned long flags;

	local_irq_save(flags);
	__cache_line_loop(paddr, vaddr, sz, OP_INV_IC);
	local_irq_restore(flags);
}

#ifndef CONFIG_SMP

#define __ic_line_inv_vaddr(p, v, s)	__ic_line_inv_vaddr_local(p, v, s)

#else

struct ic_inv_args {
	unsigned long paddr, vaddr;
	int sz;
};

static void __ic_line_inv_vaddr_helper(void *info)
{
	struct ic_inv_args *ic_inv = info;

	__ic_line_inv_vaddr_local(ic_inv->paddr, ic_inv->vaddr, ic_inv->sz);
}

static void __ic_line_inv_vaddr(unsigned long paddr, unsigned long vaddr,
				unsigned long sz)
{
	struct ic_inv_args ic_inv = {
		.paddr = paddr,
		.vaddr = vaddr,
		.sz = sz
	};

	on_each_cpu(__ic_line_inv_vaddr_helper, &ic_inv, 1);
}

#endif /* CONFIG_SMP */

#else /* !CONFIG_ARC_HAS_ICACHE */

#define __ic_entire_inv()
#define __ic_line_inv_vaddr(pstart, vstart, sz)

#endif /* CONFIG_ARC_HAS_ICACHE */

/***********************************************************
 * Exported APIs
 */

/*
 * Handle cache congruency of kernel and userspace mappings of page when kernel
 * writes-to/reads-from
 *
 * The idea is to defer flushing of kernel mapping after a WRITE, possible if:
 *  -dcache is NOT aliasing, hence any U/K-mappings of page are congruent
 *  -U-mapping doesn't exist yet for page (finalised in update_mmu_cache)
 *  -In SMP, if hardware caches are coherent
 *
 * There's a corollary case, where kernel READs from a userspace mapped page.
 * If the U-mapping is not congruent to K-mapping, former needs flushing.
 */
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;

	if (!cache_is_vipt_aliasing()) {
		clear_bit(PG_dc_clean, &page->flags);
		return;
	}

	/* don't handle anon pages here */
	mapping = page_mapping(page);
	if (!mapping)
		return;

	/*
	 * pagecache page, file not yet mapped to userspace
	 * Make a note that K-mapping is dirty
	 */
	if (!mapping_mapped(mapping)) {
		clear_bit(PG_dc_clean, &page->flags);
	} else if (page_mapped(page)) {

		/* kernel reading from page with U-mapping */
		unsigned long paddr = (unsigned long)page_address(page);
		unsigned long vaddr = page->index << PAGE_CACHE_SHIFT;

		if (addr_not_cache_congruent(paddr, vaddr))
			__flush_dcache_page(paddr, vaddr);
	}
}
EXPORT_SYMBOL(flush_dcache_page);
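
/*
 * Worked example of (non-)congruency (illustrative, assuming 8K pages and a
 * 16K way-size aliasing D-cache, so vaddr bit 13 selects the cache "color"):
 * a K-mapping at 0x9000_2000 and a U-mapping at 0x4000 of the same physical
 * page differ in bit 13, hence index into different cache lines and are NOT
 * congruent; the K-mapping must then be flushed as done above.
 */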

void dma_cache_wback_inv(unsigned long start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH_N_INV);
}
EXPORT_SYMBOL(dma_cache_wback_inv);

void dma_cache_inv(unsigned long start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_INV);
}
EXPORT_SYMBOL(dma_cache_inv);

void dma_cache_wback(unsigned long start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH);
}
EXPORT_SYMBOL(dma_cache_wback);
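
/*
 * Usage sketch (illustrative): for a DMA_TO_DEVICE transfer a driver fills
 * the buffer and calls dma_cache_wback(start, sz) so the device sees the
 * data in memory; for DMA_FROM_DEVICE it calls dma_cache_inv(start, sz)
 * before reading, so stale cached lines don't mask what the device wrote.
 */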

/*
 * This is API for making I/D Caches consistent when modifying
 * kernel code (loadable modules, kprobes, kgdb...)
 * This is called on insmod, with kernel virtual address for CODE of
 * the module. ARC cache maintenance ops require PHY address thus we
 * need to convert vmalloc addr to PHY addr
 */
void flush_icache_range(unsigned long kstart, unsigned long kend)
{
	unsigned int tot_sz;

	WARN(kstart < TASK_SIZE, "%s() can't handle user vaddr", __func__);

	/* Shortcut for bigger flush ranges.
	 * Here we don't care if this was kernel virtual or phy addr
	 */
	tot_sz = kend - kstart;
	if (tot_sz > PAGE_SIZE) {
		flush_cache_all();
		return;
	}

	/* Case: Kernel Phy addr (0x8000_0000 onwards) */
	if (likely(kstart > PAGE_OFFSET)) {
		/*
		 * The 2nd arg despite being paddr will be used to index icache
		 * This is OK since no alternate virtual mappings will exist
		 * given the callers for this case: kprobe/kgdb in built-in
		 * kernel code only.
		 */
		__sync_icache_dcache(kstart, kstart, kend - kstart);
		return;
	}

	/*
	 * Case: Kernel Vaddr (0x7000_0000 to 0x7fff_ffff)
	 * (1) ARC Cache Maintenance ops only take Phy addr, hence special
	 *     handling of kernel vaddr.
	 *
	 * (2) Despite @tot_sz being < PAGE_SIZE (bigger cases handled already),
	 *     it still needs to handle a 2 page scenario, where the range
	 *     straddles across 2 virtual pages and hence need for loop
	 */
	while (tot_sz > 0) {
		unsigned int off, sz;
		unsigned long phy, pfn;

		off = kstart % PAGE_SIZE;
		pfn = vmalloc_to_pfn((void *)kstart);
		phy = (pfn << PAGE_SHIFT) + off;
		sz = min_t(unsigned int, tot_sz, PAGE_SIZE - off);
		__sync_icache_dcache(phy, kstart, sz);
		kstart += sz;
		tot_sz -= sz;
	}
}
EXPORT_SYMBOL(flush_icache_range);
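
/*
 * Worked example of the 2-page straddle (illustrative, 8K pages): flushing
 * 0x100 bytes starting at offset 0x1F80 within a vmalloc page yields a first
 * chunk of sz = PAGE_SIZE - 0x1F80 = 0x80 bytes, then a second loop iteration
 * handles the remaining 0x80 bytes at the start of the next (possibly
 * physically discontiguous) page.
 */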

/*
 * General purpose helper to make I and D cache lines consistent.
 * @paddr is phy addr of region
 * @vaddr is typically user vaddr (breakpoint) or kernel vaddr (vmalloc)
 * However in one instance, when called by kprobe (for a breakpt in
 * builtin kernel code) @vaddr will be paddr only, meaning CDU operation will
 * use a paddr to index the cache (despite VIPT). This is fine since a
 * builtin kernel page will not have any virtual mappings.
 * kprobe on loadable module will be kernel vaddr.
 */
void __sync_icache_dcache(unsigned long paddr, unsigned long vaddr, int len)
{
	__dc_line_op(paddr, vaddr, len, OP_FLUSH_N_INV);
	__ic_line_inv_vaddr(paddr, vaddr, len);
}

/* wrapper to compile time eliminate alignment checks in flush loop */
void __inv_icache_page(unsigned long paddr, unsigned long vaddr)
{
	__ic_line_inv_vaddr(paddr, vaddr, PAGE_SIZE);
}

/*
 * wrapper to clearout kernel or userspace mappings of a page
 * For kernel mappings @vaddr == @paddr
 */
void __flush_dcache_page(unsigned long paddr, unsigned long vaddr)
{
	__dc_line_op(paddr, vaddr & PAGE_MASK, PAGE_SIZE, OP_FLUSH_N_INV);
}

noinline void flush_cache_all(void)
{
	unsigned long flags;

	local_irq_save(flags);

	__ic_entire_inv();
	__dc_entire_op(OP_FLUSH_N_INV);

	local_irq_restore(flags);
}

#ifdef CONFIG_ARC_CACHE_VIPT_ALIASING

void flush_cache_mm(struct mm_struct *mm)
{
	flush_cache_all();
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr,
		      unsigned long pfn)
{
	unsigned int paddr = pfn << PAGE_SHIFT;

	u_vaddr &= PAGE_MASK;

	__flush_dcache_page(paddr, u_vaddr);

	if (vma->vm_flags & VM_EXEC)
		__inv_icache_page(paddr, u_vaddr);
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end)
{
	flush_cache_all();
}

void flush_anon_page(struct vm_area_struct *vma, struct page *page,
		     unsigned long u_vaddr)
{
	/* TBD: do we really need to clear the kernel mapping */
	__flush_dcache_page(page_address(page), u_vaddr);
	__flush_dcache_page(page_address(page), page_address(page));
}

#endif

void copy_user_highpage(struct page *to, struct page *from,
			unsigned long u_vaddr, struct vm_area_struct *vma)
{
	unsigned long kfrom = (unsigned long)page_address(from);
	unsigned long kto = (unsigned long)page_address(to);
	int clean_src_k_mappings = 0;

	/*
	 * If SRC page was already mapped in userspace AND its U-mapping is
	 * not congruent with K-mapping, sync former to physical page so that
	 * K-mapping in memcpy below, sees the right data
	 *
	 * Note that while @u_vaddr refers to DST page's userspace vaddr, it is
	 * equally valid for SRC page as well
	 */
	if (page_mapped(from) && addr_not_cache_congruent(kfrom, u_vaddr)) {
		__flush_dcache_page(kfrom, u_vaddr);
		clean_src_k_mappings = 1;
	}

	copy_page((void *)kto, (void *)kfrom);

	/*
	 * Mark DST page K-mapping as dirty for a later finalization by
	 * update_mmu_cache(). Although the finalization could have been done
	 * here as well (given that both vaddr/paddr are available).
	 * But update_mmu_cache() already has code to do that for other
	 * non copied user pages (e.g. read faults which wire in pagecache page
	 * directly).
	 */
	clear_bit(PG_dc_clean, &to->flags);

	/*
	 * if SRC was already usermapped and non-congruent to kernel mapping
	 * sync the kernel mapping back to physical page
	 */
	if (clean_src_k_mappings) {
		__flush_dcache_page(kfrom, kfrom);
		set_bit(PG_dc_clean, &from->flags);
	} else {
		clear_bit(PG_dc_clean, &from->flags);
	}
}

void clear_user_page(void *to, unsigned long u_vaddr, struct page *page)
{
	clear_page(to);
	clear_bit(PG_dc_clean, &page->flags);
}

/**********************************************************************
 * Explicit Cache flush request from user space via syscall
 * Needed for JITs which generate code on the fly
 */
SYSCALL_DEFINE3(cacheflush, uint32_t, start, uint32_t, sz, uint32_t, flags)
{
	/* TBD: optimize this */
	flush_cache_all();
	return 0;
}
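
/*
 * Userspace usage sketch (illustrative; the exact syscall number macro in
 * ARC's uapi headers is an assumption here): after emitting instructions
 * into a buffer, a JIT would do
 *
 *	syscall(__NR_cacheflush, code_buf, code_sz, 0);
 *
 * before branching to the freshly written code.
 */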

void arc_cache_init(void)
{
	unsigned int __maybe_unused cpu = smp_processor_id();
	char str[256];

	printk(arc_cache_mumbojumbo(0, str, sizeof(str)));

	if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE)) {
		struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;

		if (!ic->ver)
			panic("cache support enabled but non-existent cache\n");

		if (ic->line_len != L1_CACHE_BYTES)
			panic("ICache line [%d] != kernel Config [%d]",
			      ic->line_len, L1_CACHE_BYTES);

		if (ic->ver != CONFIG_ARC_MMU_VER)
			panic("Cache ver [%d] doesn't match MMU ver [%d]\n",
			      ic->ver, CONFIG_ARC_MMU_VER);
	}

	if (IS_ENABLED(CONFIG_ARC_HAS_DCACHE)) {
		struct cpuinfo_arc_cache *dc = &cpuinfo_arc700[cpu].dcache;
		int handled;

		if (!dc->ver)
			panic("cache support enabled but non-existent cache\n");

		if (dc->line_len != L1_CACHE_BYTES)
			panic("DCache line [%d] != kernel Config [%d]",
			      dc->line_len, L1_CACHE_BYTES);

		/* check for D-Cache aliasing */
		handled = IS_ENABLED(CONFIG_ARC_CACHE_VIPT_ALIASING);

		if (dc->alias && !handled)
			panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
		else if (!dc->alias && handled)
			panic("Disable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
	}
}