Commit | Line | Data |
---|---|---|
d2912cb1 | 1 | // SPDX-License-Identifier: GPL-2.0-only |
95d6976d | 2 | /* |
8ea2ddff | 3 | * ARC Cache Management |
95d6976d | 4 | * |
8ea2ddff | 5 | * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com) |
95d6976d | 6 | * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) |
95d6976d | 7 | */ |
8 | ||
9 | #include <linux/module.h> | |
10 | #include <linux/mm.h> | |
11 | #include <linux/sched.h> | |
12 | #include <linux/cache.h> | |
13 | #include <linux/mmu_context.h> | |
14 | #include <linux/syscalls.h> | |
15 | #include <linux/uaccess.h> | |
4102b533 | 16 | #include <linux/pagemap.h> |
95d6976d | 17 | #include <asm/cacheflush.h> |
18 | #include <asm/cachectl.h> | |
19 | #include <asm/setup.h> | |
20 | ||
0d77117f | 21 | #ifdef CONFIG_ISA_ARCV2 |
22 | #define USE_RGN_FLSH 1 | |
23 | #endif | |
24 | ||
795f4558 | 25 | static int l2_line_sz; |
cf986d47 | 26 | static int ioc_exists; |
d0e73e2a | 27 | int slc_enable = 1, ioc_enable = 1; |
deaf7565 | 28 | unsigned long perip_base = ARC_UNCACHED_ADDR_SPACE; /* legacy value for boot */ |
26c01c49 | 29 | unsigned long perip_end = 0xFFFFFFFF; /* legacy value */ |
795f4558 | 30 | |
17a5ed56 | 31 | static struct cpuinfo_arc_cache { |
32 | unsigned int sz_k, line_len, colors; | |
33 | } ic_info, dc_info, slc_info; | |
34 | ||
28b4af72 | 35 | void (*_cache_line_loop_ic_fn)(phys_addr_t paddr, unsigned long vaddr, |
7d3d162b | 36 | unsigned long sz, const int op, const int full_page); |
bcc4d65a | 37 | |
f5db19e9 | 38 | void (*__dma_cache_wback_inv)(phys_addr_t start, unsigned long sz); |
39 | void (*__dma_cache_inv)(phys_addr_t start, unsigned long sz); | |
40 | void (*__dma_cache_wback)(phys_addr_t start, unsigned long sz); | |
f2b0b25a | 41 | |
fad84e39 | 42 | static int read_decode_cache_bcr_arcv2(int c, char *buf, int len) |
95d6976d | 43 | { |
17a5ed56 | 44 | struct cpuinfo_arc_cache *p_slc = &slc_info; |
45 | struct bcr_identity ident; | |
d1f317d8 | 46 | struct bcr_generic sbcr; |
17a5ed56 | 47 | struct bcr_clust_cfg cbcr; |
48 | struct bcr_volatile vol; | |
49 | int n = 0; | |
26c01c49 | 50 | |
fd0881a2 | 51 | READ_BCR(ARC_REG_SLC_BCR, sbcr); |
52 | if (sbcr.ver) { | |
17a5ed56 | 53 | struct bcr_slc_cfg slc_cfg; |
fd0881a2 | 54 | READ_BCR(ARC_REG_SLC_CFG, slc_cfg); |
fd0881a2 | 55 | p_slc->sz_k = 128 << slc_cfg.sz; |
56 | l2_line_sz = p_slc->line_len = (slc_cfg.lsz == 0) ? 128 : 64; | |
17a5ed56 | 57 | n += scnprintf(buf + n, len - n, |
58 | "SLC\t\t: %uK, %uB Line%s\n", | |
59 | p_slc->sz_k, p_slc->line_len, IS_USED_RUN(slc_enable)); | |
fd0881a2 | 60 | } |
61 | ||
62 | READ_BCR(ARC_REG_CLUSTER_BCR, cbcr); | |
99bd5fcc | 63 | if (cbcr.c) { |
fd0881a2 | 64 | ioc_exists = 1; |
99bd5fcc | 65 | |
66 | /* | |
67 | * As of today we don't support both IOC and ZONE_HIGHMEM enabled |
68 | * simultaneously. This happens because as of today IOC aperture covers | |
69 | * only ZONE_NORMAL (low mem) and any dma transactions outside this | |
70 | * region won't be HW coherent. | |
71 | * If we want to use both IOC and ZONE_HIGHMEM we can use | |
72 | * bounce_buffer to handle dma transactions to HIGHMEM. | |
73 | * Also it is possible to modify dma_direct cache ops or increase IOC | |
74 | * aperture size if we are planning to use HIGHMEM without PAE. | |
75 | */ | |
76 | if (IS_ENABLED(CONFIG_HIGHMEM) || is_pae40_enabled()) | |
77 | ioc_enable = 0; | |
78 | } else { | |
cf986d47 | 79 | ioc_enable = 0; |
99bd5fcc | 80 | } |
deaf7565 | 81 | |
17a5ed56 | 82 | READ_BCR(AUX_IDENTITY, ident); |
83 | ||
26c01c49 | 84 | /* HS 2.0 didn't have AUX_VOL */ |
17a5ed56 | 85 | if (ident.family > 0x51) { |
26c01c49 | 86 | READ_BCR(AUX_VOL, vol); |
87 | perip_base = vol.start << 28; | |
88 | /* HS 3.0 has limit and strict-ordering fields */ | |
17a5ed56 | 89 | if (ident.family > 0x52) |
26c01c49 | 90 | perip_end = (vol.limit << 28) - 1; |
91 | } | |
17a5ed56 | 92 | |
93 | n += scnprintf(buf + n, len - n, "Peripherals\t: %#lx%s%s\n", | |
94 | perip_base, | |
95 | IS_AVAIL3(ioc_exists, ioc_enable, ", IO-Coherency (per-device) ")); | |
96 | ||
fad84e39 | 97 | return n; |
fd0881a2 | 98 | } |
99 | ||
fad84e39 | 100 | int arc_cache_mumbojumbo(int c, char *buf, int len) |
fd0881a2 | 101 | { |
17a5ed56 | 102 | struct cpuinfo_arc_cache *p_ic = &ic_info, *p_dc = &dc_info; |
103 | struct bcr_cache ibcr, dbcr; | |
104 | int vipt, assoc; | |
105 | int n = 0; | |
fd0881a2 | 106 | |
95d6976d | 107 | READ_BCR(ARC_REG_IC_BCR, ibcr); |
da40ff48 | 108 | if (!ibcr.ver) |
109 | goto dc_chk; | |
110 | ||
17a5ed56 | 111 | if (is_isa_arcompact() && (ibcr.ver <= 3)) { |
d1f317d8 | 112 | BUG_ON(ibcr.config != 3); |
17a5ed56 | 113 | assoc = 2; /* Fixed to 2w set assoc */ |
114 | } else if (is_isa_arcv2() && (ibcr.ver >= 4)) { | |
115 | assoc = 1 << ibcr.config; /* 1,2,4,8 */ | |
d1f317d8 | 116 | } |
117 | ||
95d6976d | 118 | p_ic->line_len = 8 << ibcr.line_len; |
da40ff48 | 119 | p_ic->sz_k = 1 << (ibcr.sz - 1); |
17a5ed56 | 120 | p_ic->colors = p_ic->sz_k/assoc/TO_KB(PAGE_SIZE); |
121 | ||
122 | n += scnprintf(buf + n, len - n, | |
123 | "I-Cache\t\t: %uK, %dway/set, %uB Line, VIPT%s%s\n", | |
124 | p_ic->sz_k, assoc, p_ic->line_len, | |
125 | p_ic->colors > 1 ? " aliasing" : "", | |
126 | IS_USED_CFG(CONFIG_ARC_HAS_ICACHE)); | |
95d6976d | 127 | |
da40ff48 | 128 | dc_chk: |
95d6976d | 129 | READ_BCR(ARC_REG_DC_BCR, dbcr); |
da40ff48 | 130 | if (!dbcr.ver) |
d1f317d8 | 131 | goto slc_chk; |
132 | ||
17a5ed56 | 133 | if (is_isa_arcompact() && (dbcr.ver <= 3)) { |
d1f317d8 | 134 | BUG_ON(dbcr.config != 2); |
17a5ed56 | 135 | vipt = 1; |
136 | assoc = 4; /* Fixed to 4w set assoc */ | |
137 | p_dc->colors = p_dc->sz_k/assoc/TO_KB(PAGE_SIZE); | |
138 | } else if (is_isa_arcv2() && (dbcr.ver >= 4)) { | |
139 | vipt = 0; | |
140 | assoc = 1 << dbcr.config; /* 1,2,4,8 */ | |
141 | p_dc->colors = 1; /* PIPT so can't VIPT alias */ | |
d1f317d8 | 142 | } |
da40ff48 | 143 | |
95d6976d | 144 | p_dc->line_len = 16 << dbcr.line_len; |
da40ff48 | 145 | p_dc->sz_k = 1 << (dbcr.sz - 1); |
d1f317d8 | 146 | |
17a5ed56 | 147 | n += scnprintf(buf + n, len - n, |
6732c0e4 | 148 | "D-Cache\t\t: %uK, %dway/set, %uB Line, %s%s\n", |
17a5ed56 | 149 | p_dc->sz_k, assoc, p_dc->line_len, |
150 | vipt ? "VIPT" : "PIPT", | |
17a5ed56 | 151 | IS_USED_CFG(CONFIG_ARC_HAS_DCACHE)); |
152 | ||
d1f317d8 | 153 | slc_chk: |
fd0881a2 | 154 | if (is_isa_arcv2()) |
fad84e39 | 155 | n += read_decode_cache_bcr_arcv2(c, buf + n, len - n); |
17a5ed56 | 156 | |
fad84e39 | 157 | return n; |
95d6976d | 158 | } |
159 | ||
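The `colors` computation above is what decides whether a VIPT cache can alias: if one way of the cache spans more than one page, two virtual addresses mapping the same physical page can land in different cache sets. A standalone sketch of the arithmetic, using hypothetical geometry values (ARC's default page size is 8K):

```c
/* Sketch of the cache-color math from arc_cache_mumbojumbo() above.
 * Hypothetical geometry: 32K 2-way I$, 8K pages (ARC default).
 */
#include <stdio.h>

int main(void)
{
	unsigned int sz_k = 32, assoc = 2, page_k = 8;
	unsigned int colors = sz_k / assoc / page_k;	/* way size / page size */

	/* 32/2/8 = 2 colors -> same paddr can alias under 2 vaddrs */
	printf("colors = %u (%s)\n", colors,
	       colors > 1 ? "aliasing" : "non-aliasing");
	return 0;
}
```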
160 | /* | |
8ea2ddff | 161 | * Line Operation on {I,D}-Cache |
95d6976d | 162 | */ |
95d6976d | 163 | |
164 | #define OP_INV 0x1 | |
165 | #define OP_FLUSH 0x2 | |
166 | #define OP_FLUSH_N_INV 0x3 | |
bd12976c | 167 | #define OP_INV_IC 0x4 |
168 | ||
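Note the deliberate bit encoding of the `OP_*` values: `OP_FLUSH_N_INV` is exactly `OP_INV | OP_FLUSH`, which is why the helpers below can test `op & OP_INV` to pick the invalidate command register and `op & OP_FLUSH` to decide whether to poll for flush completion. A minimal sketch of that invariant:

```c
/* Sketch of the OP_* bit encoding relied upon by the cache helpers. */
#include <assert.h>

#define OP_INV		0x1
#define OP_FLUSH	0x2
#define OP_FLUSH_N_INV	0x3	/* == OP_INV | OP_FLUSH */

int main(void)
{
	/* flush-n-inv satisfies both predicates used in the helpers */
	assert(OP_FLUSH_N_INV & OP_INV);	/* selects the INV cmd reg */
	assert(OP_FLUSH_N_INV & OP_FLUSH);	/* triggers the wait-on-flush */
	return 0;
}
```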
169 | /* | |
288ff7de | 170 | * Cache Flush programming model |
8ea2ddff | 171 | * |
288ff7de | 172 | * ARC700 MMUv3 I$ and D$ are both VIPT and can potentially alias. |
173 | * Programming model requires both paddr and vaddr irrespective of aliasing |
174 | * considerations: |
175 | * - vaddr in {I,D}C_IV?L | |
176 | * - paddr in {I,D}C_PTAG | |
8ea2ddff | 177 | * |
288ff7de | 178 | * In HS38x (MMUv4), D$ is PIPT, I$ is VIPT and can still alias. |
179 | * Programming model is different for aliasing vs. non-aliasing I$ | |
180 | * - D$ / Non-aliasing I$: only paddr in {I,D}C_IV?L | |
181 | * - Aliasing I$: same as ARC700 above (so MMUv3 routine used for MMUv4 I$) | |
8ea2ddff | 182 | * |
288ff7de | 183 | * - If PAE40 is enabled, independent of aliasing considerations, the higher |
184 | * bits need to be written into PTAG_HI |
bd12976c | 185 | */ |
8ea2ddff | 186 | |
11e14896 | 187 | static inline |
28b4af72 | 188 | void __cache_line_loop_v3(phys_addr_t paddr, unsigned long vaddr, |
7d3d162b | 189 | unsigned long sz, const int op, const int full_page) |
11e14896 | 190 | { |
191 | unsigned int aux_cmd, aux_tag; | |
192 | int num_lines; | |
11e14896 | 193 | |
194 | if (op == OP_INV_IC) { | |
195 | aux_cmd = ARC_REG_IC_IVIL; | |
196 | aux_tag = ARC_REG_IC_PTAG; | |
197 | } else { | |
198 | aux_cmd = op & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL; | |
199 | aux_tag = ARC_REG_DC_PTAG; | |
200 | } | |
201 | ||
202 | /* Ensure we properly floor/ceil the non-line aligned/sized requests | |
203 | * and have @paddr - aligned to cache line and integral @num_lines. | |
204 | * This however can be avoided for page-sized requests since: |
205 | * -@paddr will be cache-line aligned already (being page aligned) | |
206 | * -@sz will be integral multiple of line size (being page sized). | |
207 | */ | |
208 | if (!full_page) { | |
209 | sz += paddr & ~CACHE_LINE_MASK; | |
210 | paddr &= CACHE_LINE_MASK; | |
211 | vaddr &= CACHE_LINE_MASK; | |
212 | } | |
213 | num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES); | |
214 | ||
215 | /* | |
216 | * MMUv3, cache ops require paddr in PTAG reg | |
217 | * if V-P const for loop, PTAG can be written once outside loop | |
218 | */ | |
219 | if (full_page) | |
b053940d | 220 | write_aux_reg(aux_tag, paddr); |
bd12976c | 221 | |
5a364c2a | 222 | /* |
223 | * This is technically for MMU v4, using the MMU v3 programming model | |
2547476a | 224 | * Special work for HS38 aliasing I-cache configuration with PAE40 |
5a364c2a | 225 | * - upper 8 bits of paddr need to be written into PTAG_HI |
226 | * - (and needs to be written before the lower 32 bits) | |
227 | * Note that PTAG_HI is hoisted outside the line loop | |
228 | */ | |
229 | if (is_pae40_enabled() && op == OP_INV_IC) | |
230 | write_aux_reg(ARC_REG_IC_PTAG_HI, (u64)paddr >> 32); | |
231 | ||
bd12976c | 232 | while (num_lines-- > 0) { |
11e14896 | 233 | if (!full_page) { |
d4599baf | 234 | write_aux_reg(aux_tag, paddr); |
235 | paddr += L1_CACHE_BYTES; | |
236 | } | |
bd12976c | 237 | |
238 | write_aux_reg(aux_cmd, vaddr); | |
239 | vaddr += L1_CACHE_BYTES; | |
bd12976c | 240 | } |
241 | } | |
95d6976d | 242 | |
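The floor/ceil fix-up at the top of the line loops is easiest to see with concrete numbers; a standalone sketch with hypothetical values, assuming 64-byte lines:

```c
/* Worked example of the non-aligned request fix-up in the line loops. */
#include <stdio.h>

#define L1_CACHE_BYTES	64UL
#define CACHE_LINE_MASK	(~(L1_CACHE_BYTES - 1))
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long paddr = 0x80001234, sz = 0x40;	/* starts mid-line */

	sz += paddr & ~CACHE_LINE_MASK;	/* floor: sz = 0x40 + 0x34 = 0x74 */
	paddr &= CACHE_LINE_MASK;	/* paddr = 0x80001200 */

	/* ceil: 0x74 bytes -> 2 lines, i.e. 0x80001200..0x8000127f */
	printf("num_lines = %lu\n", DIV_ROUND_UP(sz, L1_CACHE_BYTES));
	return 0;
}
```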
0d77117f | 243 | #ifndef USE_RGN_FLSH |
244 | ||
d1f317d8 | 245 | /* ARCv2: D$ is PIPT and I$ here is non-aliasing, so line ops need only paddr |
d1f317d8 | 246 | */ |
247 | static inline | |
28b4af72 | 248 | void __cache_line_loop_v4(phys_addr_t paddr, unsigned long vaddr, |
7d3d162b | 249 | unsigned long sz, const int op, const int full_page) |
d1f317d8 | 250 | { |
251 | unsigned int aux_cmd; | |
252 | int num_lines; | |
d1f317d8 | 253 | |
7d3d162b | 254 | if (op == OP_INV_IC) { |
d1f317d8 | 255 | aux_cmd = ARC_REG_IC_IVIL; |
256 | } else { | |
257 | /* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */ | |
7d3d162b | 258 | aux_cmd = op & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL; |
d1f317d8 | 259 | } |
260 | ||
261 | /* Ensure we properly floor/ceil the non-line aligned/sized requests | |
262 | * and have @paddr - aligned to cache line and integral @num_lines. | |
264 | * This however can be avoided for page-sized requests since: |
264 | * -@paddr will be cache-line aligned already (being page aligned) | |
265 | * -@sz will be integral multiple of line size (being page sized). | |
266 | */ | |
7d3d162b | 267 | if (!full_page) { |
d1f317d8 | 268 | sz += paddr & ~CACHE_LINE_MASK; |
269 | paddr &= CACHE_LINE_MASK; | |
270 | } | |
271 | ||
272 | num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES); | |
273 | ||
5a364c2a | 274 | /* |
275 | * For HS38 PAE40 configuration | |
276 | * - upper 8 bits of paddr need to be written into PTAG_HI | |
277 | * - (and needs to be written before the lower 32 bits) | |
278 | */ | |
279 | if (is_pae40_enabled()) { | |
7d3d162b | 280 | if (op == OP_INV_IC) |
5a364c2a | 281 | /* |
282 | * Non aliasing I-cache in HS38, | |
283 | * aliasing I-cache handled in __cache_line_loop_v3() | |
284 | */ | |
285 | write_aux_reg(ARC_REG_IC_PTAG_HI, (u64)paddr >> 32); | |
286 | else | |
287 | write_aux_reg(ARC_REG_DC_PTAG_HI, (u64)paddr >> 32); | |
288 | } | |
289 | ||
d1f317d8 | 290 | while (num_lines-- > 0) { |
291 | write_aux_reg(aux_cmd, paddr); | |
292 | paddr += L1_CACHE_BYTES; | |
293 | } | |
294 | } | |
295 | ||
0d77117f | 296 | #else |
297 | ||
298 | /* | |
299 | * optimized flush operation which takes a region as opposed to iterating per line | |
300 | */ | |
301 | static inline | |
302 | void __cache_line_loop_v4(phys_addr_t paddr, unsigned long vaddr, | |
303 | unsigned long sz, const int op, const int full_page) | |
304 | { | |
ee40bd1e | 305 | unsigned int s, e; |
0d77117f | 306 | |
307 | /* Only for Non aliasing I-cache in HS38 */ | |
308 | if (op == OP_INV_IC) { | |
309 | s = ARC_REG_IC_IVIR; | |
310 | e = ARC_REG_IC_ENDR; | |
311 | } else { | |
312 | s = ARC_REG_DC_STARTR; | |
313 | e = ARC_REG_DC_ENDR; | |
314 | } | |
315 | ||
316 | if (!full_page) { | |
317 | /* for any leading gap between @paddr and start of cache line */ | |
318 | sz += paddr & ~CACHE_LINE_MASK; | |
319 | paddr &= CACHE_LINE_MASK; | |
320 | ||
321 | /* | |
322 | * account for any trailing gap to end of cache line | |
323 | * this is equivalent to DIV_ROUND_UP() in line ops above | |
324 | */ | |
325 | sz += L1_CACHE_BYTES - 1; | |
326 | } | |
327 | ||
328 | if (is_pae40_enabled()) { | |
329 | /* TBD: check if crossing 4TB boundary */ | |
330 | if (op == OP_INV_IC) | |
331 | write_aux_reg(ARC_REG_IC_PTAG_HI, (u64)paddr >> 32); | |
332 | else | |
333 | write_aux_reg(ARC_REG_DC_PTAG_HI, (u64)paddr >> 32); | |
334 | } | |
335 | ||
0d77117f | 336 | /* ENDR needs to be set ahead of START */ |
337 | write_aux_reg(e, paddr + sz); /* ENDR is exclusive */ | |
338 | write_aux_reg(s, paddr); | |
339 | ||
340 | /* caller waits on DC_CTRL.FS */ | |
341 | } | |
342 | ||
343 | #endif | |
344 | ||
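The region variant trades the per-line loop for two aux-register writes; the only subtlety is the bounds math, since END is exclusive and must be programmed before the (triggering) start register. A sketch with hypothetical values, again assuming 64-byte lines:

```c
/* Worked example of the region-flush bounds used by the RGN_FLSH variant. */
#include <stdio.h>

#define L1_CACHE_BYTES	64UL
#define CACHE_LINE_MASK	(~(L1_CACHE_BYTES - 1))

int main(void)
{
	unsigned long paddr = 0x80001234, sz = 8;	/* 8 bytes, mid-line */

	sz += paddr & ~CACHE_LINE_MASK;		/* leading gap: sz = 0x3c */
	paddr &= CACHE_LINE_MASK;		/* paddr = 0x80001200 */
	sz += L1_CACHE_BYTES - 1;		/* trailing gap: sz = 0x7b */

	/* HW operates on [START, END): covers the whole touched line */
	printf("START=%#lx END=%#lx\n", paddr, paddr + sz);
	return 0;
}
```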
288ff7de | 345 | #ifdef CONFIG_ARC_MMU_V3 |
11e14896 | 346 | #define __cache_line_loop __cache_line_loop_v3 |
288ff7de | 347 | #else |
d1f317d8 | 348 | #define __cache_line_loop __cache_line_loop_v4 |
11e14896 | 349 | #endif |
350 | ||
95d6976d | 351 | #ifdef CONFIG_ARC_HAS_DCACHE |
352 | ||
353 | /*************************************************************** | |
354 | * Machine specific helpers for Entire D-Cache or Per Line ops | |
355 | */ | |
356 | ||
ee40bd1e | 357 | #ifndef USE_RGN_FLSH |
358 | /* | |
359 | * this version avoids extra read/write of DC_CTRL for flush or invalidate ops |
360 | * in the non region flush regime (such as for ARCompact) | |
361 | */ | |
6c310681 | 362 | static inline void __before_dc_op(const int op) |
95d6976d | 363 | { |
1b1a22b1 | 364 | if (op == OP_FLUSH_N_INV) { |
365 | /* Dcache provides 2 cmd: FLUSH or INV | |
ecaa054f | 366 | * INV in turn has sub-modes: DISCARD or FLUSH-BEFORE |
1b1a22b1 | 367 | * flush-n-inv is achieved by INV cmd but with IM=1 |
368 | * So toggle INV sub-mode depending on op request and default | |
369 | */ | |
6c310681 | 370 | const unsigned int ctl = ARC_REG_DC_CTRL; |
371 | write_aux_reg(ctl, read_aux_reg(ctl) | DC_CTRL_INV_MODE_FLUSH); | |
1b1a22b1 | 372 | } |
1b1a22b1 | 373 | } |
374 | ||
ee40bd1e | 375 | #else |
376 | ||
377 | static inline void __before_dc_op(const int op) | |
378 | { | |
379 | const unsigned int ctl = ARC_REG_DC_CTRL; | |
380 | unsigned int val = read_aux_reg(ctl); | |
381 | ||
382 | if (op == OP_FLUSH_N_INV) { | |
383 | val |= DC_CTRL_INV_MODE_FLUSH; | |
384 | } | |
385 | ||
386 | if (op != OP_INV_IC) { | |
387 | /* | |
388 | * Flush / Invalidate is provided by DC_CTRL.RNG_OP 0 or 1 | |
389 | * combined Flush-n-invalidate uses DC_CTRL.IM = 1 set above | |
390 | */ | |
391 | val &= ~DC_CTRL_RGN_OP_MSK; | |
392 | if (op & OP_INV) | |
393 | val |= DC_CTRL_RGN_OP_INV; | |
394 | } | |
395 | write_aux_reg(ctl, val); | |
396 | } | |
397 | ||
398 | #endif | |
399 | ||
400 | ||
6c310681 | 401 | static inline void __after_dc_op(const int op) |
1b1a22b1 | 402 | { |
6c310681 | 403 | if (op & OP_FLUSH) { |
404 | const unsigned int ctl = ARC_REG_DC_CTRL; | |
405 | unsigned int reg; | |
1b1a22b1 | 406 | |
6c310681 | 407 | /* flush / flush-n-inv both wait */ |
408 | while ((reg = read_aux_reg(ctl)) & DC_CTRL_FLUSH_STATUS) | |
409 | ; | |
410 | ||
411 | /* Switch back to default Invalidate mode */ | |
412 | if (op == OP_FLUSH_N_INV) | |
413 | write_aux_reg(ctl, reg & ~DC_CTRL_INV_MODE_FLUSH); | |
414 | } | |
95d6976d | 415 | } |
416 | ||
417 | /* | |
418 | * Operation on Entire D-Cache | |
8ea2ddff | 419 | * @op = {OP_INV, OP_FLUSH, OP_FLUSH_N_INV} |
95d6976d | 420 | * Note that constant propagation ensures all the checks are gone |
421 | * in generated code | |
422 | */ | |
8ea2ddff | 423 | static inline void __dc_entire_op(const int op) |
95d6976d | 424 | { |
95d6976d | 425 | int aux; |
426 | ||
6c310681 | 427 | __before_dc_op(op); |
95d6976d | 428 | |
8ea2ddff | 429 | if (op & OP_INV) /* Inv or flush-n-inv use same cmd reg */ |
95d6976d | 430 | aux = ARC_REG_DC_IVDC; |
431 | else | |
432 | aux = ARC_REG_DC_FLSH; | |
433 | ||
434 | write_aux_reg(aux, 0x1); | |
435 | ||
6c310681 | 436 | __after_dc_op(op); |
95d6976d | 437 | } |
438 | ||
8c47f83b | 439 | static inline void __dc_disable(void) |
440 | { | |
441 | const int r = ARC_REG_DC_CTRL; | |
442 | ||
443 | __dc_entire_op(OP_FLUSH_N_INV); | |
444 | write_aux_reg(r, read_aux_reg(r) | DC_CTRL_DIS); | |
445 | } | |
446 | ||
447 | static void __dc_enable(void) | |
448 | { | |
449 | const int r = ARC_REG_DC_CTRL; | |
450 | ||
451 | write_aux_reg(r, read_aux_reg(r) & ~DC_CTRL_DIS); | |
452 | } | |
453 | ||
4102b533 | 454 | /* For kernel mappings cache operation: index is same as paddr */ |
6ec18a81 | 455 | #define __dc_line_op_k(p, sz, op) __dc_line_op(p, p, sz, op) |
456 | ||
95d6976d | 457 | /* |
8ea2ddff | 458 | * D-Cache Line ops: Per Line INV (discard or wback+discard) or FLUSH (wback) |
95d6976d | 459 | */ |
28b4af72 | 460 | static inline void __dc_line_op(phys_addr_t paddr, unsigned long vaddr, |
8ea2ddff | 461 | unsigned long sz, const int op) |
95d6976d | 462 | { |
7d3d162b | 463 | const int full_page = __builtin_constant_p(sz) && sz == PAGE_SIZE; |
1b1a22b1 | 464 | unsigned long flags; |
95d6976d | 465 | |
466 | local_irq_save(flags); | |
467 | ||
6c310681 | 468 | __before_dc_op(op); |
95d6976d | 469 | |
7d3d162b | 470 | __cache_line_loop(paddr, vaddr, sz, op, full_page); |
95d6976d | 471 | |
6c310681 | 472 | __after_dc_op(op); |
95d6976d | 473 | |
474 | local_irq_restore(flags); | |
475 | } | |
476 | ||
477 | #else | |
478 | ||
8ea2ddff | 479 | #define __dc_entire_op(op) |
8c47f83b | 480 | #define __dc_disable() |
481 | #define __dc_enable() | |
8ea2ddff | 482 | #define __dc_line_op(paddr, vaddr, sz, op) |
483 | #define __dc_line_op_k(paddr, sz, op) | |
95d6976d | 484 | |
485 | #endif /* CONFIG_ARC_HAS_DCACHE */ | |
486 | ||
95d6976d | 487 | #ifdef CONFIG_ARC_HAS_ICACHE |
488 | ||
af5abf1b | 489 | static inline void __ic_entire_inv(void) |
490 | { | |
491 | write_aux_reg(ARC_REG_IC_IVIC, 1); | |
492 | read_aux_reg(ARC_REG_IC_CTRL); /* blocks */ | |
493 | } | |
494 | ||
495 | static inline void | |
28b4af72 | 496 | __ic_line_inv_vaddr_local(phys_addr_t paddr, unsigned long vaddr, |
af5abf1b | 497 | unsigned long sz) |
95d6976d | 498 | { |
7d3d162b | 499 | const int full_page = __builtin_constant_p(sz) && sz == PAGE_SIZE; |
95d6976d | 500 | unsigned long flags; |
95d6976d | 501 | |
502 | local_irq_save(flags); | |
7d3d162b | 503 | (*_cache_line_loop_ic_fn)(paddr, vaddr, sz, OP_INV_IC, full_page); |
95d6976d | 504 | local_irq_restore(flags); |
505 | } | |
506 | ||
af5abf1b | 507 | #ifndef CONFIG_SMP |
508 | ||
509 | #define __ic_line_inv_vaddr(p, v, s) __ic_line_inv_vaddr_local(p, v, s) | |
510 | ||
511 | #else | |
336e199e | 512 | |
af5abf1b | 513 | struct ic_inv_args { |
28b4af72 | 514 | phys_addr_t paddr, vaddr; |
2328af0c | 515 | int sz; |
516 | }; | |
517 | ||
518 | static void __ic_line_inv_vaddr_helper(void *info) | |
519 | { | |
014018e0 | 520 | struct ic_inv_args *ic_inv = info; |
af5abf1b | 521 | |
2328af0c | 522 | __ic_line_inv_vaddr_local(ic_inv->paddr, ic_inv->vaddr, ic_inv->sz); |
523 | } | |
524 | ||
28b4af72 | 525 | static void __ic_line_inv_vaddr(phys_addr_t paddr, unsigned long vaddr, |
2328af0c | 526 | unsigned long sz) |
527 | { | |
af5abf1b | 528 | struct ic_inv_args ic_inv = { |
529 | .paddr = paddr, | |
530 | .vaddr = vaddr, | |
531 | .sz = sz | |
532 | }; | |
533 | ||
2328af0c | 534 | on_each_cpu(__ic_line_inv_vaddr_helper, &ic_inv, 1); |
535 | } | |
af5abf1b | 536 | |
537 | #endif /* CONFIG_SMP */ | |
538 | ||
539 | #else /* !CONFIG_ARC_HAS_ICACHE */ | |
95d6976d | 540 | |
336e199e | 541 | #define __ic_entire_inv() |
95d6976d | 542 | #define __ic_line_inv_vaddr(pstart, vstart, sz) |
543 | ||
544 | #endif /* CONFIG_ARC_HAS_ICACHE */ | |
545 | ||
4d369680 | 546 | static noinline void slc_op_rgn(phys_addr_t paddr, unsigned long sz, const int op) |
795f4558 | 547 | { |
548 | #ifdef CONFIG_ISA_ARCV2 | |
b607eddd | 549 | /* |
550 | * SLC is shared between all cores and concurrent aux operations from | |
551 | * multiple cores need to be serialized using a spinlock | |
552 | * A concurrent operation can be silently ignored and/or the old/new | |
553 | * operation can remain incomplete forever (lockup in SLC_CTRL_BUSY loop | |
554 | * below) | |
555 | */ | |
556 | static DEFINE_SPINLOCK(lock); | |
795f4558 | 557 | unsigned long flags; |
558 | unsigned int ctrl; | |
7d79cee2 | 559 | phys_addr_t end; |
795f4558 | 560 | |
b607eddd | 561 | spin_lock_irqsave(&lock, flags); |
795f4558 | 562 | |
563 | /* | |
564 | * The Region Flush operation is specified by CTRL.RGN_OP[11..9] | |
565 | * - b'000 (default) is Flush, | |
566 | * - b'001 is Invalidate if CTRL.IM == 0 | |
567 | * - b'001 is Flush-n-Invalidate if CTRL.IM == 1 | |
568 | */ | |
569 | ctrl = read_aux_reg(ARC_REG_SLC_CTRL); | |
570 | ||
571 | /* Don't rely on default value of IM bit */ | |
572 | if (!(op & OP_FLUSH)) /* i.e. OP_INV */ | |
573 | ctrl &= ~SLC_CTRL_IM; /* clear IM: Disable flush before Inv */ | |
574 | else | |
575 | ctrl |= SLC_CTRL_IM; | |
576 | ||
577 | if (op & OP_INV) | |
578 | ctrl |= SLC_CTRL_RGN_OP_INV; /* Inv or flush-n-inv */ | |
579 | else | |
580 | ctrl &= ~SLC_CTRL_RGN_OP_INV; | |
581 | ||
582 | write_aux_reg(ARC_REG_SLC_CTRL, ctrl); | |
583 | ||
584 | /* | |
585 | * Lower bits are ignored, no need to clip | |
586 | * END needs to be setup before START (latter triggers the operation) | |
587 | * END can't be same as START, so add (l2_line_sz - 1) to sz | |
588 | */ | |
7d79cee2 | 589 | end = paddr + sz + l2_line_sz - 1; |
590 | if (is_pae40_enabled()) | |
591 | write_aux_reg(ARC_REG_SLC_RGN_END1, upper_32_bits(end)); | |
592 | ||
593 | write_aux_reg(ARC_REG_SLC_RGN_END, lower_32_bits(end)); | |
594 | ||
595 | if (is_pae40_enabled()) | |
596 | write_aux_reg(ARC_REG_SLC_RGN_START1, upper_32_bits(paddr)); | |
597 | ||
598 | write_aux_reg(ARC_REG_SLC_RGN_START, lower_32_bits(paddr)); | |
795f4558 | 599 | |
b37174d9 | 600 | /* Make sure "busy" bit reports correct status, see STAR 9001165532 */ |
601 | read_aux_reg(ARC_REG_SLC_CTRL); | |
602 | ||
795f4558 | 603 | while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY); |
604 | ||
b607eddd | 605 | spin_unlock_irqrestore(&lock, flags); |
795f4558 | 606 | #endif |
607 | } | |
608 | ||
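The `l2_line_sz - 1` pad in `slc_op_rgn()` serves a different purpose than the L1 region math earlier: per the comment above, the SLC END register must not equal START, so even a 1-byte request is padded out to guarantee a non-empty range (the hardware ignores the low address bits, so no explicit clipping is needed). A sketch with a hypothetical 128-byte SLC line:

```c
/* Sketch of the SLC region bounds guaranteeing END != START. */
#include <stdio.h>

int main(void)
{
	unsigned long l2_line_sz = 128;			/* hypothetical */
	unsigned long paddr = 0x80000000, sz = 1;	/* 1-byte request */
	unsigned long end = paddr + sz + l2_line_sz - 1;

	printf("START=%#lx END=%#lx\n", paddr, end);	/* END = 0x80000080 */
	return 0;
}
```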
4d369680 | 609 | static __maybe_unused noinline void slc_op_line(phys_addr_t paddr, unsigned long sz, const int op) |
ae0b63d9 | 610 | { |
611 | #ifdef CONFIG_ISA_ARCV2 | |
612 | /* | |
613 | * SLC is shared between all cores and concurrent aux operations from | |
614 | * multiple cores need to be serialized using a spinlock | |
615 | * A concurrent operation can be silently ignored and/or the old/new | |
616 | * operation can remain incomplete forever (lockup in SLC_CTRL_BUSY loop | |
617 | * below) | |
618 | */ | |
619 | static DEFINE_SPINLOCK(lock); | |
620 | ||
621 | const unsigned long SLC_LINE_MASK = ~(l2_line_sz - 1); | |
622 | unsigned int ctrl, cmd; | |
623 | unsigned long flags; | |
624 | int num_lines; | |
625 | ||
626 | spin_lock_irqsave(&lock, flags); | |
627 | ||
628 | ctrl = read_aux_reg(ARC_REG_SLC_CTRL); | |
629 | ||
630 | /* Don't rely on default value of IM bit */ | |
631 | if (!(op & OP_FLUSH)) /* i.e. OP_INV */ | |
632 | ctrl &= ~SLC_CTRL_IM; /* clear IM: Disable flush before Inv */ | |
633 | else | |
634 | ctrl |= SLC_CTRL_IM; | |
635 | ||
636 | write_aux_reg(ARC_REG_SLC_CTRL, ctrl); | |
637 | ||
638 | cmd = op & OP_INV ? ARC_AUX_SLC_IVDL : ARC_AUX_SLC_FLDL; | |
639 | ||
640 | sz += paddr & ~SLC_LINE_MASK; | |
641 | paddr &= SLC_LINE_MASK; | |
642 | ||
643 | num_lines = DIV_ROUND_UP(sz, l2_line_sz); | |
644 | ||
645 | while (num_lines-- > 0) { | |
646 | write_aux_reg(cmd, paddr); | |
647 | paddr += l2_line_sz; | |
648 | } | |
649 | ||
650 | /* Make sure "busy" bit reports correct status, see STAR 9001165532 */ |
651 | read_aux_reg(ARC_REG_SLC_CTRL); | |
652 | ||
653 | while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY); | |
654 | ||
655 | spin_unlock_irqrestore(&lock, flags); | |
656 | #endif | |
657 | } | |
658 | ||
659 | #define slc_op(paddr, sz, op) slc_op_rgn(paddr, sz, op) | |
660 | ||
d4911cdd | 661 | noinline static void slc_entire_op(const int op) |
662 | { | |
663 | unsigned int ctrl, r = ARC_REG_SLC_CTRL; | |
664 | ||
665 | ctrl = read_aux_reg(r); | |
666 | ||
667 | if (!(op & OP_FLUSH)) /* i.e. OP_INV */ | |
668 | ctrl &= ~SLC_CTRL_IM; /* clear IM: Disable flush before Inv */ | |
669 | else | |
670 | ctrl |= SLC_CTRL_IM; | |
671 | ||
672 | write_aux_reg(r, ctrl); | |
673 | ||
8bbfbc2d | 674 | if (op & OP_INV) /* Inv or flush-n-inv use same cmd reg */ |
675 | write_aux_reg(ARC_REG_SLC_INVALIDATE, 0x1); | |
676 | else | |
677 | write_aux_reg(ARC_REG_SLC_FLUSH, 0x1); | |
d4911cdd | 678 | |
c70c4733 | 679 | /* Make sure "busy" bit reports correct status, see STAR 9001165532 */ |
680 | read_aux_reg(r); | |
681 | ||
d4911cdd | 682 | /* Important to wait for flush to complete */ |
683 | while (read_aux_reg(r) & SLC_CTRL_BUSY); | |
684 | } | |
685 | ||
686 | static inline void arc_slc_disable(void) | |
687 | { | |
688 | const int r = ARC_REG_SLC_CTRL; | |
689 | ||
690 | slc_entire_op(OP_FLUSH_N_INV); | |
691 | write_aux_reg(r, read_aux_reg(r) | SLC_CTRL_DIS); | |
692 | } | |
693 | ||
694 | static inline void arc_slc_enable(void) | |
695 | { | |
696 | const int r = ARC_REG_SLC_CTRL; | |
697 | ||
698 | write_aux_reg(r, read_aux_reg(r) & ~SLC_CTRL_DIS); | |
699 | } | |
700 | ||
95d6976d | 701 | /*********************************************************** |
702 | * Exported APIs | |
703 | */ | |
704 | ||
ac4cfacc | 705 | void flush_dcache_folio(struct folio *folio) |
95d6976d | 706 | { |
6732c0e4 | 707 | clear_bit(PG_dc_clean, &folio->flags); |
708 | return; | |
95d6976d | 709 | } |
ac4cfacc | 710 | EXPORT_SYMBOL(flush_dcache_folio); |
711 | ||
712 | void flush_dcache_page(struct page *page) | |
713 | { | |
714 | return flush_dcache_folio(page_folio(page)); | |
715 | } | |
95d6976d | 716 | EXPORT_SYMBOL(flush_dcache_page); |
717 | ||
f2b0b25a | 718 | /* |
719 | * DMA ops for systems with L1 cache only | |
720 | * Make memory coherent with L1 cache by flushing/invalidating L1 lines | |
721 | */ | |
f5db19e9 | 722 | static void __dma_cache_wback_inv_l1(phys_addr_t start, unsigned long sz) |
95d6976d | 723 | { |
6ec18a81 | 724 | __dc_line_op_k(start, sz, OP_FLUSH_N_INV); |
f2b0b25a | 725 | } |
795f4558 | 726 | |
f5db19e9 | 727 | static void __dma_cache_inv_l1(phys_addr_t start, unsigned long sz) |
f2b0b25a | 728 | { |
729 | __dc_line_op_k(start, sz, OP_INV); | |
95d6976d | 730 | } |
95d6976d | 731 | |
f5db19e9 | 732 | static void __dma_cache_wback_l1(phys_addr_t start, unsigned long sz) |
f2b0b25a | 733 | { |
734 | __dc_line_op_k(start, sz, OP_FLUSH); | |
735 | } | |
736 | ||
737 | /* | |
738 | * DMA ops for systems with both L1 and L2 caches, but without IOC | |
7423cc0c | 739 | * Both L1 and L2 lines need to be explicitly flushed/invalidated |
f2b0b25a | 740 | */ |
f5db19e9 | 741 | static void __dma_cache_wback_inv_slc(phys_addr_t start, unsigned long sz) |
f2b0b25a | 742 | { |
743 | __dc_line_op_k(start, sz, OP_FLUSH_N_INV); | |
744 | slc_op(start, sz, OP_FLUSH_N_INV); | |
745 | } | |
746 | ||
f5db19e9 | 747 | static void __dma_cache_inv_slc(phys_addr_t start, unsigned long sz) |
95d6976d | 748 | { |
6ec18a81 | 749 | __dc_line_op_k(start, sz, OP_INV); |
f2b0b25a | 750 | slc_op(start, sz, OP_INV); |
751 | } | |
795f4558 | 752 | |
f5db19e9 | 753 | static void __dma_cache_wback_slc(phys_addr_t start, unsigned long sz) |
f2b0b25a | 754 | { |
755 | __dc_line_op_k(start, sz, OP_FLUSH); | |
756 | slc_op(start, sz, OP_FLUSH); | |
757 | } | |
758 | ||
f2b0b25a | 759 | /* |
760 | * Exported DMA API | |
761 | */ | |
f5db19e9 | 762 | void dma_cache_wback_inv(phys_addr_t start, unsigned long sz) |
f2b0b25a | 763 | { |
764 | __dma_cache_wback_inv(start, sz); | |
765 | } | |
766 | EXPORT_SYMBOL(dma_cache_wback_inv); | |
767 | ||
f5db19e9 | 768 | void dma_cache_inv(phys_addr_t start, unsigned long sz) |
f2b0b25a | 769 | { |
770 | __dma_cache_inv(start, sz); | |
95d6976d | 771 | } |
772 | EXPORT_SYMBOL(dma_cache_inv); | |
773 | ||
f5db19e9 | 774 | void dma_cache_wback(phys_addr_t start, unsigned long sz) |
95d6976d | 775 | { |
f2b0b25a | 776 | __dma_cache_wback(start, sz); |
95d6976d | 777 | } |
778 | EXPORT_SYMBOL(dma_cache_wback); | |
779 | ||
780 | /* | |
7586bf72 | 781 | * This is the API for making I/D Caches consistent when modifying |
782 | * kernel code (loadable modules, kprobes, kgdb...) | |
95d6976d | 783 | * This is called on insmod, with kernel virtual address for CODE of |
784 | * the module. ARC cache maintenance ops require PHY address thus we | |
785 | * need to convert vmalloc addr to PHY addr | |
786 | */ | |
787 | void flush_icache_range(unsigned long kstart, unsigned long kend) | |
788 | { | |
c59414cc | 789 | unsigned int tot_sz; |
95d6976d | 790 | |
c59414cc | 791 | WARN(kstart < TASK_SIZE, "%s() can't handle user vaddr", __func__); |
95d6976d | 792 | |
793 | /* Shortcut for bigger flush ranges. | |
794 | * Here we don't care if this was kernel virtual or phy addr | |
795 | */ | |
796 | tot_sz = kend - kstart; | |
797 | if (tot_sz > PAGE_SIZE) { | |
798 | flush_cache_all(); | |
799 | return; | |
800 | } | |
801 | ||
802 | /* Case: Kernel Phy addr (0x8000_0000 onwards) */ | |
803 | if (likely(kstart > PAGE_OFFSET)) { | |
7586bf72 | 804 | /* |
805 | * The 2nd arg despite being paddr will be used to index icache | |
806 | * This is OK since no alternate virtual mappings will exist | |
807 | * given the callers for this case: kprobe/kgdb in built-in | |
808 | * kernel code only. | |
809 | */ | |
94bad1af | 810 | __sync_icache_dcache(kstart, kstart, kend - kstart); |
95d6976d | 811 | return; |
812 | } | |
813 | ||
814 | /* | |
815 | * Case: Kernel Vaddr (0x7000_0000 to 0x7fff_ffff) | |
816 | * (1) ARC Cache Maintenance ops only take Phy addr, hence special | |
817 | * handling of kernel vaddr. | |
818 | * | |
819 | * (2) Despite @tot_sz being < PAGE_SIZE (bigger cases handled already), | |
820 | * it still needs to handle a 2 page scenario, where the range | |
821 | * straddles across 2 virtual pages, hence the need for a loop |
822 | */ | |
823 | while (tot_sz > 0) { | |
c59414cc | 824 | unsigned int off, sz; |
825 | unsigned long phy, pfn; | |
826 | ||
95d6976d | 827 | off = kstart % PAGE_SIZE; |
828 | pfn = vmalloc_to_pfn((void *)kstart); | |
829 | phy = (pfn << PAGE_SHIFT) + off; | |
830 | sz = min_t(unsigned int, tot_sz, PAGE_SIZE - off); | |
94bad1af | 831 | __sync_icache_dcache(phy, kstart, sz); |
95d6976d | 832 | kstart += sz; |
833 | tot_sz -= sz; | |
834 | } | |
835 | } | |
e3560305 | 836 | EXPORT_SYMBOL(flush_icache_range); |
95d6976d | 837 | |
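The 2-page straddle case handled by the loop above can be traced with concrete numbers; a standalone sketch assuming ARC's default 8K pages and a hypothetical vmalloc address:

```c
/* Worked example of the per-page chunking in flush_icache_range(). */
#include <stdio.h>

#define PAGE_SIZE	8192UL

int main(void)
{
	unsigned long kstart = 0x70001f80, tot_sz = 0x100;

	while (tot_sz > 0) {
		unsigned long off = kstart % PAGE_SIZE;
		unsigned long sz = tot_sz < PAGE_SIZE - off ?
				   tot_sz : PAGE_SIZE - off;

		/* 1st chunk: 0x70001f80..0x70001fff, 2nd: 0x70002000.. */
		printf("sync %#lx..%#lx\n", kstart, kstart + sz - 1);
		kstart += sz;
		tot_sz -= sz;
	}
	return 0;
}
```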
838 | /* | |
94bad1af | 839 | * General purpose helper to make I and D cache lines consistent. |
840 | * @paddr is phy addr of region | |
4b06ff35 | 841 | * @vaddr is typically user vaddr (breakpoint) or kernel vaddr (vmalloc) |
842 | * However in one instance, when called by kprobe (for a breakpt in | |
94bad1af | 843 | * builtin kernel code) @vaddr will be paddr only, meaning CDU operation will |
63d1dfd0 | 844 | * use a paddr to index the cache (despite VIPT). This is fine since a |
4b06ff35 VG |
845 | * builtin kernel page will not have any virtual mappings. |
846 | * kprobe on loadable module will be kernel vaddr. | |
95d6976d | 847 | */ |
28b4af72 | 848 | void __sync_icache_dcache(phys_addr_t paddr, unsigned long vaddr, int len) |
95d6976d | 849 | { |
f538881c | 850 | __dc_line_op(paddr, vaddr, len, OP_FLUSH_N_INV); |
2328af0c | 851 | __ic_line_inv_vaddr(paddr, vaddr, len); |
95d6976d | 852 | } |
853 | ||
24603fdd | 854 | /* wrapper to compile time eliminate alignment checks in flush loop */ |
ac4cfacc | 855 | void __inv_icache_pages(phys_addr_t paddr, unsigned long vaddr, unsigned nr) |
95d6976d | 856 | { |
ac4cfacc | 857 | __ic_line_inv_vaddr(paddr, vaddr, nr * PAGE_SIZE); |
95d6976d | 858 | } |
859 | ||
6ec18a81 | 860 | /* |
861 | * wrapper to clearout kernel or userspace mappings of a page | |
862 | * For kernel mappings @vaddr == @paddr | |
863 | */ | |
ac4cfacc | 864 | void __flush_dcache_pages(phys_addr_t paddr, unsigned long vaddr, unsigned nr) |
eacd0e95 | 865 | { |
ac4cfacc | 866 | __dc_line_op(paddr, vaddr & PAGE_MASK, nr * PAGE_SIZE, OP_FLUSH_N_INV); |
eacd0e95 | 867 | } |
868 | ||
95d6976d | 869 | noinline void flush_cache_all(void) |
870 | { | |
871 | unsigned long flags; | |
872 | ||
873 | local_irq_save(flags); | |
874 | ||
336e199e | 875 | __ic_entire_inv(); |
95d6976d VG |
876 | __dc_entire_op(OP_FLUSH_N_INV); |
877 | ||
878 | local_irq_restore(flags); | |
879 | ||
880 | } | |
881 | ||
4102b533 | 882 | void copy_user_highpage(struct page *to, struct page *from, |
883 | unsigned long u_vaddr, struct vm_area_struct *vma) | |
884 | { | |
ac4cfacc | 885 | struct folio *src = page_folio(from); |
886 | struct folio *dst = page_folio(to); | |
336e2136 | 887 | void *kfrom = kmap_atomic(from); |
888 | void *kto = kmap_atomic(to); | |
4102b533 | 889 | |
336e2136 | 890 | copy_page(kto, kfrom); |
4102b533 | 891 | |
ac4cfacc | 892 | clear_bit(PG_dc_clean, &dst->flags); |
6732c0e4 | 893 | clear_bit(PG_dc_clean, &src->flags); |
336e2136 | 894 | |
895 | kunmap_atomic(kto); | |
896 | kunmap_atomic(kfrom); | |
4102b533 | 897 | } |
898 | ||
899 | void clear_user_page(void *to, unsigned long u_vaddr, struct page *page) | |
900 | { | |
ac4cfacc | 901 | struct folio *folio = page_folio(page); |
4102b533 | 902 | clear_page(to); |
ac4cfacc | 903 | clear_bit(PG_dc_clean, &folio->flags); |
4102b533 | 904 | } |
6b5ff040 | 905 | EXPORT_SYMBOL(clear_user_page); |
4102b533 | 906 | |
95d6976d | 907 | /********************************************************************** |
908 | * Explicit Cache flush request from user space via syscall | |
909 | * Needed for JITs which generate code on the fly | |
910 | */ | |
911 | SYSCALL_DEFINE3(cacheflush, uint32_t, start, uint32_t, sz, uint32_t, flags) | |
912 | { | |
913 | /* TBD: optimize this */ | |
914 | flush_cache_all(); | |
915 | return 0; | |
916 | } | |
8ea2ddff | 917 | |
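From userspace the syscall is typically reached via `syscall(2)`; a minimal hypothetical JIT-side caller (assuming an ARC toolchain that defines `__NR_cacheflush`; the three arguments are accepted but currently ignored since the kernel flushes everything, per the TBD above):

```c
/* Hypothetical userspace caller of the ARC cacheflush syscall. */
#include <stdint.h>
#include <unistd.h>
#include <sys/syscall.h>

static void jit_sync(void *code, uint32_t len)
{
	/* start/sz/flags are accepted but presently unused by the kernel */
	syscall(__NR_cacheflush, (uintptr_t)code, len, 0);
}
```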
8c47f83b | 918 | /* |
919 | * IO-Coherency (IOC) setup rules: | |
920 | * | |
921 | * 1. Needs to be at system level, so only once by Master core | |
922 | * Non-Masters need not be accessing caches at that time | |
923 | * - They are either HALT_ON_RESET and kick started much later or | |
924 | * - if run on reset, need to ensure that arc_platform_smp_wait_to_boot() | |
925 | * doesn't perturb caches or coherency unit | |
926 | * | |
927 | * 2. caches (L1 and SLC) need to be purged (flush+inv) before setting up IOC, | |
928 | * otherwise any straggler data might behave strangely post IOC enabling | |
929 | * | |
930 | * 3. All Caches need to be disabled when setting up IOC to elide any in-flight | |
931 | * Coherency transactions | |
932 | */ | |
4d369680 | 933 | static noinline void __init arc_ioc_setup(void) |
d4911cdd | 934 | { |
bee91c3a | 935 | unsigned int ioc_base, mem_sz; |
e497c8e5 | 936 | |
3624379d | 937 | /* |
938 | * If IOC was already enabled (due to bootloader) it technically needs to | |
939 | * be reconfigured with aperture base/size corresponding to the Linux memory |
940 | * map, which will certainly differ from U-Boot's. But disabling and |
941 | * reenabling IOC when DMA might be potentially active is tricky business. | |
942 | * To avoid random memory issues later, just panic here and ask user to | |
943 | * upgrade bootloader to one which doesn't enable IOC | |
944 | */ | |
945 | if (read_aux_reg(ARC_REG_IO_COH_ENABLE) & ARC_IO_COH_ENABLE_BIT) | |
946 | panic("IOC already enabled, please upgrade bootloader!\n"); | |
947 | ||
948 | if (!ioc_enable) | |
949 | return; | |
950 | ||
8c47f83b | 951 | /* Flush + invalidate + disable L1 dcache */ |
952 | __dc_disable(); | |
953 | ||
954 | /* Flush + invalidate SLC */ | |
955 | if (read_aux_reg(ARC_REG_SLC_BCR)) | |
956 | slc_entire_op(OP_FLUSH_N_INV); | |
957 | ||
e497c8e5 | 958 | /* |
bee91c3a | 959 | * currently IOC Aperture covers entire DDR |
e497c8e5 | 960 | * TBD: fix for PGU + 1GB of low mem |
961 | * TBD: fix for PAE |
962 | */ | |
bee91c3a | 963 | mem_sz = arc_get_mem_sz(); |
964 | ||
965 | if (!is_power_of_2(mem_sz) || mem_sz < 4096) | |
966 | panic("IOC Aperture size must be power of 2 larger than 4KB"); | |
967 | ||
968 | /* | |
969 | * IOC Aperture size decoded as 2 ^ (SIZE + 2) KB, | |
970 | * so setting 0x11 implies 512MB, 0x12 implies 1GB... | |
971 | */ | |
972 | write_aux_reg(ARC_REG_IO_COH_AP0_SIZE, order_base_2(mem_sz >> 10) - 2); | |
973 | ||
974 | /* for now assume kernel base is start of IOC aperture */ | |
9ed68785 | 975 | ioc_base = CONFIG_LINUX_RAM_BASE; |
bee91c3a | 976 | |
977 | if (ioc_base % mem_sz != 0) | |
978 | panic("IOC Aperture start must be aligned to the size of the aperture"); | |
8c47f83b | 979 | |
bee91c3a | 980 | write_aux_reg(ARC_REG_IO_COH_AP0_BASE, ioc_base >> 12); |
3624379d | 981 | write_aux_reg(ARC_REG_IO_COH_PARTIAL, ARC_IO_COH_PARTIAL_BIT); |
982 | write_aux_reg(ARC_REG_IO_COH_ENABLE, ARC_IO_COH_ENABLE_BIT); | |
8c47f83b | 983 | |
984 | /* Re-enable L1 dcache */ | |
985 | __dc_enable(); | |
d4911cdd | 986 | } |
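The AP0_SIZE encoding used above (2 ^ (SIZE + 2) KB) can be sanity-checked against the 512MB example given in the comment:

```c
/* Check of the IOC aperture size encoding: 0x11 should mean 512MB. */
#include <stdio.h>

int main(void)
{
	unsigned int mem_sz = 512u << 20;		/* 512 MB, power of 2 */
	unsigned int kb = mem_sz >> 10;			/* 2^19 KB */
	unsigned int sz_field = __builtin_ctz(kb) - 2;	/* order_base_2 - 2 */

	printf("AP0_SIZE = %#x\n", sz_field);		/* prints 0x11 */
	return 0;
}
```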
987 | ||
b5ddb6d5 | 988 | /* |
989 | * Cache related boot time checks/setups only needed on master CPU: | |
990 | * - Geometry checks (kernel build and hardware agree: e.g. L1_CACHE_BYTES) | |
991 | * Assume SMP only, so all cores will have same cache config. A check on | |
992 | * one core suffices for all | |
993 | * - IOC setup / dma callbacks only need to be done once | |
994 | */ | |
4d369680 | 995 | static noinline void __init arc_cache_init_master(void) |
8ea2ddff | 996 | { |
8ea2ddff | 997 | if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE)) { |
17a5ed56 | 998 | struct cpuinfo_arc_cache *ic = &ic_info; |
8ea2ddff | 999 | |
f64915be | 1000 | if (!ic->line_len) |
8ea2ddff | 1001 | panic("cache support enabled but non-existent cache\n"); |
1002 | ||
1003 | if (ic->line_len != L1_CACHE_BYTES) | |
1004 | panic("ICache line [%d] != kernel Config [%d]", | |
1005 | ic->line_len, L1_CACHE_BYTES); | |
1006 | ||
bcc4d65a | 1007 | /* |
2547476a | 1008 | * In MMU v4 (HS38x) the aliasing icache config uses IVIL/PTAG |
bcc4d65a | 1009 | * pair to provide vaddr/paddr respectively, just as in MMU v3 |
1010 | */ | |
17a5ed56 | 1011 | if (is_isa_arcv2() && ic->colors > 1) |
bcc4d65a | 1012 | _cache_line_loop_ic_fn = __cache_line_loop_v3; |
1013 | else | |
1014 | _cache_line_loop_ic_fn = __cache_line_loop; | |
8ea2ddff | 1015 | } |
1016 | ||
1017 | if (IS_ENABLED(CONFIG_ARC_HAS_DCACHE)) { | |
17a5ed56 | 1018 | struct cpuinfo_arc_cache *dc = &dc_info; |
8ea2ddff | 1019 | |
f64915be | 1020 | if (!dc->line_len) |
8ea2ddff | 1021 | panic("cache support enabled but non-existent cache\n"); |
1022 | ||
1023 | if (dc->line_len != L1_CACHE_BYTES) | |
1024 | panic("DCache line [%d] != kernel Config [%d]", | |
1025 | dc->line_len, L1_CACHE_BYTES); | |
1026 | ||
d1f317d8 | 1027 | /* check for D-Cache aliasing on ARCompact: ARCv2 has PIPT */ |
6732c0e4 | 1028 | if (is_isa_arcompact() && dc->colors > 1) { |
1029 | panic("Aliasing VIPT cache not supported\n"); | |
d1f317d8 | 1030 | } |
8ea2ddff | 1031 | } |
f2b0b25a | 1032 | |
386177da | 1033 | /* |
1034 | * Check that SMP_CACHE_BYTES (and hence ARCH_DMA_MINALIGN) is larger | |
1035 | * or equal to any cache line length. | |
1036 | */ | |
1037 | BUILD_BUG_ON_MSG(L1_CACHE_BYTES > SMP_CACHE_BYTES, | |
1038 | "SMP_CACHE_BYTES must be >= any cache line length"); | |
1039 | if (is_isa_arcv2() && (l2_line_sz > SMP_CACHE_BYTES)) | |
1040 | panic("L2 Cache line [%d] > kernel Config [%d]\n", | |
1041 | l2_line_sz, SMP_CACHE_BYTES); | |
1042 | ||
d4911cdd | 1043 | /* Note that SLC disable not formally supported till HS 3.0 */ |
1044 | if (is_isa_arcv2() && l2_line_sz && !slc_enable) | |
1045 | arc_slc_disable(); | |
79335a2c | 1046 | |
3624379d | 1047 | if (is_isa_arcv2() && ioc_exists) |
d4911cdd | 1048 | arc_ioc_setup(); |
79335a2c | 1049 | |
2820a708 | 1050 | if (is_isa_arcv2() && l2_line_sz && slc_enable) { |
f2b0b25a | 1051 | __dma_cache_wback_inv = __dma_cache_wback_inv_slc; |
1052 | __dma_cache_inv = __dma_cache_inv_slc; | |
1053 | __dma_cache_wback = __dma_cache_wback_slc; | |
1054 | } else { | |
1055 | __dma_cache_wback_inv = __dma_cache_wback_inv_l1; | |
1056 | __dma_cache_inv = __dma_cache_inv_l1; | |
1057 | __dma_cache_wback = __dma_cache_wback_l1; | |
1058 | } | |
2820a708 | 1059 | /* |
1060 | * In case of IOC (say IOC+SLC case), pointers above could still be set | |
1061 | * but end up not being relevant as the first function in chain is not | |
356da6d0 | 1062 | * called at all for devices using coherent DMA. |
2820a708 | 1063 | * arch_sync_dma_for_cpu() -> dma_cache_*() -> __dma_cache_*() |
1064 | */ | |
8ea2ddff | 1065 | } |
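The function pointers installed above are consumed by the exported `dma_cache_*()` wrappers, which the generic DMA code reaches via the arch sync hooks. A simplified, illustrative rendering of that dispatch (modeled on arch/arc/mm/dma.c; types are localized here to keep the sketch self-contained):

```c
/* Illustrative dispatch from DMA direction to the cache wrappers above. */
enum dma_data_direction { DMA_BIDIRECTIONAL, DMA_TO_DEVICE, DMA_FROM_DEVICE };

extern void dma_cache_wback(unsigned long start, unsigned long sz);
extern void dma_cache_inv(unsigned long start, unsigned long sz);
extern void dma_cache_wback_inv(unsigned long start, unsigned long sz);

static void sync_for_device(unsigned long paddr, unsigned long sz,
			    enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:	/* CPU wrote buffer, device will read it */
		dma_cache_wback(paddr, sz);
		break;
	case DMA_FROM_DEVICE:	/* device will write, drop stale CPU lines */
		dma_cache_inv(paddr, sz);
		break;
	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv(paddr, sz);
		break;
	}
}
```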
76894a72 | 1066 | |
1067 | void __ref arc_cache_init(void) | |
1068 | { | |
1069 | unsigned int __maybe_unused cpu = smp_processor_id(); | |
76894a72 | 1070 | |
76894a72 VG |
1071 | if (!cpu) |
1072 | arc_cache_init_master(); | |
b5ddb6d5 | 1073 | |
1074 | /* | |
1075 | * In PAE regime, TLB and cache maintenance ops take wider addresses | |
1076 | * And even if PAE is not enabled in kernel, the upper 32-bits still need | |
1077 | * to be zeroed to keep the ops sane. | |
1078 | * As an optimization for more common !PAE enabled case, zero them out | |
1079 | * once at init, rather than checking/setting to 0 for every runtime op | |
1080 | */ | |
1081 | if (is_isa_arcv2() && pae40_exist_but_not_enab()) { | |
1082 | ||
1083 | if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE)) | |
1084 | write_aux_reg(ARC_REG_IC_PTAG_HI, 0); | |
1085 | ||
1086 | if (IS_ENABLED(CONFIG_ARC_HAS_DCACHE)) | |
1087 | write_aux_reg(ARC_REG_DC_PTAG_HI, 0); | |
1088 | ||
1089 | if (l2_line_sz) { | |
1090 | write_aux_reg(ARC_REG_SLC_RGN_END1, 0); | |
1091 | write_aux_reg(ARC_REG_SLC_RGN_START1, 0); | |
1092 | } | |
1093 | } | |
76894a72 | 1094 | } |