/*
 * Copyright (C) 2015 Masahiro Yamada <yamada.masahiro@socionext.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt)		"uniphier: " fmt

#include <linux/init.h>
#include <linux/io.h>
#include <linux/log2.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <asm/hardware/cache-uniphier.h>
#include <asm/outercache.h>

/* control registers */
#define UNIPHIER_SSCC		0x0	/* Control Register */
#define   UNIPHIER_SSCC_BST			BIT(20)	/* UCWG burst read */
#define   UNIPHIER_SSCC_ACT			BIT(19)	/* Inst-Data separate */
#define   UNIPHIER_SSCC_WTG			BIT(18)	/* WT gathering on */
#define   UNIPHIER_SSCC_PRD			BIT(17)	/* enable pre-fetch */
#define   UNIPHIER_SSCC_ON			BIT(0)	/* enable cache */
#define UNIPHIER_SSCLPDAWCR	0x30	/* Unified/Data Active Way Control */
#define UNIPHIER_SSCLPIAWCR	0x34	/* Instruction Active Way Control */

/* revision registers */
#define UNIPHIER_SSCID		0x0	/* ID Register */

/* operation registers */
#define UNIPHIER_SSCOPE		0x244	/* Cache Operation Primitive Entry */
#define   UNIPHIER_SSCOPE_CM_INV		0x0	/* invalidate */
#define   UNIPHIER_SSCOPE_CM_CLEAN		0x1	/* clean */
#define   UNIPHIER_SSCOPE_CM_FLUSH		0x2	/* flush */
#define   UNIPHIER_SSCOPE_CM_SYNC		0x8	/* sync (drain bufs) */
#define   UNIPHIER_SSCOPE_CM_FLUSH_PREFETCH	0x9	/* flush p-fetch buf */
#define UNIPHIER_SSCOQM		0x248	/* Cache Operation Queue Mode */
#define   UNIPHIER_SSCOQM_TID_MASK		(0x3 << 21)
#define   UNIPHIER_SSCOQM_TID_LRU_DATA		(0x0 << 21)
#define   UNIPHIER_SSCOQM_TID_LRU_INST		(0x1 << 21)
#define   UNIPHIER_SSCOQM_TID_WAY		(0x2 << 21)
#define   UNIPHIER_SSCOQM_S_MASK		(0x3 << 17)
#define   UNIPHIER_SSCOQM_S_RANGE		(0x0 << 17)
#define   UNIPHIER_SSCOQM_S_ALL			(0x1 << 17)
#define   UNIPHIER_SSCOQM_S_WAY			(0x2 << 17)
#define   UNIPHIER_SSCOQM_CE			BIT(15)	/* notify completion */
#define   UNIPHIER_SSCOQM_CM_INV		0x0	/* invalidate */
#define   UNIPHIER_SSCOQM_CM_CLEAN		0x1	/* clean */
#define   UNIPHIER_SSCOQM_CM_FLUSH		0x2	/* flush */
#define   UNIPHIER_SSCOQM_CM_PREFETCH		0x3	/* prefetch to cache */
#define   UNIPHIER_SSCOQM_CM_PREFETCH_BUF	0x4	/* prefetch to pf-buf */
#define   UNIPHIER_SSCOQM_CM_TOUCH		0x5	/* touch */
#define   UNIPHIER_SSCOQM_CM_TOUCH_ZERO		0x6	/* touch to zero */
#define   UNIPHIER_SSCOQM_CM_TOUCH_DIRTY	0x7	/* touch with dirty */
#define UNIPHIER_SSCOQAD	0x24c	/* Cache Operation Queue Address */
#define UNIPHIER_SSCOQSZ	0x250	/* Cache Operation Queue Size */
#define UNIPHIER_SSCOQMASK	0x254	/* Cache Operation Queue Address Mask */
#define UNIPHIER_SSCOQWN	0x258	/* Cache Operation Queue Way Number */
#define UNIPHIER_SSCOPPQSEF	0x25c	/* Cache Operation Queue Set Complete */
#define   UNIPHIER_SSCOPPQSEF_FE	BIT(1)
#define   UNIPHIER_SSCOPPQSEF_OE	BIT(0)
#define UNIPHIER_SSCOLPQS	0x260	/* Cache Operation Queue Status */
#define   UNIPHIER_SSCOLPQS_EF		BIT(2)
#define   UNIPHIER_SSCOLPQS_EST		BIT(1)
#define   UNIPHIER_SSCOLPQS_QST		BIT(0)

/* Is the touch/pre-fetch destination specified by ways? */
#define UNIPHIER_SSCOQM_TID_IS_WAY(op) \
		(((op) & UNIPHIER_SSCOQM_TID_MASK) == UNIPHIER_SSCOQM_TID_WAY)
/* Is the operation region specified by address range? */
#define UNIPHIER_SSCOQM_S_IS_RANGE(op) \
		(((op) & UNIPHIER_SSCOQM_S_MASK) == UNIPHIER_SSCOQM_S_RANGE)

/**
 * uniphier_cache_data - UniPhier outer cache specific data
 *
 * @ctrl_base: virtual base address of control registers
 * @rev_base: virtual base address of revision registers
 * @op_base: virtual base address of operation registers
 * @way_present_mask: each bit specifies if the way is present
 * @way_locked_mask: each bit specifies if the way is locked
 * @nsets: number of associativity sets
 * @line_size: line size in bytes
 * @range_op_max_size: max size that can be handled by a single range operation
 * @list: list node to include this level in the whole cache hierarchy
 */
struct uniphier_cache_data {
	void __iomem *ctrl_base;
	void __iomem *rev_base;
	void __iomem *op_base;
	u32 way_present_mask;
	u32 way_locked_mask;
	u32 nsets;
	u32 line_size;
	u32 range_op_max_size;
	struct list_head list;
};

/*
 * List of the whole outer cache hierarchy.  This list is only modified during
 * the early boot stage, so no mutex is taken for the access to the list.
 */
static LIST_HEAD(uniphier_cache_list);

/**
 * __uniphier_cache_sync - perform a sync point for a particular cache level
 *
 * @data: cache controller specific data
 */
static void __uniphier_cache_sync(struct uniphier_cache_data *data)
{
	/* This sequence need not be atomic.  Do not disable IRQ. */
	writel_relaxed(UNIPHIER_SSCOPE_CM_SYNC,
		       data->op_base + UNIPHIER_SSCOPE);
	/* need a read back to confirm */
	readl_relaxed(data->op_base + UNIPHIER_SSCOPE);
}

/**
 * __uniphier_cache_maint_common - run a queue operation for a particular level
 *
 * @data: cache controller specific data
 * @start: start address of range operation (don't care for "all" operation)
 * @size: data size of range operation (don't care for "all" operation)
 * @operation: flags to specify the desired cache operation
 */
static void __uniphier_cache_maint_common(struct uniphier_cache_data *data,
					  unsigned long start,
					  unsigned long size,
					  u32 operation)
{
	unsigned long flags;

	/*
	 * No spin lock is necessary here because:
	 *
	 * [1] This outer cache controller is able to accept maintenance
	 * operations from multiple CPUs at a time in an SMP system; if a
	 * maintenance operation is under way and another operation is issued,
	 * the new one is stored in the queue.  The controller performs one
	 * operation after another.  If the queue is full, the status register,
	 * UNIPHIER_SSCOPPQSEF, indicates that the queue registration has
	 * failed.  The status registers, UNIPHIER_{SSCOPPQSEF, SSCOLPQS}, have
	 * different instances for each CPU, i.e. each CPU can track the status
	 * of the maintenance operations triggered by itself.
	 *
	 * [2] The cache command registers, UNIPHIER_{SSCOQM, SSCOQAD, SSCOQSZ,
	 * SSCOQWN}, are shared between multiple CPUs, but the hardware still
	 * guarantees the registration sequence is atomic; write accesses to
	 * them are arbitrated by the hardware.  The first accessor to the
	 * register, UNIPHIER_SSCOQM, holds the access right and it is released
	 * by reading the status register, UNIPHIER_SSCOPPQSEF.  While one CPU
	 * is holding the access right, other CPUs fail to register operations.
	 * One CPU should not hold the access right for a long time, so local
	 * IRQs should be disabled during the following sequence.
	 */
	local_irq_save(flags);

	/* clear the complete notification flag */
	writel_relaxed(UNIPHIER_SSCOLPQS_EF, data->op_base + UNIPHIER_SSCOLPQS);

	do {
		/* set cache operation */
		writel_relaxed(UNIPHIER_SSCOQM_CE | operation,
			       data->op_base + UNIPHIER_SSCOQM);

		/* set address range if needed */
		if (likely(UNIPHIER_SSCOQM_S_IS_RANGE(operation))) {
			writel_relaxed(start, data->op_base + UNIPHIER_SSCOQAD);
			writel_relaxed(size, data->op_base + UNIPHIER_SSCOQSZ);
		}

		/* set target ways if needed */
		if (unlikely(UNIPHIER_SSCOQM_TID_IS_WAY(operation)))
			writel_relaxed(data->way_locked_mask,
				       data->op_base + UNIPHIER_SSCOQWN);
	} while (unlikely(readl_relaxed(data->op_base + UNIPHIER_SSCOPPQSEF) &
			  (UNIPHIER_SSCOPPQSEF_FE | UNIPHIER_SSCOPPQSEF_OE)));

	/* wait until the operation is completed */
	while (likely(readl_relaxed(data->op_base + UNIPHIER_SSCOLPQS) !=
		      UNIPHIER_SSCOLPQS_EF))
		cpu_relax();

	local_irq_restore(flags);
}

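/**
 * __uniphier_cache_maint_all - run a queue operation against the whole cache
 *
 * @data: cache controller specific data
 * @operation: flags to specify the desired cache operation
 */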
static void __uniphier_cache_maint_all(struct uniphier_cache_data *data,
				       u32 operation)
{
	__uniphier_cache_maint_common(data, 0, 0,
				      UNIPHIER_SSCOQM_S_ALL | operation);

	__uniphier_cache_sync(data);
}

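/**
 * __uniphier_cache_maint_range - run a queue operation against a given range
 *
 * @data: cache controller specific data
 * @start: start address of the target range
 * @end: end address (exclusive) of the target range
 * @operation: flags to specify the desired cache operation
 */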
static void __uniphier_cache_maint_range(struct uniphier_cache_data *data,
					 unsigned long start, unsigned long end,
					 u32 operation)
{
	unsigned long size;

	/*
	 * If the start address is not aligned,
	 * round it down so that the first cache line is fully covered.
	 */
	start = start & ~(data->line_size - 1);

	size = end - start;

	if (unlikely(size >= (unsigned long)(-data->line_size))) {
		/*
		 * The request covers (almost) the whole address space;
		 * perform a cache operation for the entire range instead.
		 */
		__uniphier_cache_maint_all(data, operation);
		return;
	}

	/*
	 * If the end address is not aligned,
	 * round the size up so that the last cache line is fully covered.
	 */
	size = ALIGN(size, data->line_size);

	/* split the range into chunks a single range operation can handle */
	while (size) {
		unsigned long chunk_size = min_t(unsigned long, size,
						 data->range_op_max_size);

		__uniphier_cache_maint_common(data, start, chunk_size,
					      UNIPHIER_SSCOQM_S_RANGE | operation);

		start += chunk_size;
		size -= chunk_size;
	}

	__uniphier_cache_sync(data);
}

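/**
 * __uniphier_cache_enable - enable or disable a particular cache level
 *
 * @data: cache controller specific data
 * @on: true to enable (with WT gathering and pre-fetch), false to disable
 */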
static void __uniphier_cache_enable(struct uniphier_cache_data *data, bool on)
{
	u32 val = 0;

	if (on)
		val = UNIPHIER_SSCC_WTG | UNIPHIER_SSCC_PRD | UNIPHIER_SSCC_ON;

	writel_relaxed(val, data->ctrl_base + UNIPHIER_SSCC);
}

static void __init __uniphier_cache_set_locked_ways(
					struct uniphier_cache_data *data,
					u32 way_mask)
{
	data->way_locked_mask = way_mask & data->way_present_mask;

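	/* activate the present ways that are not locked */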
	writel_relaxed(~data->way_locked_mask & data->way_present_mask,
		       data->ctrl_base + UNIPHIER_SSCLPDAWCR);
}

static void uniphier_cache_maint_range(unsigned long start, unsigned long end,
				       u32 operation)
{
	struct uniphier_cache_data *data;

	list_for_each_entry(data, &uniphier_cache_list, list)
		__uniphier_cache_maint_range(data, start, end, operation);
}

static void uniphier_cache_maint_all(u32 operation)
{
	struct uniphier_cache_data *data;

	list_for_each_entry(data, &uniphier_cache_list, list)
		__uniphier_cache_maint_all(data, operation);
}

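/*
 * The callbacks below are hooked up to the ARM outer_cache interface in
 * uniphier_cache_init().
 */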
static void uniphier_cache_inv_range(unsigned long start, unsigned long end)
{
	uniphier_cache_maint_range(start, end, UNIPHIER_SSCOQM_CM_INV);
}

static void uniphier_cache_clean_range(unsigned long start, unsigned long end)
{
	uniphier_cache_maint_range(start, end, UNIPHIER_SSCOQM_CM_CLEAN);
}

static void uniphier_cache_flush_range(unsigned long start, unsigned long end)
{
	uniphier_cache_maint_range(start, end, UNIPHIER_SSCOQM_CM_FLUSH);
}

static void __init uniphier_cache_inv_all(void)
{
	uniphier_cache_maint_all(UNIPHIER_SSCOQM_CM_INV);
}

static void uniphier_cache_flush_all(void)
{
	uniphier_cache_maint_all(UNIPHIER_SSCOQM_CM_FLUSH);
}

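/*
 * Disable each level from the outermost one back to L2, then flush so that
 * dirty data is written back to memory.
 */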
static void uniphier_cache_disable(void)
{
	struct uniphier_cache_data *data;

	list_for_each_entry_reverse(data, &uniphier_cache_list, list)
		__uniphier_cache_enable(data, false);

	uniphier_cache_flush_all();
}

static void __init uniphier_cache_enable(void)
{
	struct uniphier_cache_data *data;

	uniphier_cache_inv_all();

	list_for_each_entry(data, &uniphier_cache_list, list) {
		__uniphier_cache_enable(data, true);
		__uniphier_cache_set_locked_ways(data, 0);
	}
}

static void uniphier_cache_sync(void)
{
	struct uniphier_cache_data *data;

	list_for_each_entry(data, &uniphier_cache_list, list)
		__uniphier_cache_sync(data);
}

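/**
 * uniphier_cache_l2_is_enabled - check whether the L2 cache is enabled
 *
 * Return: 1 if the L2 cache (the first registered level) is enabled,
 * 0 if it is disabled or if no outer cache has been registered.
 */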
int __init uniphier_cache_l2_is_enabled(void)
{
	struct uniphier_cache_data *data;

	data = list_first_entry_or_null(&uniphier_cache_list,
					struct uniphier_cache_data, list);
	if (!data)
		return 0;

	return !!(readl_relaxed(data->ctrl_base + UNIPHIER_SSCC) &
		  UNIPHIER_SSCC_ON);
}

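/**
 * uniphier_cache_l2_touch_range - touch (pre-fetch) a range into the L2 cache
 *
 * @start: start address of the target range
 * @end: end address (exclusive) of the target range
 *
 * The touch operation is directed at the ways previously selected by
 * uniphier_cache_l2_set_locked_ways().
 */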
void __init uniphier_cache_l2_touch_range(unsigned long start,
					  unsigned long end)
{
	struct uniphier_cache_data *data;

	data = list_first_entry_or_null(&uniphier_cache_list,
					struct uniphier_cache_data, list);
	if (data)
		__uniphier_cache_maint_range(data, start, end,
					     UNIPHIER_SSCOQM_TID_WAY |
					     UNIPHIER_SSCOQM_CM_TOUCH);
}

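/**
 * uniphier_cache_l2_set_locked_ways - lock the given ways of the L2 cache
 *
 * @way_mask: each bit selects a way to be locked; bits for ways that are not
 * present are ignored
 */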
void __init uniphier_cache_l2_set_locked_ways(u32 way_mask)
{
	struct uniphier_cache_data *data;

	data = list_first_entry_or_null(&uniphier_cache_list,
					struct uniphier_cache_data, list);
	if (data)
		__uniphier_cache_set_locked_ways(data, way_mask);
}

static const struct of_device_id uniphier_cache_match[] __initconst = {
	{
		.compatible = "socionext,uniphier-system-cache",
	},
	{ /* sentinel */ }
};

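/*
 * Follow the standard "next-level-cache" phandle to the node of the next
 * outer level.  Returns NULL if the property is absent.
 */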
static struct device_node * __init uniphier_cache_get_next_level_node(
							struct device_node *np)
{
	u32 phandle;

	if (of_property_read_u32(np, "next-level-cache", &phandle))
		return NULL;

	return of_find_node_by_phandle(phandle);
}

static int __init __uniphier_cache_init(struct device_node *np,
					unsigned int *cache_level)
{
	struct uniphier_cache_data *data;
	u32 level, cache_size;
	struct device_node *next_np;
	int ret = 0;

	if (!of_match_node(uniphier_cache_match, np)) {
		pr_err("L%d: not compatible with uniphier cache\n",
		       *cache_level);
		return -EINVAL;
	}

	if (of_property_read_u32(np, "cache-level", &level)) {
		pr_err("L%d: cache-level is not specified\n", *cache_level);
		return -EINVAL;
	}

	if (level != *cache_level) {
		pr_err("L%d: cache-level is unexpected value %d\n",
		       *cache_level, level);
		return -EINVAL;
	}

	if (!of_property_read_bool(np, "cache-unified")) {
		pr_err("L%d: cache-unified is not specified\n", *cache_level);
		return -EINVAL;
	}

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	if (of_property_read_u32(np, "cache-line-size", &data->line_size) ||
	    !is_power_of_2(data->line_size)) {
		pr_err("L%d: cache-line-size is unspecified or invalid\n",
		       *cache_level);
		ret = -EINVAL;
		goto err;
	}

	if (of_property_read_u32(np, "cache-sets", &data->nsets) ||
	    !is_power_of_2(data->nsets)) {
		pr_err("L%d: cache-sets is unspecified or invalid\n",
		       *cache_level);
		ret = -EINVAL;
		goto err;
	}

	if (of_property_read_u32(np, "cache-size", &cache_size) ||
	    cache_size == 0 || cache_size % (data->nsets * data->line_size)) {
		pr_err("L%d: cache-size is unspecified or invalid\n",
		       *cache_level);
		ret = -EINVAL;
		goto err;
	}

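	/* one bit per way; ways = cache_size / (nsets * line_size) */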
	data->way_present_mask =
		((u32)1 << cache_size / data->nsets / data->line_size) - 1;

	data->ctrl_base = of_iomap(np, 0);
	if (!data->ctrl_base) {
		pr_err("L%d: failed to map control register\n", *cache_level);
		ret = -ENOMEM;
		goto err;
	}

	data->rev_base = of_iomap(np, 1);
	if (!data->rev_base) {
		pr_err("L%d: failed to map revision register\n", *cache_level);
		ret = -ENOMEM;
		goto err;
	}

	data->op_base = of_iomap(np, 2);
	if (!data->op_base) {
		pr_err("L%d: failed to map operation register\n", *cache_level);
		ret = -ENOMEM;
		goto err;
	}

	if (*cache_level == 2) {
		u32 revision = readl(data->rev_base + UNIPHIER_SSCID);
		/*
		 * The size of range operation is limited to (1 << 22) or less
		 * for PH-sLD8 or older SoCs.
		 */
		if (revision <= 0x16)
			data->range_op_max_size = (u32)1 << 22;
	}

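	/*
	 * For levels with no limit set above, range_op_max_size is still zero
	 * here (from kzalloc()), so the u32 subtraction below wraps around to
	 * a practically unlimited size.
	 */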
	data->range_op_max_size -= data->line_size;

	INIT_LIST_HEAD(&data->list);
	list_add_tail(&data->list, &uniphier_cache_list);	/* no mutex */

	/*
	 * OK, this level has been successfully initialized.  Look for the next
	 * level cache.  Do not roll back even if the initialization of the
	 * next level cache fails because we want to continue with available
	 * cache levels.
	 */
	next_np = uniphier_cache_get_next_level_node(np);
	if (next_np) {
		(*cache_level)++;
		ret = __uniphier_cache_init(next_np, cache_level);
	}
	of_node_put(next_np);

	return ret;
err:
	iounmap(data->op_base);
	iounmap(data->rev_base);
	iounmap(data->ctrl_base);
	kfree(data);

	return ret;
}

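/**
 * uniphier_cache_init - look up the L2 cache node and set up the outer cache
 *
 * Return: 0 on success (failures on the optional L3 or outer levels are
 * tolerated), -ENODEV if no L2 cache node is found, or the error code from
 * a failed L2 initialization.
 */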
int __init uniphier_cache_init(void)
{
	struct device_node *np = NULL;
	unsigned int cache_level;
	int ret = 0;

	/* look for level 2 cache */
	while ((np = of_find_matching_node(np, uniphier_cache_match)))
		if (!of_property_read_u32(np, "cache-level", &cache_level) &&
		    cache_level == 2)
			break;

	if (!np)
		return -ENODEV;

	ret = __uniphier_cache_init(np, &cache_level);
	of_node_put(np);

	if (ret) {
		/*
		 * Error out only if L2 initialization fails.  Continue with
		 * any error on L3 or outer because they are optional.
		 */
		if (cache_level == 2) {
			pr_err("failed to initialize L2 cache\n");
			return ret;
		}

		cache_level--;
		ret = 0;
	}

	outer_cache.inv_range = uniphier_cache_inv_range;
	outer_cache.clean_range = uniphier_cache_clean_range;
	outer_cache.flush_range = uniphier_cache_flush_range;
	outer_cache.flush_all = uniphier_cache_flush_all;
	outer_cache.disable = uniphier_cache_disable;
	outer_cache.sync = uniphier_cache_sync;

	uniphier_cache_enable();

	pr_info("enabled outer cache (cache level: %d)\n", cache_level);

	return ret;
}