// SPDX-License-Identifier: GPL-2.0-only
/*
 * Processor cache information made available to userspace via sysfs;
 * intended to be compatible with x86 intel_cacheinfo implementation.
 *
 * Copyright 2008 IBM Corporation
 * Author: Nathan Lynch
 */

#define pr_fmt(fmt) "cacheinfo: " fmt

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <asm/cputhreads.h>
#include <asm/smp.h>

#include "cacheinfo.h"

/* per-cpu object for tracking:
 * - a "cache" kobject for the top-level directory
 * - a list of "index" objects representing the cpu's local cache hierarchy
 */
struct cache_dir {
	struct kobject *kobj; /* bare (not embedded) kobject for cache
			       * directory */
	struct cache_index_dir *index; /* list of index objects */
};

/* "index" object: each cpu's cache directory has an index
 * subdirectory corresponding to a cache object associated with the
 * cpu.  This object's lifetime is managed via the embedded kobject.
 */
struct cache_index_dir {
	struct kobject kobj;
	struct cache_index_dir *next; /* next index in parent directory */
	struct cache *cache;
};

/* Template for determining which OF properties to query for a given
 * cache type */
struct cache_type_info {
	const char *name;
	const char *size_prop;

	/* Allow for both [di]-cache-line-size and
	 * [di]-cache-block-size properties.  According to the PowerPC
	 * Processor binding, -line-size should be provided if it
	 * differs from the cache block size (that which is operated
	 * on by cache instructions), so we look for -line-size first.
	 * See cache_get_line_size(). */

	const char *line_size_props[2];
	const char *nr_sets_prop;
};

/* These are used to index the cache_type_info array. */
#define CACHE_TYPE_UNIFIED     0 /* cache-size, cache-block-size, etc. */
#define CACHE_TYPE_UNIFIED_D   1 /* d-cache-size, d-cache-block-size, etc */
#define CACHE_TYPE_INSTRUCTION 2
#define CACHE_TYPE_DATA        3

static const struct cache_type_info cache_type_info[] = {
	{
		/* Embedded systems that use cache-size, cache-block-size,
		 * etc. for the Unified (typically L2) cache. */
		.name            = "Unified",
		.size_prop       = "cache-size",
		.line_size_props = { "cache-line-size",
				     "cache-block-size", },
		.nr_sets_prop    = "cache-sets",
	},
	{
		/* PowerPC Processor binding says the [di]-cache-*
		 * must be equal on unified caches, so just use
		 * d-cache properties. */
		.name            = "Unified",
		.size_prop       = "d-cache-size",
		.line_size_props = { "d-cache-line-size",
				     "d-cache-block-size", },
		.nr_sets_prop    = "d-cache-sets",
	},
	{
		.name            = "Instruction",
		.size_prop       = "i-cache-size",
		.line_size_props = { "i-cache-line-size",
				     "i-cache-block-size", },
		.nr_sets_prop    = "i-cache-sets",
	},
	{
		.name            = "Data",
		.size_prop       = "d-cache-size",
		.line_size_props = { "d-cache-line-size",
				     "d-cache-block-size", },
		.nr_sets_prop    = "d-cache-sets",
	},
};
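
/*
 * For illustration only (hypothetical node and values, not from this
 * file): the table above maps onto device-tree properties along these
 * lines.  A core with split L1 caches might carry:
 *
 *	cpu@0 {
 *		device_type = "cpu";
 *		d-cache-size = <0x8000>;	// 32K, CACHE_TYPE_DATA size_prop
 *		d-cache-block-size = <0x80>;	// used if d-cache-line-size is absent
 *		d-cache-sets = <0x40>;
 *		i-cache-size = <0x8000>;
 *		i-cache-block-size = <0x80>;
 *		i-cache-sets = <0x40>;
 *		l2-cache = <&L2_0>;		// phandle to the next level
 *	};
 *
 * while an embedded-style unified L2 node would use the plain
 * cache-size/cache-block-size/cache-sets names from the first entry.
 */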

/* Cache object: each instance of this corresponds to a distinct cache
 * in the system.  There are separate objects for Harvard caches: one
 * each for instruction and data, and each refers to the same OF node.
 * The refcount of the OF node is elevated for the lifetime of the
 * cache object.  A cache object is released when its shared_cpu_map
 * is cleared (see cache_cpu_clear).
 *
 * A cache object is on two lists: an unsorted global list
 * (cache_list) of cache objects; and a singly-linked list
 * representing the local cache hierarchy, which is ordered by level
 * (e.g. L1d -> L1i -> L2 -> L3).
 */
struct cache {
	struct device_node *ofnode;    /* OF node for this cache, may be cpu */
	struct cpumask shared_cpu_map; /* online CPUs using this cache */
	int type;                      /* split cache disambiguation */
	int level;                     /* level not explicit in device tree */
	int group_id;                  /* id of the group of threads that share this cache */
	struct list_head list;         /* global list of cache objects */
	struct cache *next_local;      /* next cache of >= level */
};
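
/*
 * Illustrative sketch (hypothetical topology): with two cores sharing a
 * unified L2, each core's local chain ends in the same L2 object, and
 * that object's shared_cpu_map covers both cores' threads:
 *
 *	cpu0: L1d -> L1i -> L2
 *	cpu1: L1d -> L1i -> L2	(same object; shared_cpu_map = cpu0|cpu1)
 *
 * cache_list, by contrast, holds each of the five objects exactly once.
 */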

static DEFINE_PER_CPU(struct cache_dir *, cache_dir_pcpu);

/* traversal/modification of this list occurs only at cpu hotplug time;
 * access is serialized by cpu hotplug locking
 */
static LIST_HEAD(cache_list);

static struct cache_index_dir *kobj_to_cache_index_dir(struct kobject *k)
{
	return container_of(k, struct cache_index_dir, kobj);
}

static const char *cache_type_string(const struct cache *cache)
{
	return cache_type_info[cache->type].name;
}

static void cache_init(struct cache *cache, int type, int level,
		       struct device_node *ofnode, int group_id)
{
	cache->type = type;
	cache->level = level;
	cache->ofnode = of_node_get(ofnode);
	cache->group_id = group_id;
	INIT_LIST_HEAD(&cache->list);
	list_add(&cache->list, &cache_list);
}

static struct cache *new_cache(int type, int level,
			       struct device_node *ofnode, int group_id)
{
	struct cache *cache;

	cache = kzalloc(sizeof(*cache), GFP_KERNEL);
	if (cache)
		cache_init(cache, type, level, ofnode, group_id);

	return cache;
}

static void release_cache_debugcheck(struct cache *cache)
{
	struct cache *iter;

	list_for_each_entry(iter, &cache_list, list)
		WARN_ONCE(iter->next_local == cache,
			  "cache for %pOFP(%s) refers to cache for %pOFP(%s)\n",
			  iter->ofnode,
			  cache_type_string(iter),
			  cache->ofnode,
			  cache_type_string(cache));
}

static void release_cache(struct cache *cache)
{
	if (!cache)
		return;

	pr_debug("freeing L%d %s cache for %pOFP\n", cache->level,
		 cache_type_string(cache), cache->ofnode);

	release_cache_debugcheck(cache);
	list_del(&cache->list);
	of_node_put(cache->ofnode);
	kfree(cache);
}

static void cache_cpu_set(struct cache *cache, int cpu)
{
	struct cache *next = cache;

	while (next) {
		WARN_ONCE(cpumask_test_cpu(cpu, &next->shared_cpu_map),
			  "CPU %i already accounted in %pOFP(%s)\n",
			  cpu, next->ofnode,
			  cache_type_string(next));
		cpumask_set_cpu(cpu, &next->shared_cpu_map);
		next = next->next_local;
	}
}

static int cache_size(const struct cache *cache, unsigned int *ret)
{
	const char *propname;
	const __be32 *cache_size;

	propname = cache_type_info[cache->type].size_prop;

	cache_size = of_get_property(cache->ofnode, propname, NULL);
	if (!cache_size)
		return -ENODEV;

	*ret = of_read_number(cache_size, 1);
	return 0;
}

static int cache_size_kb(const struct cache *cache, unsigned int *ret)
{
	unsigned int size;

	if (cache_size(cache, &size))
		return -ENODEV;

	*ret = size / 1024;
	return 0;
}

/* not cache_line_size() because that's a macro in include/linux/cache.h */
static int cache_get_line_size(const struct cache *cache, unsigned int *ret)
{
	const __be32 *line_size;
	int i, lim;

	lim = ARRAY_SIZE(cache_type_info[cache->type].line_size_props);

	for (i = 0; i < lim; i++) {
		const char *propname;

		propname = cache_type_info[cache->type].line_size_props[i];
		line_size = of_get_property(cache->ofnode, propname, NULL);
		if (line_size)
			break;
	}

	if (!line_size)
		return -ENODEV;

	*ret = of_read_number(line_size, 1);
	return 0;
}

static int cache_nr_sets(const struct cache *cache, unsigned int *ret)
{
	const char *propname;
	const __be32 *nr_sets;

	propname = cache_type_info[cache->type].nr_sets_prop;

	nr_sets = of_get_property(cache->ofnode, propname, NULL);
	if (!nr_sets)
		return -ENODEV;

	*ret = of_read_number(nr_sets, 1);
	return 0;
}

static int cache_associativity(const struct cache *cache, unsigned int *ret)
{
	unsigned int line_size;
	unsigned int nr_sets;
	unsigned int size;

	if (cache_nr_sets(cache, &nr_sets))
		goto err;

	/* If the cache is fully associative, there is no need to
	 * check the other properties.
	 */
	if (nr_sets == 1) {
		*ret = 0;
		return 0;
	}

	if (cache_get_line_size(cache, &line_size))
		goto err;
	if (cache_size(cache, &size))
		goto err;

	if (!(nr_sets > 0 && size > 0 && line_size > 0))
		goto err;

	*ret = (size / nr_sets) / line_size;
	return 0;
err:
	return -ENODEV;
}
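
/*
 * Worked example for the computation above (made-up numbers): a 32K
 * cache with 128 sets and 64-byte lines yields (32768 / 128) / 64 = 4
 * ways -- each set spans 256 bytes, i.e. four 64-byte lines.
 */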

/* helper for dealing with split caches */
static struct cache *cache_find_first_sibling(struct cache *cache)
{
	struct cache *iter;

	if (cache->type == CACHE_TYPE_UNIFIED ||
	    cache->type == CACHE_TYPE_UNIFIED_D)
		return cache;

	list_for_each_entry(iter, &cache_list, list)
		if (iter->ofnode == cache->ofnode &&
		    iter->group_id == cache->group_id &&
		    iter->next_local == cache)
			return iter;

	return cache;
}

/* return the first cache on a local list matching node and thread-group id */
static struct cache *cache_lookup_by_node_group(const struct device_node *node,
						int group_id)
{
	struct cache *cache = NULL;
	struct cache *iter;

	list_for_each_entry(iter, &cache_list, list) {
		if (iter->ofnode != node ||
		    iter->group_id != group_id)
			continue;
		cache = cache_find_first_sibling(iter);
		break;
	}

	return cache;
}

static bool cache_node_is_unified(const struct device_node *np)
{
	return of_get_property(np, "cache-unified", NULL);
}

/*
 * Unified caches can have two different sets of tags.  Most embedded
 * use cache-size, etc. for the unified cache size, but open firmware systems
 * use d-cache-size, etc.  Check on initialization for which type we have, and
 * return the appropriate structure type.  Assume it's embedded if it isn't
 * open firmware.  If it's yet a 3rd type, then there will be missing entries
 * in /sys/devices/system/cpu/cpu0/cache/index2/, and this code will need
 * to be extended further.
 */
static int cache_is_unified_d(const struct device_node *np)
{
	return of_get_property(np,
		cache_type_info[CACHE_TYPE_UNIFIED_D].size_prop, NULL) ?
		CACHE_TYPE_UNIFIED_D : CACHE_TYPE_UNIFIED;
}

static struct cache *cache_do_one_devnode_unified(struct device_node *node, int group_id,
						  int level)
{
	pr_debug("creating L%d ucache for %pOFP\n", level, node);

	return new_cache(cache_is_unified_d(node), level, node, group_id);
}

static struct cache *cache_do_one_devnode_split(struct device_node *node, int group_id,
						int level)
{
	struct cache *dcache, *icache;

	pr_debug("creating L%d dcache and icache for %pOFP\n", level,
		 node);

	dcache = new_cache(CACHE_TYPE_DATA, level, node, group_id);
	icache = new_cache(CACHE_TYPE_INSTRUCTION, level, node, group_id);

	if (!dcache || !icache)
		goto err;

	dcache->next_local = icache;

	return dcache;
err:
	release_cache(dcache);
	release_cache(icache);
	return NULL;
}

static struct cache *cache_do_one_devnode(struct device_node *node, int group_id, int level)
{
	struct cache *cache;

	if (cache_node_is_unified(node))
		cache = cache_do_one_devnode_unified(node, group_id, level);
	else
		cache = cache_do_one_devnode_split(node, group_id, level);

	return cache;
}

static struct cache *cache_lookup_or_instantiate(struct device_node *node,
						 int group_id,
						 int level)
{
	struct cache *cache;

	cache = cache_lookup_by_node_group(node, group_id);

	WARN_ONCE(cache && cache->level != level,
		  "cache level mismatch on lookup (got %d, expected %d)\n",
		  cache->level, level);

	if (!cache)
		cache = cache_do_one_devnode(node, group_id, level);

	return cache;
}

static void link_cache_lists(struct cache *smaller, struct cache *bigger)
{
	while (smaller->next_local) {
		if (smaller->next_local == bigger)
			return; /* already linked */
		smaller = smaller->next_local;
	}

	smaller->next_local = bigger;

	/*
	 * The cache->next_local list sorts by level ascending:
	 * L1d -> L1i -> L2 -> L3 ...
	 */
	WARN_ONCE((smaller->level == 1 && bigger->level > 2) ||
		  (smaller->level > 1 && bigger->level != smaller->level + 1),
		  "linking L%i cache %pOFP to L%i cache %pOFP; skipped a level?\n",
		  smaller->level, smaller->ofnode, bigger->level, bigger->ofnode);
}

static void do_subsidiary_caches_debugcheck(struct cache *cache)
{
	WARN_ONCE(cache->level != 1,
		  "instantiating cache chain from L%d %s cache for "
		  "%pOFP instead of an L1\n", cache->level,
		  cache_type_string(cache), cache->ofnode);
	WARN_ONCE(!of_node_is_type(cache->ofnode, "cpu"),
		  "instantiating cache chain from node %pOFP of type '%s' "
		  "instead of a cpu node\n", cache->ofnode,
		  of_node_get_device_type(cache->ofnode));
}

/*
 * If sub-groups of threads in a core containing @cpu_id share the
 * L@level-cache (information obtained via "ibm,thread-groups"
 * device-tree property), then we identify the group by the first
 * thread-sibling in the group.  We define this to be the group-id.
 *
 * In the absence of any thread-group information for L@level-cache,
 * this function returns -1.
 */
static int get_group_id(unsigned int cpu_id, int level)
{
	if (has_big_cores && level == 1)
		return cpumask_first(per_cpu(thread_group_l1_cache_map,
					     cpu_id));
	else if (thread_group_shares_l2 && level == 2)
		return cpumask_first(per_cpu(thread_group_l2_cache_map,
					     cpu_id));
	else if (thread_group_shares_l3 && level == 3)
		return cpumask_first(per_cpu(thread_group_l3_cache_map,
					     cpu_id));
	return -1;
}
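
/*
 * Hypothetical example: if an SMT8 core is organized as two sub-cores
 * whose threads 0,2,4,6 share one L1 and threads 1,3,5,7 the other
 * (as "ibm,thread-groups" would describe), then get_group_id(4, 1)
 * returns 0 and get_group_id(5, 1) returns 1, giving the two groups
 * distinct cache objects even though they share a single OF cpu node.
 */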

static void do_subsidiary_caches(struct cache *cache, unsigned int cpu_id)
{
	struct device_node *subcache_node;
	int level = cache->level;

	do_subsidiary_caches_debugcheck(cache);

	while ((subcache_node = of_find_next_cache_node(cache->ofnode))) {
		struct cache *subcache;
		int group_id;

		level++;
		group_id = get_group_id(cpu_id, level);
		subcache = cache_lookup_or_instantiate(subcache_node, group_id, level);
		of_node_put(subcache_node);
		if (!subcache)
			break;

		link_cache_lists(cache, subcache);
		cache = subcache;
	}
}
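
/*
 * Note: on most systems of_find_next_cache_node() follows the node's
 * "l2-cache" (or "next-level-cache") phandle, so the loop above walks
 * outward one level per iteration until the chain of phandles ends.
 */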

static struct cache *cache_chain_instantiate(unsigned int cpu_id)
{
	struct device_node *cpu_node;
	struct cache *cpu_cache = NULL;
	int group_id;

	pr_debug("creating cache object(s) for CPU %i\n", cpu_id);

	cpu_node = of_get_cpu_node(cpu_id, NULL);
	WARN_ONCE(!cpu_node, "no OF node found for CPU %i\n", cpu_id);
	if (!cpu_node)
		goto out;

	group_id = get_group_id(cpu_id, 1);

	cpu_cache = cache_lookup_or_instantiate(cpu_node, group_id, 1);
	if (!cpu_cache)
		goto out;

	do_subsidiary_caches(cpu_cache, cpu_id);

	cache_cpu_set(cpu_cache, cpu_id);
out:
	of_node_put(cpu_node);

	return cpu_cache;
}

static struct cache_dir *cacheinfo_create_cache_dir(unsigned int cpu_id)
{
	struct cache_dir *cache_dir;
	struct device *dev;
	struct kobject *kobj = NULL;

	dev = get_cpu_device(cpu_id);
	WARN_ONCE(!dev, "no dev for CPU %i\n", cpu_id);
	if (!dev)
		goto err;

	kobj = kobject_create_and_add("cache", &dev->kobj);
	if (!kobj)
		goto err;

	cache_dir = kzalloc(sizeof(*cache_dir), GFP_KERNEL);
	if (!cache_dir)
		goto err;

	cache_dir->kobj = kobj;

	WARN_ON_ONCE(per_cpu(cache_dir_pcpu, cpu_id) != NULL);

	per_cpu(cache_dir_pcpu, cpu_id) = cache_dir;

	return cache_dir;
err:
	kobject_put(kobj);
	return NULL;
}

static void cache_index_release(struct kobject *kobj)
{
	struct cache_index_dir *index;

	index = kobj_to_cache_index_dir(kobj);

	pr_debug("freeing index directory for L%d %s cache\n",
		 index->cache->level, cache_type_string(index->cache));

	kfree(index);
}

static ssize_t cache_index_show(struct kobject *k, struct attribute *attr, char *buf)
{
	struct kobj_attribute *kobj_attr;

	kobj_attr = container_of(attr, struct kobj_attribute, attr);

	return kobj_attr->show(k, kobj_attr, buf);
}

static struct cache *index_kobj_to_cache(struct kobject *k)
{
	struct cache_index_dir *index;

	index = kobj_to_cache_index_dir(k);

	return index->cache;
}

static ssize_t size_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int size_kb;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_size_kb(cache, &size_kb))
		return -ENODEV;

	return sprintf(buf, "%uK\n", size_kb);
}

static struct kobj_attribute cache_size_attr =
	__ATTR(size, 0444, size_show, NULL);
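
/*
 * For illustration, the attributes defined here and below surface as
 * per-index sysfs files; the values shown are examples only:
 *
 *	$ cat /sys/devices/system/cpu/cpu0/cache/index0/size
 *	32K
 *	$ cat /sys/devices/system/cpu/cpu0/cache/index0/coherency_line_size
 *	128
 */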


static ssize_t line_size_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int line_size;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_get_line_size(cache, &line_size))
		return -ENODEV;

	return sprintf(buf, "%u\n", line_size);
}

static struct kobj_attribute cache_line_size_attr =
	__ATTR(coherency_line_size, 0444, line_size_show, NULL);

static ssize_t nr_sets_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int nr_sets;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_nr_sets(cache, &nr_sets))
		return -ENODEV;

	return sprintf(buf, "%u\n", nr_sets);
}

static struct kobj_attribute cache_nr_sets_attr =
	__ATTR(number_of_sets, 0444, nr_sets_show, NULL);

static ssize_t associativity_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int associativity;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_associativity(cache, &associativity))
		return -ENODEV;

	return sprintf(buf, "%u\n", associativity);
}

static struct kobj_attribute cache_assoc_attr =
	__ATTR(ways_of_associativity, 0444, associativity_show, NULL);

static ssize_t type_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	return sprintf(buf, "%s\n", cache_type_string(cache));
}

static struct kobj_attribute cache_type_attr =
	__ATTR(type, 0444, type_show, NULL);

static ssize_t level_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	struct cache_index_dir *index;
	struct cache *cache;

	index = kobj_to_cache_index_dir(k);
	cache = index->cache;

	return sprintf(buf, "%d\n", cache->level);
}

static struct kobj_attribute cache_level_attr =
	__ATTR(level, 0444, level_show, NULL);

static ssize_t
show_shared_cpumap(struct kobject *k, struct kobj_attribute *attr, char *buf, bool list)
{
	struct cache_index_dir *index;
	struct cache *cache;
	const struct cpumask *mask;

	index = kobj_to_cache_index_dir(k);
	cache = index->cache;

	mask = &cache->shared_cpu_map;

	return cpumap_print_to_pagebuf(list, buf, mask);
}

static ssize_t shared_cpu_map_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	return show_shared_cpumap(k, attr, buf, false);
}

static ssize_t shared_cpu_list_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	return show_shared_cpumap(k, attr, buf, true);
}

static struct kobj_attribute cache_shared_cpu_map_attr =
	__ATTR(shared_cpu_map, 0444, shared_cpu_map_show, NULL);

static struct kobj_attribute cache_shared_cpu_list_attr =
	__ATTR(shared_cpu_list, 0444, shared_cpu_list_show, NULL);

/* Attributes which should always be created -- the kobject/sysfs core
 * does this automatically via kobj_type->default_groups.  This is the
 * minimum data required to uniquely identify a cache.
 */
static struct attribute *cache_index_default_attrs[] = {
	&cache_type_attr.attr,
	&cache_level_attr.attr,
	&cache_shared_cpu_map_attr.attr,
	&cache_shared_cpu_list_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(cache_index_default);

/* Attributes which should be created if the cache device node has the
 * right properties -- see cacheinfo_create_index_opt_attrs
 */
static struct kobj_attribute *cache_index_opt_attrs[] = {
	&cache_size_attr,
	&cache_line_size_attr,
	&cache_nr_sets_attr,
	&cache_assoc_attr,
};

static const struct sysfs_ops cache_index_ops = {
	.show = cache_index_show,
};

static const struct kobj_type cache_index_type = {
	.release = cache_index_release,
	.sysfs_ops = &cache_index_ops,
	.default_groups = cache_index_default_groups,
};
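
/*
 * Lifetime summary (no new mechanism, just how the pieces above fit):
 * each index directory embeds its kobject, that kobject's ktype is
 * cache_index_type, and so the final kobject_put() on it lands in
 * cache_index_release(), which frees the cache_index_dir.
 */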

static void cacheinfo_create_index_opt_attrs(struct cache_index_dir *dir)
{
	const char *cache_type;
	struct cache *cache;
	char *buf;
	int i;

	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return;

	cache = dir->cache;
	cache_type = cache_type_string(cache);

	/* We don't want to create an attribute that can't provide a
	 * meaningful value.  Check the return value of each optional
	 * attribute's ->show method before registering the
	 * attribute.
	 */
	for (i = 0; i < ARRAY_SIZE(cache_index_opt_attrs); i++) {
		struct kobj_attribute *attr;
		ssize_t rc;

		attr = cache_index_opt_attrs[i];

		rc = attr->show(&dir->kobj, attr, buf);
		if (rc <= 0) {
			pr_debug("not creating %s attribute for "
				 "%pOFP(%s) (rc = %zd)\n",
				 attr->attr.name, cache->ofnode,
				 cache_type, rc);
			continue;
		}
		if (sysfs_create_file(&dir->kobj, &attr->attr))
			pr_debug("could not create %s attribute for %pOFP(%s)\n",
				 attr->attr.name, cache->ofnode, cache_type);
	}

	kfree(buf);
}

static void cacheinfo_create_index_dir(struct cache *cache, int index,
				       struct cache_dir *cache_dir)
{
	struct cache_index_dir *index_dir;
	int rc;

	index_dir = kzalloc(sizeof(*index_dir), GFP_KERNEL);
	if (!index_dir)
		return;

	index_dir->cache = cache;

	rc = kobject_init_and_add(&index_dir->kobj, &cache_index_type,
				  cache_dir->kobj, "index%d", index);
	if (rc) {
		kobject_put(&index_dir->kobj);
		return;
	}

	index_dir->next = cache_dir->index;
	cache_dir->index = index_dir;

	cacheinfo_create_index_opt_attrs(index_dir);
}

static void cacheinfo_sysfs_populate(unsigned int cpu_id,
				     struct cache *cache_list)
{
	struct cache_dir *cache_dir;
	struct cache *cache;
	int index = 0;

	cache_dir = cacheinfo_create_cache_dir(cpu_id);
	if (!cache_dir)
		return;

	cache = cache_list;
	while (cache) {
		cacheinfo_create_index_dir(cache, index, cache_dir);
		index++;
		cache = cache->next_local;
	}
}

void cacheinfo_cpu_online(unsigned int cpu_id)
{
	struct cache *cache;

	cache = cache_chain_instantiate(cpu_id);
	if (!cache)
		return;

	cacheinfo_sysfs_populate(cpu_id, cache);
}
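
/*
 * Usage sketch (callers live outside this file, e.g. the CPU sysfs and
 * hotplug code): cacheinfo_cpu_online() runs when a CPU comes online and
 * cacheinfo_cpu_offline() below when it goes away, keeping the sysfs
 * view consistent with the set of online CPUs.
 */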

/* functions needed to remove cache entry for cpu offline or suspend/resume */

#if (defined(CONFIG_PPC_PSERIES) && defined(CONFIG_SUSPEND)) || \
    defined(CONFIG_HOTPLUG_CPU)

static struct cache *cache_lookup_by_cpu(unsigned int cpu_id)
{
	struct device_node *cpu_node;
	struct cache *cache;
	int group_id;

	cpu_node = of_get_cpu_node(cpu_id, NULL);
	WARN_ONCE(!cpu_node, "no OF node found for CPU %i\n", cpu_id);
	if (!cpu_node)
		return NULL;

	group_id = get_group_id(cpu_id, 1);
	cache = cache_lookup_by_node_group(cpu_node, group_id);
	of_node_put(cpu_node);

	return cache;
}

static void remove_index_dirs(struct cache_dir *cache_dir)
{
	struct cache_index_dir *index;

	index = cache_dir->index;

	while (index) {
		struct cache_index_dir *next;

		next = index->next;
		kobject_put(&index->kobj);
		index = next;
	}
}

static void remove_cache_dir(struct cache_dir *cache_dir)
{
	remove_index_dirs(cache_dir);

	/* Remove cache dir from sysfs */
	kobject_del(cache_dir->kobj);

	kobject_put(cache_dir->kobj);

	kfree(cache_dir);
}

static void cache_cpu_clear(struct cache *cache, int cpu)
{
	while (cache) {
		struct cache *next = cache->next_local;

		WARN_ONCE(!cpumask_test_cpu(cpu, &cache->shared_cpu_map),
			  "CPU %i not accounted in %pOFP(%s)\n",
			  cpu, cache->ofnode,
			  cache_type_string(cache));

		cpumask_clear_cpu(cpu, &cache->shared_cpu_map);

		/* Release the cache object if all the cpus using it
		 * are offline */
		if (cpumask_empty(&cache->shared_cpu_map))
			release_cache(cache);

		cache = next;
	}
}

void cacheinfo_cpu_offline(unsigned int cpu_id)
{
	struct cache_dir *cache_dir;
	struct cache *cache;

	/* Prevent userspace from seeing inconsistent state - remove
	 * the sysfs hierarchy first */
	cache_dir = per_cpu(cache_dir_pcpu, cpu_id);

	/* careful, sysfs population may have failed */
	if (cache_dir)
		remove_cache_dir(cache_dir);

	per_cpu(cache_dir_pcpu, cpu_id) = NULL;

	/* clear the CPU's bit in its cache chain, possibly freeing
	 * cache objects */
	cache = cache_lookup_by_cpu(cpu_id);
	if (cache)
		cache_cpu_clear(cache, cpu_id);
}

void cacheinfo_teardown(void)
{
	unsigned int cpu;

	lockdep_assert_cpus_held();

	for_each_online_cpu(cpu)
		cacheinfo_cpu_offline(cpu);
}

void cacheinfo_rebuild(void)
{
	unsigned int cpu;

	lockdep_assert_cpus_held();

	for_each_online_cpu(cpu)
		cacheinfo_cpu_online(cpu);
}

#endif /* (CONFIG_PPC_PSERIES && CONFIG_SUSPEND) || CONFIG_HOTPLUG_CPU */