// SPDX-License-Identifier: GPL-2.0
/*
 * cacheinfo support - processor cache information via sysfs
 *
 * Based on arch/x86/kernel/cpu/intel_cacheinfo.c
 * Author: Sudeep Holla <sudeep.holla@arm.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/cacheinfo.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/of_device.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/sysfs.h>

/* pointer to per cpu cacheinfo */
static DEFINE_PER_CPU(struct cpu_cacheinfo, ci_cpu_cacheinfo);
#define ci_cacheinfo(cpu)       (&per_cpu(ci_cpu_cacheinfo, cpu))
#define cache_leaves(cpu)       (ci_cacheinfo(cpu)->num_leaves)
#define per_cpu_cacheinfo(cpu)  (ci_cacheinfo(cpu)->info_list)
#define per_cpu_cacheinfo_idx(cpu, idx)         \
                                (per_cpu_cacheinfo(cpu) + (idx))

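/*
 * Return the cacheinfo descriptor for @cpu. A caller might walk the
 * leaves like this (sketch):
 *
 *      struct cpu_cacheinfo *ci = get_cpu_cacheinfo(cpu);
 *      unsigned int i;
 *
 *      for (i = 0; i < ci->num_leaves; i++)
 *              pr_debug("level %u\n", ci->info_list[i].level);
 */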
struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu)
{
        return ci_cacheinfo(cpu);
}

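/*
 * Two leaves are treated as shared if they refer to the same cache:
 * compare cache IDs when both leaves carry one, otherwise fall back to
 * comparing the firmware tokens (the DT node on OF systems).
 */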
static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
                                           struct cacheinfo *sib_leaf)
{
        /*
         * For non-DT/ACPI systems, assume unique level 1 caches and
         * system-wide shared caches for all other levels. This is used
         * only if arch specific code has not populated shared_cpu_map.
         */
        if (!(IS_ENABLED(CONFIG_OF) || IS_ENABLED(CONFIG_ACPI)))
                return this_leaf->level != 1;

        if ((sib_leaf->attributes & CACHE_ID) &&
            (this_leaf->attributes & CACHE_ID))
                return sib_leaf->id == this_leaf->id;

        return sib_leaf->fw_token == this_leaf->fw_token;
}

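/*
 * The LLC is the last populated leaf; it is considered valid once it
 * can be identified, i.e. it carries a cache ID or a firmware token.
 */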
bool last_level_cache_is_valid(unsigned int cpu)
{
        struct cacheinfo *llc;

        if (!cache_leaves(cpu))
                return false;

        llc = per_cpu_cacheinfo_idx(cpu, cache_leaves(cpu) - 1);

        return (llc->attributes & CACHE_ID) || !!llc->fw_token;
}

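/* report whether cpu_x and cpu_y share their last level cache */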
bool last_level_cache_is_shared(unsigned int cpu_x, unsigned int cpu_y)
{
        struct cacheinfo *llc_x, *llc_y;

        if (!last_level_cache_is_valid(cpu_x) ||
            !last_level_cache_is_valid(cpu_y))
                return false;

        llc_x = per_cpu_cacheinfo_idx(cpu_x, cache_leaves(cpu_x) - 1);
        llc_y = per_cpu_cacheinfo_idx(cpu_y, cache_leaves(cpu_y) - 1);

        return cache_leaves_are_shared(llc_x, llc_y);
}

#ifdef CONFIG_OF
/* OF properties to query for a given cache type */
struct cache_type_info {
        const char *size_prop;
        const char *line_size_props[2];
        const char *nr_sets_prop;
};

static const struct cache_type_info cache_type_info[] = {
        {
                .size_prop       = "cache-size",
                .line_size_props = { "cache-line-size",
                                     "cache-block-size", },
                .nr_sets_prop    = "cache-sets",
        }, {
                .size_prop       = "i-cache-size",
                .line_size_props = { "i-cache-line-size",
                                     "i-cache-block-size", },
                .nr_sets_prop    = "i-cache-sets",
        }, {
                .size_prop       = "d-cache-size",
                .line_size_props = { "d-cache-line-size",
                                     "d-cache-block-size", },
                .nr_sets_prop    = "d-cache-sets",
        },
};

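/*
 * Map a cache type to its index in cache_type_info[]: the unified entry
 * sits at index 0, and CACHE_TYPE_INST/CACHE_TYPE_DATA match the
 * i-cache and d-cache entries by enum value.
 */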
static inline int get_cacheinfo_idx(enum cache_type type)
{
        if (type == CACHE_TYPE_UNIFIED)
                return 0;
        return type;
}

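/* read the cache size (in bytes) from the matching OF property */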
static void cache_size(struct cacheinfo *this_leaf, struct device_node *np)
{
        const char *propname;
        int ct_idx;

        ct_idx = get_cacheinfo_idx(this_leaf->type);
        propname = cache_type_info[ct_idx].size_prop;

        of_property_read_u32(np, propname, &this_leaf->size);
}

/* not cache_line_size() because that's a macro in include/linux/cache.h */
static void cache_get_line_size(struct cacheinfo *this_leaf,
                                struct device_node *np)
{
        int i, lim, ct_idx;

        ct_idx = get_cacheinfo_idx(this_leaf->type);
        lim = ARRAY_SIZE(cache_type_info[ct_idx].line_size_props);

        for (i = 0; i < lim; i++) {
                int ret;
                u32 line_size;
                const char *propname;

                propname = cache_type_info[ct_idx].line_size_props[i];
                ret = of_property_read_u32(np, propname, &line_size);
                if (!ret) {
                        this_leaf->coherency_line_size = line_size;
                        break;
                }
        }
}

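/* read the number of sets from the matching OF property */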
static void cache_nr_sets(struct cacheinfo *this_leaf, struct device_node *np)
{
        const char *propname;
        int ct_idx;

        ct_idx = get_cacheinfo_idx(this_leaf->type);
        propname = cache_type_info[ct_idx].nr_sets_prop;

        of_property_read_u32(np, propname, &this_leaf->number_of_sets);
}

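/*
 * Derive the associativity from the other properties:
 * ways = size / (nr_sets * line_size)
 */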
static void cache_associativity(struct cacheinfo *this_leaf)
{
        unsigned int line_size = this_leaf->coherency_line_size;
        unsigned int nr_sets = this_leaf->number_of_sets;
        unsigned int size = this_leaf->size;

        /*
         * If the cache is fully associative, there is no need to
         * check the other properties.
         */
        if (nr_sets > 1 && size > 0 && line_size > 0)
                this_leaf->ways_of_associativity = (size / nr_sets) / line_size;
}

static bool cache_node_is_unified(struct cacheinfo *this_leaf,
                                  struct device_node *np)
{
        return of_property_read_bool(np, "cache-unified");
}

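/* fill in all DT-derived properties for a single cache leaf */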
static void cache_of_set_props(struct cacheinfo *this_leaf,
                               struct device_node *np)
{
        /*
         * init_cache_level must set up the cache level correctly,
         * overriding the architecturally specified levels, so if the
         * type is still NOCACHE at this stage, it should be unified.
         */
        if (this_leaf->type == CACHE_TYPE_NOCACHE &&
            cache_node_is_unified(this_leaf, np))
                this_leaf->type = CACHE_TYPE_UNIFIED;
        cache_size(this_leaf, np);
        cache_get_line_size(this_leaf, np);
        cache_nr_sets(this_leaf, np);
        cache_associativity(this_leaf);
}

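/*
 * Walk the DT cache hierarchy for @cpu: level 1 properties live in the
 * CPU node itself, higher levels are reached via the next-level-cache
 * links. Each node visited is stored in fw_token with a reference held;
 * cache_shared_cpu_map_remove() drops those references.
 */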
static int cache_setup_of_node(unsigned int cpu)
{
        struct device_node *np;
        struct cacheinfo *this_leaf;
        unsigned int index = 0;

        np = of_cpu_device_node_get(cpu);
        if (!np) {
                pr_err("Failed to find cpu%d device node\n", cpu);
                return -ENOENT;
        }

        while (index < cache_leaves(cpu)) {
                this_leaf = per_cpu_cacheinfo_idx(cpu, index);
                if (this_leaf->level != 1)
                        np = of_find_next_cache_node(np);
                else
                        np = of_node_get(np); /* cpu node itself */
                if (!np)
                        break;
                cache_of_set_props(this_leaf, np);
                this_leaf->fw_token = np;
                index++;
        }

        if (index != cache_leaves(cpu)) /* not all OF nodes populated */
                return -ENOENT;

        return 0;
}
#else
static inline int cache_setup_of_node(unsigned int cpu) { return 0; }
#endif

int __weak cache_setup_acpi(unsigned int cpu)
{
        return -ENOTSUPP;
}

unsigned int coherency_max_size;

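/* populate the cache leaves from firmware: DT if populated, else ACPI */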
static int cache_setup_properties(unsigned int cpu)
{
        int ret = 0;

        if (of_have_populated_dt())
                ret = cache_setup_of_node(cpu);
        else if (!acpi_disabled)
                ret = cache_setup_acpi(cpu);

        return ret;
}

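/*
 * Build shared_cpu_map for each leaf of @cpu by cross-checking against
 * every other online CPU that already has cacheinfo, and record the
 * largest line size seen in coherency_max_size.
 */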
static int cache_shared_cpu_map_setup(unsigned int cpu)
{
        struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
        struct cacheinfo *this_leaf, *sib_leaf;
        unsigned int index;
        int ret = 0;

        if (this_cpu_ci->cpu_map_populated)
                return 0;

        /*
         * Skip setting up the cache properties if the LLC is already
         * valid; only the shared cpu_map needs updating when the cache
         * attributes were populated early, before all the CPUs were
         * brought online.
         */
        if (!last_level_cache_is_valid(cpu)) {
                ret = cache_setup_properties(cpu);
                if (ret)
                        return ret;
        }

        for (index = 0; index < cache_leaves(cpu); index++) {
                unsigned int i;

                this_leaf = per_cpu_cacheinfo_idx(cpu, index);

                cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
                for_each_online_cpu(i) {
                        struct cpu_cacheinfo *sib_cpu_ci = get_cpu_cacheinfo(i);

                        if (i == cpu || !sib_cpu_ci->info_list)
                                continue; /* skip if itself or no cacheinfo */

                        sib_leaf = per_cpu_cacheinfo_idx(i, index);
                        if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
                                cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
                                cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
                        }
                }
                /* record the maximum cache line size */
                if (this_leaf->coherency_line_size > coherency_max_size)
                        coherency_max_size = this_leaf->coherency_line_size;
        }

        return 0;
}

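/*
 * Undo cache_shared_cpu_map_setup(): clear @cpu from every sibling's
 * map and drop the DT node references held via fw_token.
 */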
static void cache_shared_cpu_map_remove(unsigned int cpu)
{
        struct cacheinfo *this_leaf, *sib_leaf;
        unsigned int sibling, index;

        for (index = 0; index < cache_leaves(cpu); index++) {
                this_leaf = per_cpu_cacheinfo_idx(cpu, index);
                for_each_cpu(sibling, &this_leaf->shared_cpu_map) {
                        struct cpu_cacheinfo *sib_cpu_ci =
                                                get_cpu_cacheinfo(sibling);

                        if (sibling == cpu || !sib_cpu_ci->info_list)
                                continue; /* skip if itself or no cacheinfo */

                        sib_leaf = per_cpu_cacheinfo_idx(sibling, index);
                        cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map);
                        cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map);
                }
                if (of_have_populated_dt())
                        of_node_put(this_leaf->fw_token);
        }
}

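/* tear down the shared cpu maps and free the info_list of @cpu */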
static void free_cache_attributes(unsigned int cpu)
{
        if (!per_cpu_cacheinfo(cpu))
                return;

        cache_shared_cpu_map_remove(cpu);

        kfree(per_cpu_cacheinfo(cpu));
        per_cpu_cacheinfo(cpu) = NULL;
        cache_leaves(cpu) = 0;
}

int __weak init_cache_level(unsigned int cpu)
{
        return -ENOENT;
}

int __weak populate_cache_leaves(unsigned int cpu)
{
        return -ENOENT;
}

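/*
 * Allocate and populate the cacheinfo for @cpu and set up the shared
 * cpu maps. Safe to call again for a CPU whose info_list already
 * exists; only the maps are then refreshed.
 */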
int detect_cache_attributes(unsigned int cpu)
{
        int ret;

        /*
         * Since early detection of the cacheinfo is allowed via this
         * function and this also gets called as a CPU hotplug callback
         * via cacheinfo_cpu_online, the initialisation can be skipped
         * and only the CPU maps updated, as the CPU online status would
         * already be updated if called via the cacheinfo_cpu_online
         * path.
         */
        if (per_cpu_cacheinfo(cpu))
                goto update_cpu_map;

        if (init_cache_level(cpu) || !cache_leaves(cpu))
                return -ENOENT;

        per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu),
                                         sizeof(struct cacheinfo), GFP_ATOMIC);
        if (per_cpu_cacheinfo(cpu) == NULL) {
                cache_leaves(cpu) = 0;
                return -ENOMEM;
        }

        /*
         * populate_cache_leaves() may completely set up the cache leaves
         * and shared_cpu_map or it may leave them partially set up.
         */
        ret = populate_cache_leaves(cpu);
        if (ret)
                goto free_ci;

update_cpu_map:
        /*
         * For systems using DT for cache hierarchy, fw_token
         * and shared_cpu_map will be set up here only if they are
         * not populated already.
         */
        ret = cache_shared_cpu_map_setup(cpu);
        if (ret) {
                pr_warn("Unable to detect cache hierarchy for CPU %d\n", cpu);
                goto free_ci;
        }

        return 0;

free_ci:
        free_cache_attributes(cpu);
        return ret;
}

/* pointer to cpuX/cache device */
static DEFINE_PER_CPU(struct device *, ci_cache_dev);
#define per_cpu_cache_dev(cpu)  (per_cpu(ci_cache_dev, cpu))

static cpumask_t cache_dev_map;

/* pointer to array of devices for cpuX/cache/indexY */
static DEFINE_PER_CPU(struct device **, ci_index_dev);
#define per_cpu_index_dev(cpu)  (per_cpu(ci_index_dev, cpu))
#define per_cache_index_dev(cpu, idx)   ((per_cpu_index_dev(cpu))[idx])

#define show_one(file_name, object)                             \
static ssize_t file_name##_show(struct device *dev,             \
                struct device_attribute *attr, char *buf)       \
{                                                               \
        struct cacheinfo *this_leaf = dev_get_drvdata(dev);     \
        return sysfs_emit(buf, "%u\n", this_leaf->object);      \
}

show_one(id, id);
show_one(level, level);
show_one(coherency_line_size, coherency_line_size);
show_one(number_of_sets, number_of_sets);
show_one(physical_line_partition, physical_line_partition);
show_one(ways_of_associativity, ways_of_associativity);

static ssize_t size_show(struct device *dev,
                         struct device_attribute *attr, char *buf)
{
        struct cacheinfo *this_leaf = dev_get_drvdata(dev);

        return sysfs_emit(buf, "%uK\n", this_leaf->size >> 10);
}

static ssize_t shared_cpu_map_show(struct device *dev,
                                   struct device_attribute *attr, char *buf)
{
        struct cacheinfo *this_leaf = dev_get_drvdata(dev);
        const struct cpumask *mask = &this_leaf->shared_cpu_map;

        return sysfs_emit(buf, "%*pb\n", nr_cpu_ids, mask);
}

static ssize_t shared_cpu_list_show(struct device *dev,
                                    struct device_attribute *attr, char *buf)
{
        struct cacheinfo *this_leaf = dev_get_drvdata(dev);
        const struct cpumask *mask = &this_leaf->shared_cpu_map;

        return sysfs_emit(buf, "%*pbl\n", nr_cpu_ids, mask);
}

static ssize_t type_show(struct device *dev,
                         struct device_attribute *attr, char *buf)
{
        struct cacheinfo *this_leaf = dev_get_drvdata(dev);
        const char *output;

        switch (this_leaf->type) {
        case CACHE_TYPE_DATA:
                output = "Data";
                break;
        case CACHE_TYPE_INST:
                output = "Instruction";
                break;
        case CACHE_TYPE_UNIFIED:
                output = "Unified";
                break;
        default:
                return -EINVAL;
        }

        return sysfs_emit(buf, "%s\n", output);
}

static ssize_t allocation_policy_show(struct device *dev,
                                      struct device_attribute *attr, char *buf)
{
        struct cacheinfo *this_leaf = dev_get_drvdata(dev);
        unsigned int ci_attr = this_leaf->attributes;
        const char *output;

        if ((ci_attr & CACHE_READ_ALLOCATE) && (ci_attr & CACHE_WRITE_ALLOCATE))
                output = "ReadWriteAllocate";
        else if (ci_attr & CACHE_READ_ALLOCATE)
                output = "ReadAllocate";
        else if (ci_attr & CACHE_WRITE_ALLOCATE)
                output = "WriteAllocate";
        else
                return 0;

        return sysfs_emit(buf, "%s\n", output);
}

static ssize_t write_policy_show(struct device *dev,
                                 struct device_attribute *attr, char *buf)
{
        struct cacheinfo *this_leaf = dev_get_drvdata(dev);
        unsigned int ci_attr = this_leaf->attributes;
        int n = 0;

        if (ci_attr & CACHE_WRITE_THROUGH)
                n = sysfs_emit(buf, "WriteThrough\n");
        else if (ci_attr & CACHE_WRITE_BACK)
                n = sysfs_emit(buf, "WriteBack\n");
        return n;
}

static DEVICE_ATTR_RO(id);
static DEVICE_ATTR_RO(level);
static DEVICE_ATTR_RO(type);
static DEVICE_ATTR_RO(coherency_line_size);
static DEVICE_ATTR_RO(ways_of_associativity);
static DEVICE_ATTR_RO(number_of_sets);
static DEVICE_ATTR_RO(size);
static DEVICE_ATTR_RO(allocation_policy);
static DEVICE_ATTR_RO(write_policy);
static DEVICE_ATTR_RO(shared_cpu_map);
static DEVICE_ATTR_RO(shared_cpu_list);
static DEVICE_ATTR_RO(physical_line_partition);

static struct attribute *cache_default_attrs[] = {
        &dev_attr_id.attr,
        &dev_attr_type.attr,
        &dev_attr_level.attr,
        &dev_attr_shared_cpu_map.attr,
        &dev_attr_shared_cpu_list.attr,
        &dev_attr_coherency_line_size.attr,
        &dev_attr_ways_of_associativity.attr,
        &dev_attr_number_of_sets.attr,
        &dev_attr_size.attr,
        &dev_attr_allocation_policy.attr,
        &dev_attr_write_policy.attr,
        &dev_attr_physical_line_partition.attr,
        NULL
};

static umode_t
cache_default_attrs_is_visible(struct kobject *kobj,
                               struct attribute *attr, int unused)
{
        struct device *dev = kobj_to_dev(kobj);
        struct cacheinfo *this_leaf = dev_get_drvdata(dev);
        const struct cpumask *mask = &this_leaf->shared_cpu_map;
        umode_t mode = attr->mode;

        if ((attr == &dev_attr_id.attr) && (this_leaf->attributes & CACHE_ID))
                return mode;
        if ((attr == &dev_attr_type.attr) && this_leaf->type)
                return mode;
        if ((attr == &dev_attr_level.attr) && this_leaf->level)
                return mode;
        if ((attr == &dev_attr_shared_cpu_map.attr) && !cpumask_empty(mask))
                return mode;
        if ((attr == &dev_attr_shared_cpu_list.attr) && !cpumask_empty(mask))
                return mode;
        if ((attr == &dev_attr_coherency_line_size.attr) &&
            this_leaf->coherency_line_size)
                return mode;
        if ((attr == &dev_attr_ways_of_associativity.attr) &&
            this_leaf->size) /* allow 0 = full associativity */
                return mode;
        if ((attr == &dev_attr_number_of_sets.attr) &&
            this_leaf->number_of_sets)
                return mode;
        if ((attr == &dev_attr_size.attr) && this_leaf->size)
                return mode;
        if ((attr == &dev_attr_write_policy.attr) &&
            (this_leaf->attributes & CACHE_WRITE_POLICY_MASK))
                return mode;
        if ((attr == &dev_attr_allocation_policy.attr) &&
            (this_leaf->attributes & CACHE_ALLOCATE_POLICY_MASK))
                return mode;
        if ((attr == &dev_attr_physical_line_partition.attr) &&
            this_leaf->physical_line_partition)
                return mode;

        return 0;
}

static const struct attribute_group cache_default_group = {
        .attrs = cache_default_attrs,
        .is_visible = cache_default_attrs_is_visible,
};

static const struct attribute_group *cache_default_groups[] = {
        &cache_default_group,
        NULL,
};

static const struct attribute_group *cache_private_groups[] = {
        &cache_default_group,
        NULL, /* placeholder for the private group */
        NULL,
};

const struct attribute_group *
__weak cache_get_priv_group(struct cacheinfo *this_leaf)
{
        return NULL;
}

static const struct attribute_group **
cache_get_attribute_groups(struct cacheinfo *this_leaf)
{
        const struct attribute_group *priv_group =
                        cache_get_priv_group(this_leaf);

        if (!priv_group)
                return cache_default_groups;

        if (!cache_private_groups[1])
                cache_private_groups[1] = priv_group;

        return cache_private_groups;
}

/* Add/Remove cache interface for CPU device */
static void cpu_cache_sysfs_exit(unsigned int cpu)
{
        int i;
        struct device *ci_dev;

        if (per_cpu_index_dev(cpu)) {
                for (i = 0; i < cache_leaves(cpu); i++) {
                        ci_dev = per_cache_index_dev(cpu, i);
                        if (!ci_dev)
                                continue;
                        device_unregister(ci_dev);
                }
                kfree(per_cpu_index_dev(cpu));
                per_cpu_index_dev(cpu) = NULL;
        }
        device_unregister(per_cpu_cache_dev(cpu));
        per_cpu_cache_dev(cpu) = NULL;
}

static int cpu_cache_sysfs_init(unsigned int cpu)
{
        struct device *dev = get_cpu_device(cpu);

        if (per_cpu_cacheinfo(cpu) == NULL)
                return -ENOENT;

        per_cpu_cache_dev(cpu) = cpu_device_create(dev, NULL, NULL, "cache");
        if (IS_ERR(per_cpu_cache_dev(cpu)))
                return PTR_ERR(per_cpu_cache_dev(cpu));

        /* Allocate all required memory */
        per_cpu_index_dev(cpu) = kcalloc(cache_leaves(cpu),
                                         sizeof(struct device *), GFP_KERNEL);
        if (unlikely(per_cpu_index_dev(cpu) == NULL))
                goto err_out;

        return 0;

err_out:
        cpu_cache_sysfs_exit(cpu);
        return -ENOMEM;
}

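/*
 * Create the cpuX/cache/indexY devices, one per leaf; arch code can
 * append private attributes via cache_get_priv_group().
 */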
static int cache_add_dev(unsigned int cpu)
{
        unsigned int i;
        int rc;
        struct device *ci_dev, *parent;
        struct cacheinfo *this_leaf;
        const struct attribute_group **cache_groups;

        rc = cpu_cache_sysfs_init(cpu);
        if (unlikely(rc < 0))
                return rc;

        parent = per_cpu_cache_dev(cpu);
        for (i = 0; i < cache_leaves(cpu); i++) {
                this_leaf = per_cpu_cacheinfo_idx(cpu, i);
                if (this_leaf->disable_sysfs)
                        continue;
                if (this_leaf->type == CACHE_TYPE_NOCACHE)
                        break;
                cache_groups = cache_get_attribute_groups(this_leaf);
                ci_dev = cpu_device_create(parent, this_leaf, cache_groups,
                                           "index%1u", i);
                if (IS_ERR(ci_dev)) {
                        rc = PTR_ERR(ci_dev);
                        goto err;
                }
                per_cache_index_dev(cpu, i) = ci_dev;
        }
        cpumask_set_cpu(cpu, &cache_dev_map);

        return 0;
err:
        cpu_cache_sysfs_exit(cpu);
        return rc;
}

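/* CPU hotplug callbacks wiring detection and sysfs setup/teardown */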
static int cacheinfo_cpu_online(unsigned int cpu)
{
        int rc = detect_cache_attributes(cpu);

        if (rc)
                return rc;
        rc = cache_add_dev(cpu);
        if (rc)
                free_cache_attributes(cpu);
        return rc;
}

static int cacheinfo_cpu_pre_down(unsigned int cpu)
{
        if (cpumask_test_and_clear_cpu(cpu, &cache_dev_map))
                cpu_cache_sysfs_exit(cpu);

        free_cache_attributes(cpu);
        return 0;
}

static int __init cacheinfo_sysfs_init(void)
{
        return cpuhp_setup_state(CPUHP_AP_BASE_CACHEINFO_ONLINE,
                                 "base/cacheinfo:online",
                                 cacheinfo_cpu_online, cacheinfo_cpu_pre_down);
}
device_initcall(cacheinfo_sysfs_init);