// SPDX-License-Identifier: GPL-2.0
/*
 * Arch specific cpu topology information
 *
 * Copyright (C) 2016, ARM Ltd.
 * Written by: Juri Lelli, ARM Ltd.
 */

#include <linux/acpi.h>
#include <linux/cacheinfo.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/device.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/sched/topology.h>
#include <linux/cpuset.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/units.h>

#define CREATE_TRACE_POINTS
#include <trace/events/thermal_pressure.h>

static DEFINE_PER_CPU(struct scale_freq_data __rcu *, sft_data);
static struct cpumask scale_freq_counters_mask;
static bool scale_freq_invariant;
DEFINE_PER_CPU(unsigned long, capacity_freq_ref) = 1;
EXPORT_PER_CPU_SYMBOL_GPL(capacity_freq_ref);

static bool supports_scale_freq_counters(const struct cpumask *cpus)
{
	return cpumask_subset(cpus, &scale_freq_counters_mask);
}

bool topology_scale_freq_invariant(void)
{
	return cpufreq_supports_freq_invariance() ||
	       supports_scale_freq_counters(cpu_online_mask);
}

static void update_scale_freq_invariant(bool status)
{
	if (scale_freq_invariant == status)
		return;

	/*
	 * Task scheduler behavior depends on frequency invariance support,
	 * either cpufreq or counter driven. If the support status changes as
	 * a result of counter initialisation and use, retrigger the build of
	 * scheduling domains to ensure the information is propagated properly.
	 */
	if (topology_scale_freq_invariant() == status) {
		scale_freq_invariant = status;
		rebuild_sched_domains_energy();
	}
}

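/*
 * Install @data as the counter-based frequency-scale source for @cpus.
 * CPUs already using ARCH-provided counters are left untouched; the rest
 * are switched over and added to scale_freq_counters_mask.
 */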
void topology_set_scale_freq_source(struct scale_freq_data *data,
				    const struct cpumask *cpus)
{
	struct scale_freq_data *sfd;
	int cpu;

	/*
	 * Avoid calling rebuild_sched_domains() unnecessarily if FIE is
	 * supported by cpufreq.
	 */
	if (cpumask_empty(&scale_freq_counters_mask))
		scale_freq_invariant = topology_scale_freq_invariant();

	rcu_read_lock();

	for_each_cpu(cpu, cpus) {
		sfd = rcu_dereference(*per_cpu_ptr(&sft_data, cpu));

		/* Use ARCH provided counters whenever possible */
		if (!sfd || sfd->source != SCALE_FREQ_SOURCE_ARCH) {
			rcu_assign_pointer(per_cpu(sft_data, cpu), data);
			cpumask_set_cpu(cpu, &scale_freq_counters_mask);
		}
	}

	rcu_read_unlock();

	update_scale_freq_invariant(true);
}
EXPORT_SYMBOL_GPL(topology_set_scale_freq_source);

void topology_clear_scale_freq_source(enum scale_freq_source source,
				      const struct cpumask *cpus)
{
	struct scale_freq_data *sfd;
	int cpu;

	rcu_read_lock();

	for_each_cpu(cpu, cpus) {
		sfd = rcu_dereference(*per_cpu_ptr(&sft_data, cpu));

		if (sfd && sfd->source == source) {
			rcu_assign_pointer(per_cpu(sft_data, cpu), NULL);
			cpumask_clear_cpu(cpu, &scale_freq_counters_mask);
		}
	}

	rcu_read_unlock();

	/*
	 * Make sure all references to previous sft_data are dropped to avoid
	 * use-after-free races.
	 */
	synchronize_rcu();

	update_scale_freq_invariant(false);
}
EXPORT_SYMBOL_GPL(topology_clear_scale_freq_source);

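/*
 * Tick-path hook (e.g. via an arch's arch_scale_freq_tick()): lets the
 * scale-freq source registered for this CPU, if any, refresh the
 * frequency scale factor from its counters.
 */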
void topology_scale_freq_tick(void)
{
	struct scale_freq_data *sfd = rcu_dereference_sched(*this_cpu_ptr(&sft_data));

	if (sfd)
		sfd->set_freq_scale();
}

DEFINE_PER_CPU(unsigned long, arch_freq_scale) = SCHED_CAPACITY_SCALE;
EXPORT_PER_CPU_SYMBOL_GPL(arch_freq_scale);

void topology_set_freq_scale(const struct cpumask *cpus, unsigned long cur_freq,
			     unsigned long max_freq)
{
	unsigned long scale;
	int i;

	if (WARN_ON_ONCE(!cur_freq || !max_freq))
		return;

	/*
	 * If the use of counters for FIE is enabled, just return as we don't
	 * want to update the scale factor with information from CPUFREQ.
	 * Instead the scale factor will be updated from arch_scale_freq_tick.
	 */
	if (supports_scale_freq_counters(cpus))
		return;

	scale = (cur_freq << SCHED_CAPACITY_SHIFT) / max_freq;

	for_each_cpu(i, cpus)
		per_cpu(arch_freq_scale, i) = scale;
}

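/*
 * Illustrative example (numbers chosen for this comment, not from the
 * code): with SCHED_CAPACITY_SHIFT == 10, cur_freq == 1200000 kHz and
 * max_freq == 2400000 kHz give scale = (1200000 << 10) / 2400000 = 512,
 * i.e. half of SCHED_CAPACITY_SCALE.
 */
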
DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
EXPORT_PER_CPU_SYMBOL_GPL(cpu_scale);

void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity)
{
	per_cpu(cpu_scale, cpu) = capacity;
}

DEFINE_PER_CPU(unsigned long, thermal_pressure);

/**
 * topology_update_thermal_pressure() - Update thermal pressure for CPUs
 * @cpus        : The related CPUs for which capacity has been reduced
 * @capped_freq : The maximum allowed frequency that CPUs can run at
 *
 * Update the value of thermal pressure for all @cpus in the mask. The
 * cpumask should include all (online+offline) affected CPUs, to avoid
 * operating on stale data when hot-plug is used for some CPUs. The
 * @capped_freq reflects the currently allowed maximum CPU frequency due to
 * thermal capping. It may also be a boost frequency value, which is bigger
 * than the internal 'capacity_freq_ref' max frequency. In such a case the
 * pressure value is simply dropped, since this indicates that there is no
 * thermal throttling. The @capped_freq must be provided in kHz.
 */
void topology_update_thermal_pressure(const struct cpumask *cpus,
				      unsigned long capped_freq)
{
	unsigned long max_capacity, capacity, th_pressure;
	u32 max_freq;
	int cpu;

	cpu = cpumask_first(cpus);
	max_capacity = arch_scale_cpu_capacity(cpu);
	max_freq = arch_scale_freq_ref(cpu);

	/*
	 * Handle properly the boost frequencies, which should simply clean
	 * the thermal pressure value.
	 */
	if (max_freq <= capped_freq)
		capacity = max_capacity;
	else
		capacity = mult_frac(max_capacity, capped_freq, max_freq);

	th_pressure = max_capacity - capacity;

	trace_thermal_pressure_update(cpu, th_pressure);

	for_each_cpu(cpu, cpus)
		WRITE_ONCE(per_cpu(thermal_pressure, cpu), th_pressure);
}
EXPORT_SYMBOL_GPL(topology_update_thermal_pressure);

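/*
 * Illustrative example (hypothetical numbers): max_capacity == 1024 at
 * max_freq == 2000000 kHz, capped to capped_freq == 1500000 kHz, gives
 * capacity = mult_frac(1024, 1500000, 2000000) = 768 and therefore
 * th_pressure = 1024 - 768 = 256.
 */
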
static ssize_t cpu_capacity_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);

	return sysfs_emit(buf, "%lu\n", topology_get_cpu_scale(cpu->dev.id));
}

static void update_topology_flags_workfn(struct work_struct *work);
static DECLARE_WORK(update_topology_flags_work, update_topology_flags_workfn);

static DEVICE_ATTR_RO(cpu_capacity);

static int cpu_capacity_sysctl_add(unsigned int cpu)
{
	struct device *cpu_dev = get_cpu_device(cpu);

	if (!cpu_dev)
		return -ENOENT;

	device_create_file(cpu_dev, &dev_attr_cpu_capacity);

	return 0;
}

static int cpu_capacity_sysctl_remove(unsigned int cpu)
{
	struct device *cpu_dev = get_cpu_device(cpu);

	if (!cpu_dev)
		return -ENOENT;

	device_remove_file(cpu_dev, &dev_attr_cpu_capacity);

	return 0;
}

static int register_cpu_capacity_sysctl(void)
{
	cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "topology/cpu-capacity",
			  cpu_capacity_sysctl_add, cpu_capacity_sysctl_remove);

	return 0;
}
subsys_initcall(register_cpu_capacity_sysctl);

static int update_topology;

int topology_update_cpu_topology(void)
{
	return update_topology;
}

/*
 * Updating the sched_domains can't be done directly from cpufreq callbacks
 * due to locking, so queue the work for later.
 */
static void update_topology_flags_workfn(struct work_struct *work)
{
	update_topology = 1;
	rebuild_sched_domains();
	pr_debug("sched_domain hierarchy rebuilt, flags updated\n");
	update_topology = 0;
}

static u32 *raw_capacity;

static int free_raw_capacity(void)
{
	kfree(raw_capacity);
	raw_capacity = NULL;

	return 0;
}

void topology_normalize_cpu_scale(void)
{
	u64 capacity;
	u64 capacity_scale;
	int cpu;

	if (!raw_capacity)
		return;

	capacity_scale = 1;
	for_each_possible_cpu(cpu) {
		capacity = raw_capacity[cpu] * per_cpu(capacity_freq_ref, cpu);
		capacity_scale = max(capacity, capacity_scale);
	}

	pr_debug("cpu_capacity: capacity_scale=%llu\n", capacity_scale);
	for_each_possible_cpu(cpu) {
		capacity = raw_capacity[cpu] * per_cpu(capacity_freq_ref, cpu);
		capacity = div64_u64(capacity << SCHED_CAPACITY_SHIFT,
				     capacity_scale);
		topology_set_cpu_scale(cpu, capacity);
		pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n",
			 cpu, topology_get_cpu_scale(cpu));
	}
}

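/*
 * Illustrative example (hypothetical numbers): a big CPU with
 * capacity-dmips-mhz == 1024 at capacity_freq_ref == 2000000 kHz and a
 * little CPU with 512 at 1500000 kHz give raw products of 2048000000 and
 * 768000000; normalized against the larger one, the resulting capacities
 * are 1024 and 384.
 */
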
bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
{
	struct clk *cpu_clk;
	static bool cap_parsing_failed;
	int ret;
	u32 cpu_capacity;

	if (cap_parsing_failed)
		return false;

	ret = of_property_read_u32(cpu_node, "capacity-dmips-mhz",
				   &cpu_capacity);
	if (!ret) {
		if (!raw_capacity) {
			raw_capacity = kcalloc(num_possible_cpus(),
					       sizeof(*raw_capacity),
					       GFP_KERNEL);
			if (!raw_capacity) {
				cap_parsing_failed = true;
				return false;
			}
		}
		raw_capacity[cpu] = cpu_capacity;
		pr_debug("cpu_capacity: %pOF cpu_capacity=%u (raw)\n",
			 cpu_node, raw_capacity[cpu]);

		/*
		 * Update capacity_freq_ref for calculating early boot CPU
		 * capacities. For non-clk CPU DVFS mechanisms there is no way
		 * to get the frequency value at this point, so assume those
		 * CPUs run at the same frequency (by keeping the initial
		 * capacity_freq_ref value).
		 */
		cpu_clk = of_clk_get(cpu_node, 0);
		if (!PTR_ERR_OR_ZERO(cpu_clk)) {
			per_cpu(capacity_freq_ref, cpu) =
				clk_get_rate(cpu_clk) / HZ_PER_KHZ;
			clk_put(cpu_clk);
		}
	} else {
		if (raw_capacity) {
			pr_err("cpu_capacity: missing %pOF raw capacity\n",
			       cpu_node);
			pr_err("cpu_capacity: partial information: fallback to 1024 for all CPUs\n");
		}
		cap_parsing_failed = true;
		free_raw_capacity();
	}

	return !ret;
}

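/*
 * Illustrative sketch of the property parsed above, as it might appear in
 * a device tree CPU node (hypothetical values):
 *
 *	cpu@0 {
 *		device_type = "cpu";
 *		capacity-dmips-mhz = <1024>;
 *	};
 */
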
void __weak freq_inv_set_max_ratio(int cpu, u64 max_rate)
{
}

#ifdef CONFIG_ACPI_CPPC_LIB
#include <acpi/cppc_acpi.h>

void topology_init_cpu_capacity_cppc(void)
{
	u64 capacity, capacity_scale = 0;
	struct cppc_perf_caps perf_caps;
	int cpu;

	if (likely(!acpi_cpc_valid()))
		return;

	raw_capacity = kcalloc(num_possible_cpus(), sizeof(*raw_capacity),
			       GFP_KERNEL);
	if (!raw_capacity)
		return;

	for_each_possible_cpu(cpu) {
		if (!cppc_get_perf_caps(cpu, &perf_caps) &&
		    (perf_caps.highest_perf >= perf_caps.nominal_perf) &&
		    (perf_caps.highest_perf >= perf_caps.lowest_perf)) {
			raw_capacity[cpu] = perf_caps.highest_perf;
			capacity_scale = max_t(u64, capacity_scale, raw_capacity[cpu]);

			per_cpu(capacity_freq_ref, cpu) = cppc_perf_to_khz(&perf_caps, raw_capacity[cpu]);

			pr_debug("cpu_capacity: CPU%d cpu_capacity=%u (raw).\n",
				 cpu, raw_capacity[cpu]);
			continue;
		}

		pr_err("cpu_capacity: CPU%d missing/invalid highest performance.\n", cpu);
		pr_err("cpu_capacity: partial information: fallback to 1024 for all CPUs\n");
		goto exit;
	}

	for_each_possible_cpu(cpu) {
		freq_inv_set_max_ratio(cpu,
				       per_cpu(capacity_freq_ref, cpu) * HZ_PER_KHZ);

		capacity = raw_capacity[cpu];
		capacity = div64_u64(capacity << SCHED_CAPACITY_SHIFT,
				     capacity_scale);
		topology_set_cpu_scale(cpu, capacity);
		pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n",
			 cpu, topology_get_cpu_scale(cpu));
	}

	schedule_work(&update_topology_flags_work);
	pr_debug("cpu_capacity: cpu_capacity initialization done\n");

exit:
	free_raw_capacity();
}
#endif

#ifdef CONFIG_CPU_FREQ
static cpumask_var_t cpus_to_visit;
static void parsing_done_workfn(struct work_struct *work);
static DECLARE_WORK(parsing_done_work, parsing_done_workfn);

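/*
 * Invoked for every new cpufreq policy: record the reference frequency of
 * each related CPU and, once all possible CPUs have been visited,
 * normalize the parsed capacities and queue the topology-flags update.
 */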
static int
init_cpu_capacity_callback(struct notifier_block *nb,
			   unsigned long val,
			   void *data)
{
	struct cpufreq_policy *policy = data;
	int cpu;

	if (val != CPUFREQ_CREATE_POLICY)
		return 0;

	pr_debug("cpu_capacity: init cpu capacity for CPUs [%*pbl] (to_visit=%*pbl)\n",
		 cpumask_pr_args(policy->related_cpus),
		 cpumask_pr_args(cpus_to_visit));

	cpumask_andnot(cpus_to_visit, cpus_to_visit, policy->related_cpus);

	for_each_cpu(cpu, policy->related_cpus) {
		per_cpu(capacity_freq_ref, cpu) = policy->cpuinfo.max_freq;
		freq_inv_set_max_ratio(cpu,
				       per_cpu(capacity_freq_ref, cpu) * HZ_PER_KHZ);
	}

	if (cpumask_empty(cpus_to_visit)) {
		if (raw_capacity) {
			topology_normalize_cpu_scale();
			schedule_work(&update_topology_flags_work);
			free_raw_capacity();
		}
		pr_debug("cpu_capacity: parsing done\n");
		schedule_work(&parsing_done_work);
	}

	return 0;
}

static struct notifier_block init_cpu_capacity_notifier = {
	.notifier_call = init_cpu_capacity_callback,
};

static int __init register_cpufreq_notifier(void)
{
	int ret;

	/*
	 * On ACPI-based systems skip registering cpufreq notifier as cpufreq
	 * information is not needed for cpu capacity initialization.
	 */
	if (!acpi_disabled)
		return -EINVAL;

	if (!alloc_cpumask_var(&cpus_to_visit, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(cpus_to_visit, cpu_possible_mask);

	ret = cpufreq_register_notifier(&init_cpu_capacity_notifier,
					CPUFREQ_POLICY_NOTIFIER);

	if (ret)
		free_cpumask_var(cpus_to_visit);

	return ret;
}
core_initcall(register_cpufreq_notifier);

static void parsing_done_workfn(struct work_struct *work)
{
	cpufreq_unregister_notifier(&init_cpu_capacity_notifier,
				    CPUFREQ_POLICY_NOTIFIER);
	free_cpumask_var(cpus_to_visit);
}

#else
core_initcall(free_raw_capacity);
#endif

#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
/*
 * This function returns the logical CPU number of the node.
 * There are basically three kinds of return values:
 * (1) logical CPU number, which is >= 0.
 * (2) -ENODEV when the device tree (DT) node is valid and found in the DT
 * but there is no possible logical CPU in the kernel to match. This happens
 * when CONFIG_NR_CPUS is configured to be smaller than the number of
 * CPU nodes in the DT. We need to just ignore this case.
 * (3) -1 if the node does not exist in the device tree
 */
static int __init get_cpu_for_node(struct device_node *node)
{
	struct device_node *cpu_node;
	int cpu;

	cpu_node = of_parse_phandle(node, "cpu", 0);
	if (!cpu_node)
		return -1;

	cpu = of_cpu_node_to_id(cpu_node);
	if (cpu >= 0)
		topology_parse_cpu_capacity(cpu_node, cpu);
	else
		pr_info("CPU node for %pOF exists but the possible cpu range is: %*pbl\n",
			cpu_node, cpumask_pr_args(cpu_possible_mask));

	of_node_put(cpu_node);
	return cpu;
}

static int __init parse_core(struct device_node *core, int package_id,
			     int cluster_id, int core_id)
{
	char name[20];
	bool leaf = true;
	int i = 0;
	int cpu;
	struct device_node *t;

	do {
		snprintf(name, sizeof(name), "thread%d", i);
		t = of_get_child_by_name(core, name);
		if (t) {
			leaf = false;
			cpu = get_cpu_for_node(t);
			if (cpu >= 0) {
				cpu_topology[cpu].package_id = package_id;
				cpu_topology[cpu].cluster_id = cluster_id;
				cpu_topology[cpu].core_id = core_id;
				cpu_topology[cpu].thread_id = i;
			} else if (cpu != -ENODEV) {
				pr_err("%pOF: Can't get CPU for thread\n", t);
				of_node_put(t);
				return -EINVAL;
			}
			of_node_put(t);
		}
		i++;
	} while (t);

	cpu = get_cpu_for_node(core);
	if (cpu >= 0) {
		if (!leaf) {
			pr_err("%pOF: Core has both threads and CPU\n",
			       core);
			return -EINVAL;
		}

		cpu_topology[cpu].package_id = package_id;
		cpu_topology[cpu].cluster_id = cluster_id;
		cpu_topology[cpu].core_id = core_id;
	} else if (leaf && cpu != -ENODEV) {
		pr_err("%pOF: Can't get CPU for leaf core\n", core);
		return -EINVAL;
	}

	return 0;
}

static int __init parse_cluster(struct device_node *cluster, int package_id,
				int cluster_id, int depth)
{
	char name[20];
	bool leaf = true;
	bool has_cores = false;
	struct device_node *c;
	int core_id = 0;
	int i, ret;

	/*
	 * First check for child clusters; we currently ignore any
	 * information about the nesting of clusters and present the
	 * scheduler with a flat list of them.
	 */
	i = 0;
	do {
		snprintf(name, sizeof(name), "cluster%d", i);
		c = of_get_child_by_name(cluster, name);
		if (c) {
			leaf = false;
			ret = parse_cluster(c, package_id, i, depth + 1);
			if (depth > 0)
				pr_warn("Topology for clusters of clusters not yet supported\n");
			of_node_put(c);
			if (ret != 0)
				return ret;
		}
		i++;
	} while (c);

	/* Now check for cores */
	i = 0;
	do {
		snprintf(name, sizeof(name), "core%d", i);
		c = of_get_child_by_name(cluster, name);
		if (c) {
			has_cores = true;

			if (depth == 0) {
				pr_err("%pOF: cpu-map children should be clusters\n",
				       c);
				of_node_put(c);
				return -EINVAL;
			}

			if (leaf) {
				ret = parse_core(c, package_id, cluster_id,
						 core_id++);
			} else {
				pr_err("%pOF: Non-leaf cluster with core %s\n",
				       cluster, name);
				ret = -EINVAL;
			}

			of_node_put(c);
			if (ret != 0)
				return ret;
		}
		i++;
	} while (c);

	if (leaf && !has_cores)
		pr_warn("%pOF: empty cluster\n", cluster);

	return 0;
}

static int __init parse_socket(struct device_node *socket)
{
	char name[20];
	struct device_node *c;
	bool has_socket = false;
	int package_id = 0, ret;

	do {
		snprintf(name, sizeof(name), "socket%d", package_id);
		c = of_get_child_by_name(socket, name);
		if (c) {
			has_socket = true;
			ret = parse_cluster(c, package_id, -1, 0);
			of_node_put(c);
			if (ret != 0)
				return ret;
		}
		package_id++;
	} while (c);

	if (!has_socket)
		ret = parse_cluster(socket, 0, -1, 0);

	return ret;
}

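/*
 * Illustrative sketch of the cpu-map layout accepted by the parsers above
 * (hypothetical device tree; the socket level is optional):
 *
 *	cpus {
 *		cpu-map {
 *			socket0 {
 *				cluster0 {
 *					core0 { cpu = <&cpu0>; };
 *					core1 { cpu = <&cpu1>; };
 *				};
 *			};
 *		};
 *	};
 */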
static int __init parse_dt_topology(void)
{
	struct device_node *cn, *map;
	int ret = 0;
	int cpu;

	cn = of_find_node_by_path("/cpus");
	if (!cn) {
		pr_err("No CPU information found in DT\n");
		return 0;
	}

	/*
	 * When topology is provided cpu-map is essentially a root
	 * cluster with restricted subnodes.
	 */
	map = of_get_child_by_name(cn, "cpu-map");
	if (!map)
		goto out;

	ret = parse_socket(map);
	if (ret != 0)
		goto out_map;

	topology_normalize_cpu_scale();

	/*
	 * Check that all cores are in the topology; the SMP code will
	 * only mark cores described in the DT as possible.
	 */
	for_each_possible_cpu(cpu)
		if (cpu_topology[cpu].package_id < 0) {
			ret = -EINVAL;
			break;
		}

out_map:
	of_node_put(map);
out:
	of_node_put(cn);
	return ret;
}
#endif

/*
 * cpu topology table
 */
struct cpu_topology cpu_topology[NR_CPUS];
EXPORT_SYMBOL_GPL(cpu_topology);

const struct cpumask *cpu_coregroup_mask(int cpu)
{
	const cpumask_t *core_mask = cpumask_of_node(cpu_to_node(cpu));

	/* Find the smaller of NUMA, core or LLC siblings */
	if (cpumask_subset(&cpu_topology[cpu].core_sibling, core_mask)) {
		/* not numa in package, let's use the package siblings */
		core_mask = &cpu_topology[cpu].core_sibling;
	}

	if (last_level_cache_is_valid(cpu)) {
		if (cpumask_subset(&cpu_topology[cpu].llc_sibling, core_mask))
			core_mask = &cpu_topology[cpu].llc_sibling;
	}

	/*
	 * For systems with no shared cpu-side LLC but with clusters defined,
	 * extend core_mask to cluster_siblings. The sched domain builder will
	 * then remove MC as redundant with CLS if SCHED_CLUSTER is enabled.
	 */
	if (IS_ENABLED(CONFIG_SCHED_CLUSTER) &&
	    cpumask_subset(core_mask, &cpu_topology[cpu].cluster_sibling))
		core_mask = &cpu_topology[cpu].cluster_sibling;

	return core_mask;
}

const struct cpumask *cpu_clustergroup_mask(int cpu)
{
	/*
	 * Forbid cpu_clustergroup_mask() to span more or the same CPUs as
	 * cpu_coregroup_mask().
	 */
	if (cpumask_subset(cpu_coregroup_mask(cpu),
			   &cpu_topology[cpu].cluster_sibling))
		return topology_sibling_cpumask(cpu);

	return &cpu_topology[cpu].cluster_sibling;
}

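/*
 * Fold the newly onlined @cpuid into the LLC, core, cluster and thread
 * sibling masks of every online CPU it shares the corresponding topology
 * level with, and vice versa.
 */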
void update_siblings_masks(unsigned int cpuid)
{
	struct cpu_topology *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
	int cpu, ret;

	ret = detect_cache_attributes(cpuid);
	if (ret && ret != -ENOENT)
		pr_info("Early cacheinfo allocation failed, ret = %d\n", ret);

	/* update core and thread sibling masks */
	for_each_online_cpu(cpu) {
		cpu_topo = &cpu_topology[cpu];

		if (last_level_cache_is_shared(cpu, cpuid)) {
			cpumask_set_cpu(cpu, &cpuid_topo->llc_sibling);
			cpumask_set_cpu(cpuid, &cpu_topo->llc_sibling);
		}

		if (cpuid_topo->package_id != cpu_topo->package_id)
			continue;

		cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
		cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);

		if (cpuid_topo->cluster_id != cpu_topo->cluster_id)
			continue;

		if (cpuid_topo->cluster_id >= 0) {
			cpumask_set_cpu(cpu, &cpuid_topo->cluster_sibling);
			cpumask_set_cpu(cpuid, &cpu_topo->cluster_sibling);
		}

		if (cpuid_topo->core_id != cpu_topo->core_id)
			continue;

		cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
		cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
	}
}

static void clear_cpu_topology(int cpu)
{
	struct cpu_topology *cpu_topo = &cpu_topology[cpu];

	cpumask_clear(&cpu_topo->llc_sibling);
	cpumask_set_cpu(cpu, &cpu_topo->llc_sibling);

	cpumask_clear(&cpu_topo->cluster_sibling);
	cpumask_set_cpu(cpu, &cpu_topo->cluster_sibling);

	cpumask_clear(&cpu_topo->core_sibling);
	cpumask_set_cpu(cpu, &cpu_topo->core_sibling);
	cpumask_clear(&cpu_topo->thread_sibling);
	cpumask_set_cpu(cpu, &cpu_topo->thread_sibling);
}

void __init reset_cpu_topology(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		struct cpu_topology *cpu_topo = &cpu_topology[cpu];

		cpu_topo->thread_id = -1;
		cpu_topo->core_id = -1;
		cpu_topo->cluster_id = -1;
		cpu_topo->package_id = -1;

		clear_cpu_topology(cpu);
	}
}

840 | { | |
841 | int sibling; | |
842 | ||
843 | for_each_cpu(sibling, topology_core_cpumask(cpu)) | |
844 | cpumask_clear_cpu(cpu, topology_core_cpumask(sibling)); | |
845 | for_each_cpu(sibling, topology_sibling_cpumask(cpu)) | |
846 | cpumask_clear_cpu(cpu, topology_sibling_cpumask(sibling)); | |
4cc4cc28 WS |
847 | for_each_cpu(sibling, topology_cluster_cpumask(cpu)) |
848 | cpumask_clear_cpu(cpu, topology_cluster_cpumask(sibling)); | |
60c1b220 AP |
849 | for_each_cpu(sibling, topology_llc_cpumask(cpu)) |
850 | cpumask_clear_cpu(cpu, topology_llc_cpumask(sibling)); | |
851 | ||
852 | clear_cpu_topology(cpu); | |
853 | } | |
854 | ||
__weak int __init parse_acpi_topology(void)
{
	return 0;
}

#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
void __init init_cpu_topology(void)
{
	int cpu, ret;

	reset_cpu_topology();
	ret = parse_acpi_topology();
	if (!ret)
		ret = of_have_populated_dt() && parse_dt_topology();

	if (ret) {
		/*
		 * Discard anything that was parsed if we hit an error so we
		 * don't use partial information. But do not return yet to give
		 * arch-specific early cache level detection a chance to run.
		 */
		reset_cpu_topology();
	}

	for_each_possible_cpu(cpu) {
		ret = fetch_cache_info(cpu);
		if (!ret)
			continue;
		else if (ret != -ENOENT)
			pr_err("Early cacheinfo failed, ret = %d\n", ret);
		return;
	}
}

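/*
 * Fallback population: if firmware (DT/ACPI) did not describe this CPU,
 * synthesize a flat topology where each CPU is its own core and the
 * package follows the NUMA node, then refresh the sibling masks.
 */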
void store_cpu_topology(unsigned int cpuid)
{
	struct cpu_topology *cpuid_topo = &cpu_topology[cpuid];

	if (cpuid_topo->package_id != -1)
		goto topology_populated;

	cpuid_topo->thread_id = -1;
	cpuid_topo->core_id = cpuid;
	cpuid_topo->package_id = cpu_to_node(cpuid);

	pr_debug("CPU%u: package %d core %d thread %d\n",
		 cpuid, cpuid_topo->package_id, cpuid_topo->core_id,
		 cpuid_topo->thread_id);

topology_populated:
	update_siblings_masks(cpuid);
}
#endif