// SPDX-License-Identifier: GPL-2.0
/*
 * Arch specific cpu topology information
 *
 * Copyright (C) 2016, ARM Ltd.
 * Written by: Juri Lelli, ARM Ltd.
 */

#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/device.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/sched/topology.h>
#include <linux/cpuset.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>

static DEFINE_PER_CPU(struct scale_freq_data __rcu *, sft_data);
static struct cpumask scale_freq_counters_mask;
static bool scale_freq_invariant;
static DEFINE_PER_CPU(u32, freq_factor) = 1;

static bool supports_scale_freq_counters(const struct cpumask *cpus)
{
	return cpumask_subset(cpus, &scale_freq_counters_mask);
}

bool topology_scale_freq_invariant(void)
{
	return cpufreq_supports_freq_invariance() ||
	       supports_scale_freq_counters(cpu_online_mask);
}

static void update_scale_freq_invariant(bool status)
{
	if (scale_freq_invariant == status)
		return;

	/*
	 * Task scheduler behavior depends on frequency invariance support,
	 * either cpufreq or counter driven. If the support status changes as
	 * a result of counter initialisation and use, retrigger the build of
	 * scheduling domains to ensure the information is propagated properly.
	 */
	if (topology_scale_freq_invariant() == status) {
		scale_freq_invariant = status;
		rebuild_sched_domains_energy();
	}
}

void topology_set_scale_freq_source(struct scale_freq_data *data,
				    const struct cpumask *cpus)
{
	struct scale_freq_data *sfd;
	int cpu;

	/*
	 * Avoid calling rebuild_sched_domains() unnecessarily if FIE is
	 * supported by cpufreq.
	 */
	if (cpumask_empty(&scale_freq_counters_mask))
		scale_freq_invariant = topology_scale_freq_invariant();

	rcu_read_lock();

	for_each_cpu(cpu, cpus) {
		sfd = rcu_dereference(*per_cpu_ptr(&sft_data, cpu));

		/* Use ARCH provided counters whenever possible */
		if (!sfd || sfd->source != SCALE_FREQ_SOURCE_ARCH) {
			rcu_assign_pointer(per_cpu(sft_data, cpu), data);
			cpumask_set_cpu(cpu, &scale_freq_counters_mask);
		}
	}

	rcu_read_unlock();

	update_scale_freq_invariant(true);
}
EXPORT_SYMBOL_GPL(topology_set_scale_freq_source);

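/*
 * Example registration (an illustrative sketch only; foo_freq_tick,
 * foo_compute_scale, foo_sfd and covered_cpus are hypothetical names):
 * a driver with activity counters provides a tick callback that
 * publishes the scale it computed, then registers it for the CPUs the
 * counters cover:
 *
 *	static void foo_freq_tick(void)
 *	{
 *		this_cpu_write(arch_freq_scale, foo_compute_scale());
 *	}
 *
 *	static struct scale_freq_data foo_sfd = {
 *		.source		= SCALE_FREQ_SOURCE_ARCH,
 *		.set_freq_scale	= foo_freq_tick,
 *	};
 *
 *	topology_set_scale_freq_source(&foo_sfd, covered_cpus);
 */
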
void topology_clear_scale_freq_source(enum scale_freq_source source,
				      const struct cpumask *cpus)
{
	struct scale_freq_data *sfd;
	int cpu;

	rcu_read_lock();

	for_each_cpu(cpu, cpus) {
		sfd = rcu_dereference(*per_cpu_ptr(&sft_data, cpu));

		if (sfd && sfd->source == source) {
			rcu_assign_pointer(per_cpu(sft_data, cpu), NULL);
			cpumask_clear_cpu(cpu, &scale_freq_counters_mask);
		}
	}

	rcu_read_unlock();

	/*
	 * Make sure all references to previous sft_data are dropped to avoid
	 * use-after-free races.
	 */
	synchronize_rcu();

	update_scale_freq_invariant(false);
}
EXPORT_SYMBOL_GPL(topology_clear_scale_freq_source);

void topology_scale_freq_tick(void)
{
	struct scale_freq_data *sfd = rcu_dereference_sched(*this_cpu_ptr(&sft_data));

	if (sfd)
		sfd->set_freq_scale();
}

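/*
 * Note: arm64 aliases arch_scale_freq_tick to this function (via a
 * #define in <asm/topology.h>), so the registered callback runs from
 * the scheduler tick on every CPU that has a source installed.
 */
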
DEFINE_PER_CPU(unsigned long, arch_freq_scale) = SCHED_CAPACITY_SCALE;
EXPORT_PER_CPU_SYMBOL_GPL(arch_freq_scale);

void topology_set_freq_scale(const struct cpumask *cpus, unsigned long cur_freq,
			     unsigned long max_freq)
{
	unsigned long scale;
	int i;

	if (WARN_ON_ONCE(!cur_freq || !max_freq))
		return;

	/*
	 * If the use of counters for FIE is enabled, just return as we don't
	 * want to update the scale factor with information from CPUFREQ.
	 * Instead the scale factor will be updated from arch_scale_freq_tick.
	 */
	if (supports_scale_freq_counters(cpus))
		return;

	scale = (cur_freq << SCHED_CAPACITY_SHIFT) / max_freq;

	for_each_cpu(i, cpus)
		per_cpu(arch_freq_scale, i) = scale;
}

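/*
 * Worked example (assumed figures): with SCHED_CAPACITY_SHIFT == 10, a
 * CPU running at 1400000 kHz out of a 2800000 kHz maximum gets
 * scale = (1400000 << 10) / 2800000 = 512, i.e. half of
 * SCHED_CAPACITY_SCALE (1024).
 */
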
DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
EXPORT_PER_CPU_SYMBOL_GPL(cpu_scale);

void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity)
{
	per_cpu(cpu_scale, cpu) = capacity;
}

DEFINE_PER_CPU(unsigned long, thermal_pressure);

/**
 * topology_update_thermal_pressure() - Update thermal pressure for CPUs
 * @cpus        : The related CPUs for which capacity has been reduced
 * @capped_freq : The maximum allowed frequency that CPUs can run at
 *
 * Update the value of thermal pressure for all @cpus in the mask. The
 * cpumask should include all (online+offline) affected CPUs, to avoid
 * operating on stale data when hot-plug is used for some CPUs. The
 * @capped_freq reflects the currently allowed max CPUs frequency due to
 * thermal capping. It might also be a boost frequency value, which is bigger
 * than the internal 'freq_factor' max frequency. In such a case the pressure
 * value should simply be removed, since this is an indication that there is
 * no thermal throttling. The @capped_freq must be provided in kHz.
 */
void topology_update_thermal_pressure(const struct cpumask *cpus,
				      unsigned long capped_freq)
{
	unsigned long max_capacity, capacity, th_pressure;
	u32 max_freq;
	int cpu;

	cpu = cpumask_first(cpus);
	max_capacity = arch_scale_cpu_capacity(cpu);
	max_freq = per_cpu(freq_factor, cpu);

	/* Convert to MHz scale which is used in 'freq_factor' */
	capped_freq /= 1000;

	/*
	 * Handle properly the boost frequencies, which should simply clean
	 * the thermal pressure value.
	 */
	if (max_freq <= capped_freq)
		capacity = max_capacity;
	else
		capacity = mult_frac(max_capacity, capped_freq, max_freq);

	th_pressure = max_capacity - capacity;

	for_each_cpu(cpu, cpus)
		WRITE_ONCE(per_cpu(thermal_pressure, cpu), th_pressure);
}
EXPORT_SYMBOL_GPL(topology_update_thermal_pressure);

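/*
 * Worked example (assumed figures): with max_capacity == 1024 and a
 * 'freq_factor' maximum of 2000 MHz, a thermal cap of 1500000 kHz
 * (1500 MHz) gives capacity = 1024 * 1500 / 2000 = 768, hence
 * th_pressure = 1024 - 768 = 256. A cap at or above 2000 MHz is treated
 * as a boost value and clears the pressure to 0.
 */
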
static ssize_t cpu_capacity_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);

	return sysfs_emit(buf, "%lu\n", topology_get_cpu_scale(cpu->dev.id));
}

static void update_topology_flags_workfn(struct work_struct *work);
static DECLARE_WORK(update_topology_flags_work, update_topology_flags_workfn);

static DEVICE_ATTR_RO(cpu_capacity);

static int register_cpu_capacity_sysctl(void)
{
	int i;
	struct device *cpu;

	for_each_possible_cpu(i) {
		cpu = get_cpu_device(i);
		if (!cpu) {
			pr_err("%s: too early to get CPU%d device!\n",
			       __func__, i);
			continue;
		}
		device_create_file(cpu, &dev_attr_cpu_capacity);
	}

	return 0;
}
subsys_initcall(register_cpu_capacity_sysctl);

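/*
 * The attribute registered above is readable from userspace, e.g.:
 *
 *	$ cat /sys/devices/system/cpu/cpu0/cpu_capacity
 *	1024
 *
 * (1024 being SCHED_CAPACITY_SCALE; actual values are platform-specific
 * once DT/ACPI capacities have been parsed and normalized).
 */
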
static int update_topology;

int topology_update_cpu_topology(void)
{
	return update_topology;
}

/*
 * Updating the sched_domains can't be done directly from cpufreq callbacks
 * due to locking, so queue the work for later.
 */
static void update_topology_flags_workfn(struct work_struct *work)
{
	update_topology = 1;
	rebuild_sched_domains();
	pr_debug("sched_domain hierarchy rebuilt, flags updated\n");
	update_topology = 0;
}

static u32 *raw_capacity;

static int free_raw_capacity(void)
{
	kfree(raw_capacity);
	raw_capacity = NULL;

	return 0;
}

void topology_normalize_cpu_scale(void)
{
	u64 capacity;
	u64 capacity_scale;
	int cpu;

	if (!raw_capacity)
		return;

	capacity_scale = 1;
	for_each_possible_cpu(cpu) {
		capacity = raw_capacity[cpu] * per_cpu(freq_factor, cpu);
		capacity_scale = max(capacity, capacity_scale);
	}

	pr_debug("cpu_capacity: capacity_scale=%llu\n", capacity_scale);
	for_each_possible_cpu(cpu) {
		capacity = raw_capacity[cpu] * per_cpu(freq_factor, cpu);
		capacity = div64_u64(capacity << SCHED_CAPACITY_SHIFT,
			capacity_scale);
		topology_set_cpu_scale(cpu, capacity);
		pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n",
			cpu, topology_get_cpu_scale(cpu));
	}
}

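/*
 * Worked example (assumed figures): with raw capacities of 1024 (big)
 * and 436 (LITTLE) and an equal freq_factor on all CPUs, capacity_scale
 * is the big CPUs' product, so the big CPUs normalize to 1024 and the
 * LITTLE ones to (436 << 10) / 1024 = 436.
 */
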
bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
{
	struct clk *cpu_clk;
	static bool cap_parsing_failed;
	int ret;
	u32 cpu_capacity;

	if (cap_parsing_failed)
		return false;

	ret = of_property_read_u32(cpu_node, "capacity-dmips-mhz",
				   &cpu_capacity);
	if (!ret) {
		if (!raw_capacity) {
			raw_capacity = kcalloc(num_possible_cpus(),
					       sizeof(*raw_capacity),
					       GFP_KERNEL);
			if (!raw_capacity) {
				cap_parsing_failed = true;
				return false;
			}
		}
		raw_capacity[cpu] = cpu_capacity;
		pr_debug("cpu_capacity: %pOF cpu_capacity=%u (raw)\n",
			cpu_node, raw_capacity[cpu]);

		/*
		 * Update freq_factor for calculating early boot cpu capacities.
		 * For non-clk CPU DVFS mechanisms, there is no way to get the
		 * frequency value now, so assume the CPUs are running at the
		 * same frequency (by keeping the initial freq_factor value).
		 */
		cpu_clk = of_clk_get(cpu_node, 0);
		if (!PTR_ERR_OR_ZERO(cpu_clk)) {
			per_cpu(freq_factor, cpu) =
				clk_get_rate(cpu_clk) / 1000;
			clk_put(cpu_clk);
		}
	} else {
		if (raw_capacity) {
			pr_err("cpu_capacity: missing %pOF raw capacity\n",
				cpu_node);
			pr_err("cpu_capacity: partial information: fallback to 1024 for all CPUs\n");
		}
		cap_parsing_failed = true;
		free_raw_capacity();
	}

	return !ret;
}

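/*
 * The "capacity-dmips-mhz" property parsed above lives in the DT cpu
 * nodes; an illustrative sketch following the cpu capacity binding
 * (DT source; values and labels are examples only):
 *
 *	cpu@0 {
 *		compatible = "arm,cortex-a53";
 *		capacity-dmips-mhz = <578>;
 *	};
 *
 *	cpu@100 {
 *		compatible = "arm,cortex-a72";
 *		capacity-dmips-mhz = <1024>;
 *	};
 */
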
#ifdef CONFIG_ACPI_CPPC_LIB
#include <acpi/cppc_acpi.h>

void topology_init_cpu_capacity_cppc(void)
{
	struct cppc_perf_caps perf_caps;
	int cpu;

	if (likely(acpi_disabled || !acpi_cpc_valid()))
		return;

	raw_capacity = kcalloc(num_possible_cpus(), sizeof(*raw_capacity),
			       GFP_KERNEL);
	if (!raw_capacity)
		return;

	for_each_possible_cpu(cpu) {
		if (!cppc_get_perf_caps(cpu, &perf_caps) &&
		    (perf_caps.highest_perf >= perf_caps.nominal_perf) &&
		    (perf_caps.highest_perf >= perf_caps.lowest_perf)) {
			raw_capacity[cpu] = perf_caps.highest_perf;
			pr_debug("cpu_capacity: CPU%d cpu_capacity=%u (raw).\n",
				 cpu, raw_capacity[cpu]);
			continue;
		}

		pr_err("cpu_capacity: CPU%d missing/invalid highest performance.\n", cpu);
		pr_err("cpu_capacity: partial information: fallback to 1024 for all CPUs\n");
		goto exit;
	}

	topology_normalize_cpu_scale();
	schedule_work(&update_topology_flags_work);
	pr_debug("cpu_capacity: cpu_capacity initialization done\n");

exit:
	free_raw_capacity();
}
#endif

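/*
 * With CPPC (assumed figures): highest_perf values of 300 and 160 feed
 * topology_normalize_cpu_scale() above and come out as 1024 and
 * (160 << 10) / 300 = 546 respectively, since freq_factor stays 1 on
 * ACPI systems.
 */
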
#ifdef CONFIG_CPU_FREQ
static cpumask_var_t cpus_to_visit;
static void parsing_done_workfn(struct work_struct *work);
static DECLARE_WORK(parsing_done_work, parsing_done_workfn);

static int
init_cpu_capacity_callback(struct notifier_block *nb,
			   unsigned long val,
			   void *data)
{
	struct cpufreq_policy *policy = data;
	int cpu;

	if (!raw_capacity)
		return 0;

	if (val != CPUFREQ_CREATE_POLICY)
		return 0;

	pr_debug("cpu_capacity: init cpu capacity for CPUs [%*pbl] (to_visit=%*pbl)\n",
		 cpumask_pr_args(policy->related_cpus),
		 cpumask_pr_args(cpus_to_visit));

	cpumask_andnot(cpus_to_visit, cpus_to_visit, policy->related_cpus);

	for_each_cpu(cpu, policy->related_cpus)
		per_cpu(freq_factor, cpu) = policy->cpuinfo.max_freq / 1000;

	if (cpumask_empty(cpus_to_visit)) {
		topology_normalize_cpu_scale();
		schedule_work(&update_topology_flags_work);
		free_raw_capacity();
		pr_debug("cpu_capacity: parsing done\n");
		schedule_work(&parsing_done_work);
	}

	return 0;
}

static struct notifier_block init_cpu_capacity_notifier = {
	.notifier_call = init_cpu_capacity_callback,
};

static int __init register_cpufreq_notifier(void)
{
	int ret;

	/*
	 * On ACPI-based systems skip registering cpufreq notifier as cpufreq
	 * information is not needed for cpu capacity initialization.
	 */
	if (!acpi_disabled || !raw_capacity)
		return -EINVAL;

	if (!alloc_cpumask_var(&cpus_to_visit, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(cpus_to_visit, cpu_possible_mask);

	ret = cpufreq_register_notifier(&init_cpu_capacity_notifier,
					CPUFREQ_POLICY_NOTIFIER);

	if (ret)
		free_cpumask_var(cpus_to_visit);

	return ret;
}
core_initcall(register_cpufreq_notifier);

static void parsing_done_workfn(struct work_struct *work)
{
	cpufreq_unregister_notifier(&init_cpu_capacity_notifier,
				    CPUFREQ_POLICY_NOTIFIER);
	free_cpumask_var(cpus_to_visit);
}

#else
core_initcall(free_raw_capacity);
#endif

#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
/*
 * This function returns the logical cpu number of the node.
 * There are basically three kinds of return values:
 * (1) logical cpu number which is >= 0.
 * (2) -ENODEV when the device tree (DT) node is valid and found in the DT but
 * there is no possible logical CPU in the kernel to match. This happens
 * when CONFIG_NR_CPUS is configured to be smaller than the number of
 * CPU nodes in DT. We need to just ignore this case.
 * (3) -1 if the node does not exist in the device tree
 */
static int __init get_cpu_for_node(struct device_node *node)
{
	struct device_node *cpu_node;
	int cpu;

	cpu_node = of_parse_phandle(node, "cpu", 0);
	if (!cpu_node)
		return -1;

	cpu = of_cpu_node_to_id(cpu_node);
	if (cpu >= 0)
		topology_parse_cpu_capacity(cpu_node, cpu);
	else
		pr_info("CPU node for %pOF exists but the possible cpu range is: %*pbl\n",
			cpu_node, cpumask_pr_args(cpu_possible_mask));

	of_node_put(cpu_node);
	return cpu;
}

static int __init parse_core(struct device_node *core, int package_id,
			     int core_id)
{
	char name[20];
	bool leaf = true;
	int i = 0;
	int cpu;
	struct device_node *t;

	do {
		snprintf(name, sizeof(name), "thread%d", i);
		t = of_get_child_by_name(core, name);
		if (t) {
			leaf = false;
			cpu = get_cpu_for_node(t);
			if (cpu >= 0) {
				cpu_topology[cpu].package_id = package_id;
				cpu_topology[cpu].core_id = core_id;
				cpu_topology[cpu].thread_id = i;
			} else if (cpu != -ENODEV) {
				pr_err("%pOF: Can't get CPU for thread\n", t);
				of_node_put(t);
				return -EINVAL;
			}
			of_node_put(t);
		}
		i++;
	} while (t);

	cpu = get_cpu_for_node(core);
	if (cpu >= 0) {
		if (!leaf) {
			pr_err("%pOF: Core has both threads and CPU\n",
			       core);
			return -EINVAL;
		}

		cpu_topology[cpu].package_id = package_id;
		cpu_topology[cpu].core_id = core_id;
	} else if (leaf && cpu != -ENODEV) {
		pr_err("%pOF: Can't get CPU for leaf core\n", core);
		return -EINVAL;
	}

	return 0;
}

static int __init parse_cluster(struct device_node *cluster, int depth)
{
	char name[20];
	bool leaf = true;
	bool has_cores = false;
	struct device_node *c;
	static int package_id __initdata;
	int core_id = 0;
	int i, ret;

	/*
	 * First check for child clusters; we currently ignore any
	 * information about the nesting of clusters and present the
	 * scheduler with a flat list of them.
	 */
	i = 0;
	do {
		snprintf(name, sizeof(name), "cluster%d", i);
		c = of_get_child_by_name(cluster, name);
		if (c) {
			leaf = false;
			ret = parse_cluster(c, depth + 1);
			of_node_put(c);
			if (ret != 0)
				return ret;
		}
		i++;
	} while (c);

	/* Now check for cores */
	i = 0;
	do {
		snprintf(name, sizeof(name), "core%d", i);
		c = of_get_child_by_name(cluster, name);
		if (c) {
			has_cores = true;

			if (depth == 0) {
				pr_err("%pOF: cpu-map children should be clusters\n",
				       c);
				of_node_put(c);
				return -EINVAL;
			}

			if (leaf) {
				ret = parse_core(c, package_id, core_id++);
			} else {
				pr_err("%pOF: Non-leaf cluster with core %s\n",
				       cluster, name);
				ret = -EINVAL;
			}

			of_node_put(c);
			if (ret != 0)
				return ret;
		}
		i++;
	} while (c);

	if (leaf && !has_cores)
		pr_warn("%pOF: empty cluster\n", cluster);

	if (leaf)
		package_id++;

	return 0;
}

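/*
 * Illustrative sketch of the cpu-map layout consumed above (DT source;
 * labels are examples only). Leaf clusters contain core nodes, and SMT
 * cores contain thread nodes:
 *
 *	cpu-map {
 *		cluster0 {
 *			core0 { cpu = <&CPU0>; };
 *			core1 { cpu = <&CPU1>; };
 *		};
 *		cluster1 {
 *			core0 {
 *				thread0 { cpu = <&CPU2>; };
 *				thread1 { cpu = <&CPU3>; };
 *			};
 *		};
 *	};
 */
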
static int __init parse_dt_topology(void)
{
	struct device_node *cn, *map;
	int ret = 0;
	int cpu;

	cn = of_find_node_by_path("/cpus");
	if (!cn) {
		pr_err("No CPU information found in DT\n");
		return 0;
	}

	/*
	 * When topology is provided cpu-map is essentially a root
	 * cluster with restricted subnodes.
	 */
	map = of_get_child_by_name(cn, "cpu-map");
	if (!map)
		goto out;

	ret = parse_cluster(map, 0);
	if (ret != 0)
		goto out_map;

	topology_normalize_cpu_scale();

	/*
	 * Check that all cores are in the topology; the SMP code will
	 * only mark cores described in the DT as possible.
	 */
	for_each_possible_cpu(cpu)
		if (cpu_topology[cpu].package_id == -1)
			ret = -EINVAL;

out_map:
	of_node_put(map);
out:
	of_node_put(cn);
	return ret;
}
#endif

/*
 * cpu topology table
 */
struct cpu_topology cpu_topology[NR_CPUS];
EXPORT_SYMBOL_GPL(cpu_topology);

const struct cpumask *cpu_coregroup_mask(int cpu)
{
	const cpumask_t *core_mask = cpumask_of_node(cpu_to_node(cpu));

	/* Find the smaller of NUMA, core or LLC siblings */
	if (cpumask_subset(&cpu_topology[cpu].core_sibling, core_mask)) {
		/* not NUMA in package, let's use the package siblings */
		core_mask = &cpu_topology[cpu].core_sibling;
	}
	if (cpu_topology[cpu].llc_id != -1) {
		if (cpumask_subset(&cpu_topology[cpu].llc_sibling, core_mask))
			core_mask = &cpu_topology[cpu].llc_sibling;
	}

	return core_mask;
}

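/*
 * Example (assumed topology): CPUs 0-3 share one package and one NUMA
 * node, but only CPUs 0-1 share an LLC. core_sibling is 0-3 and
 * llc_sibling is 0-1, so the subset test picks the LLC mask and
 * cpu_coregroup_mask(0) returns CPUs 0-1.
 */
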
const struct cpumask *cpu_clustergroup_mask(int cpu)
{
	return &cpu_topology[cpu].cluster_sibling;
}

void update_siblings_masks(unsigned int cpuid)
{
	struct cpu_topology *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
	int cpu;

	/* update core and thread sibling masks */
	for_each_online_cpu(cpu) {
		cpu_topo = &cpu_topology[cpu];

		if (cpuid_topo->llc_id == cpu_topo->llc_id) {
			cpumask_set_cpu(cpu, &cpuid_topo->llc_sibling);
			cpumask_set_cpu(cpuid, &cpu_topo->llc_sibling);
		}

		if (cpuid_topo->package_id != cpu_topo->package_id)
			continue;

		if (cpuid_topo->cluster_id == cpu_topo->cluster_id &&
		    cpuid_topo->cluster_id != -1) {
			cpumask_set_cpu(cpu, &cpuid_topo->cluster_sibling);
			cpumask_set_cpu(cpuid, &cpu_topo->cluster_sibling);
		}

		cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
		cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);

		if (cpuid_topo->core_id != cpu_topo->core_id)
			continue;

		cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
		cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
	}
}

static void clear_cpu_topology(int cpu)
{
	struct cpu_topology *cpu_topo = &cpu_topology[cpu];

	cpumask_clear(&cpu_topo->llc_sibling);
	cpumask_set_cpu(cpu, &cpu_topo->llc_sibling);

	cpumask_clear(&cpu_topo->cluster_sibling);
	cpumask_set_cpu(cpu, &cpu_topo->cluster_sibling);

	cpumask_clear(&cpu_topo->core_sibling);
	cpumask_set_cpu(cpu, &cpu_topo->core_sibling);
	cpumask_clear(&cpu_topo->thread_sibling);
	cpumask_set_cpu(cpu, &cpu_topo->thread_sibling);
}

void __init reset_cpu_topology(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		struct cpu_topology *cpu_topo = &cpu_topology[cpu];

		cpu_topo->thread_id = -1;
		cpu_topo->core_id = -1;
		cpu_topo->cluster_id = -1;
		cpu_topo->package_id = -1;
		cpu_topo->llc_id = -1;

		clear_cpu_topology(cpu);
	}
}

void remove_cpu_topology(unsigned int cpu)
{
	int sibling;

	for_each_cpu(sibling, topology_core_cpumask(cpu))
		cpumask_clear_cpu(cpu, topology_core_cpumask(sibling));
	for_each_cpu(sibling, topology_sibling_cpumask(cpu))
		cpumask_clear_cpu(cpu, topology_sibling_cpumask(sibling));
	for_each_cpu(sibling, topology_cluster_cpumask(cpu))
		cpumask_clear_cpu(cpu, topology_cluster_cpumask(sibling));
	for_each_cpu(sibling, topology_llc_cpumask(cpu))
		cpumask_clear_cpu(cpu, topology_llc_cpumask(sibling));

	clear_cpu_topology(cpu);
}

__weak int __init parse_acpi_topology(void)
{
	return 0;
}

#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
void __init init_cpu_topology(void)
{
	reset_cpu_topology();

	/*
	 * Discard anything that was parsed if we hit an error so we
	 * don't use partial information.
	 */
	if (parse_acpi_topology())
		reset_cpu_topology();
	else if (of_have_populated_dt() && parse_dt_topology())
		reset_cpu_topology();
}
#endif