/*
 * Arch specific cpu topology information
 *
 * Copyright (C) 2016, ARM Ltd.
 * Written by: Juri Lelli, ARM Ltd.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Released under the GPLv2 only.
 * SPDX-License-Identifier: GPL-2.0
 */
#include <linux/acpi.h>
#include <linux/arch_topology.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/device.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/sched/topology.h>
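/*
 * cpu_scale holds the current capacity of each CPU, expressed relative to
 * SCHED_CAPACITY_SCALE (1024). cpu_scale_mutex serializes updates coming
 * from the sysfs interface and from capacity normalization below.
 */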
static DEFINE_MUTEX(cpu_scale_mutex);
static DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
{
	return per_cpu(cpu_scale, cpu);
}
void set_capacity_scale(unsigned int cpu, unsigned long capacity)
{
	per_cpu(cpu_scale, cpu) = capacity;
}
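/*
 * Per-CPU "cpu_capacity" sysfs attribute (e.g.
 * /sys/devices/system/cpu/cpuX/cpu_capacity). Reads return the current
 * capacity; writes must be <= SCHED_CAPACITY_SCALE (1024) and are applied
 * to the target CPU and all of its core siblings.
 */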
static ssize_t cpu_capacity_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);

	return sprintf(buf, "%lu\n",
		       arch_scale_cpu_capacity(NULL, cpu->dev.id));
}
static ssize_t cpu_capacity_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf,
				  size_t count)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);
	int this_cpu = cpu->dev.id;
	int i;
	unsigned long new_capacity;
	ssize_t ret;

	if (!count)
		return 0;

	ret = kstrtoul(buf, 0, &new_capacity);
	if (ret)
		return ret;
	if (new_capacity > SCHED_CAPACITY_SCALE)
		return -EINVAL;

	mutex_lock(&cpu_scale_mutex);
	for_each_cpu(i, &cpu_topology[this_cpu].core_sibling)
		set_capacity_scale(i, new_capacity);
	mutex_unlock(&cpu_scale_mutex);

	return count;
}

static DEVICE_ATTR_RW(cpu_capacity);
static int register_cpu_capacity_sysctl(void)
{
	int i;
	struct device *cpu;

	for_each_possible_cpu(i) {
		cpu = get_cpu_device(i);
		if (!cpu) {
			pr_err("%s: too early to get CPU%d device!\n",
			       __func__, i);
			continue;
		}
		device_create_file(cpu, &dev_attr_cpu_capacity);
	}

	return 0;
}
subsys_initcall(register_cpu_capacity_sysctl);
static u32 capacity_scale;
static u32 *raw_capacity;
static bool cap_parsing_failed;
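/*
 * Normalize the raw capacities so that the biggest CPU ends up at
 * SCHED_CAPACITY_SCALE:
 *
 *   cpu_scale[cpu] = raw_capacity[cpu] * SCHED_CAPACITY_SCALE / capacity_scale
 *
 * where capacity_scale is the largest raw capacity seen so far.
 */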
void normalize_cpu_capacity(void)
{
	u64 capacity;
	int cpu;

	if (!raw_capacity || cap_parsing_failed)
		return;

	pr_debug("cpu_capacity: capacity_scale=%u\n", capacity_scale);
	mutex_lock(&cpu_scale_mutex);
	for_each_possible_cpu(cpu) {
		pr_debug("cpu_capacity: cpu=%d raw_capacity=%u\n",
			 cpu, raw_capacity[cpu]);
		capacity = (raw_capacity[cpu] << SCHED_CAPACITY_SHIFT)
			/ capacity_scale;
		set_capacity_scale(cpu, capacity);
		pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n",
			 cpu, arch_scale_cpu_capacity(NULL, cpu));
	}
	mutex_unlock(&cpu_scale_mutex);
}
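/*
 * Parse the "capacity-dmips-mhz" property of a CPU device tree node.
 * Returns 1 if the property was read successfully, 0 otherwise. If any CPU
 * is missing the property, parsing is flagged as failed and every CPU keeps
 * the default capacity of SCHED_CAPACITY_SCALE (1024).
 */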
int __init parse_cpu_capacity(struct device_node *cpu_node, int cpu)
{
	int ret = 1;
	u32 cpu_capacity;

	if (cap_parsing_failed)
		return !ret;

	ret = of_property_read_u32(cpu_node,
				   "capacity-dmips-mhz",
				   &cpu_capacity);
	if (!ret) {
		if (!raw_capacity) {
			raw_capacity = kcalloc(num_possible_cpus(),
					       sizeof(*raw_capacity),
					       GFP_KERNEL);
			if (!raw_capacity) {
				pr_err("cpu_capacity: failed to allocate memory for raw capacities\n");
				cap_parsing_failed = true;
				return !ret;
			}
		}
		capacity_scale = max(cpu_capacity, capacity_scale);
		raw_capacity[cpu] = cpu_capacity;
		pr_debug("cpu_capacity: %s cpu_capacity=%u (raw)\n",
			 cpu_node->full_name, raw_capacity[cpu]);
	} else {
		if (raw_capacity) {
			pr_err("cpu_capacity: missing %s raw capacity\n",
			       cpu_node->full_name);
			pr_err("cpu_capacity: partial information: fallback to 1024 for all CPUs\n");
		}
		cap_parsing_failed = true;
		kfree(raw_capacity);
	}

	return !ret;
}
#ifdef CONFIG_CPU_FREQ
static cpumask_var_t cpus_to_visit;
static bool cap_parsing_done;
static void parsing_done_workfn(struct work_struct *work);
static DECLARE_WORK(parsing_done_work, parsing_done_workfn);
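/*
 * When DT capacities are available, a cpufreq policy notifier scales each
 * raw dmips/MHz value by the policy's maximum frequency. Every policy
 * clears its related CPUs from cpus_to_visit; once the mask is empty the
 * capacities are normalized and a work item is scheduled to unregister the
 * notifier (so that unregistration does not happen from within the notifier
 * callback itself).
 */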
static int
init_cpu_capacity_callback(struct notifier_block *nb,
			   unsigned long val,
			   void *data)
{
	struct cpufreq_policy *policy = data;
	int cpu;

	if (cap_parsing_failed || cap_parsing_done)
		return 0;

	switch (val) {
	case CPUFREQ_NOTIFY:
		pr_debug("cpu_capacity: init cpu capacity for CPUs [%*pbl] (to_visit=%*pbl)\n",
			 cpumask_pr_args(policy->related_cpus),
			 cpumask_pr_args(cpus_to_visit));
		cpumask_andnot(cpus_to_visit,
			       cpus_to_visit,
			       policy->related_cpus);
		for_each_cpu(cpu, policy->related_cpus) {
			raw_capacity[cpu] = arch_scale_cpu_capacity(NULL, cpu) *
					    policy->cpuinfo.max_freq / 1000UL;
			capacity_scale = max(raw_capacity[cpu], capacity_scale);
		}
		if (cpumask_empty(cpus_to_visit)) {
			normalize_cpu_capacity();
			kfree(raw_capacity);
			pr_debug("cpu_capacity: parsing done\n");
			cap_parsing_done = true;
			schedule_work(&parsing_done_work);
		}
	}

	return 0;
}
static struct notifier_block init_cpu_capacity_notifier = {
	.notifier_call = init_cpu_capacity_callback,
};
static int __init register_cpufreq_notifier(void)
{
	/*
	 * On ACPI-based systems we need to use the default cpu capacity
	 * until we have the necessary code to parse the cpu capacity, so
	 * skip registering the cpufreq notifier.
	 */
	if (!acpi_disabled || !raw_capacity)
		return -EINVAL;

	if (!alloc_cpumask_var(&cpus_to_visit, GFP_KERNEL)) {
		pr_err("cpu_capacity: failed to allocate memory for cpus_to_visit\n");
		return -ENOMEM;
	}

	cpumask_copy(cpus_to_visit, cpu_possible_mask);

	return cpufreq_register_notifier(&init_cpu_capacity_notifier,
					 CPUFREQ_POLICY_NOTIFIER);
}
core_initcall(register_cpufreq_notifier);
static void parsing_done_workfn(struct work_struct *work)
{
	cpufreq_unregister_notifier(&init_cpu_capacity_notifier,
				    CPUFREQ_POLICY_NOTIFIER);
}
#else
static int __init free_raw_capacity(void)
{
	kfree(raw_capacity);

	return 0;
}
core_initcall(free_raw_capacity);
#endif